feat: Waku v2 bridge

Issue #12610
Michal Iskierko
2023-11-12 13:29:38 +01:00
parent 56e7bd01ca
commit 6d31343205
6716 changed files with 1982502 additions and 5891 deletions

5
vendor/github.com/ethereum/go-ethereum/.dockerignore generated vendored Normal file

@@ -0,0 +1,5 @@
**/*_test.go
build/_workspace
build/_bin
tests/testdata

3
vendor/github.com/ethereum/go-ethereum/.gitattributes generated vendored Normal file

@@ -0,0 +1,3 @@
# Auto detect text files and perform LF normalization
* text=auto
*.sol linguist-language=Solidity

49
vendor/github.com/ethereum/go-ethereum/.gitignore generated vendored Normal file

@@ -0,0 +1,49 @@
# See http://help.github.com/ignore-files/ for more about ignoring files.
#
# If you find yourself ignoring temporary files generated by your text editor
# or operating system, you probably want to add a global ignore instead:
# git config --global core.excludesfile ~/.gitignore_global
/tmp
*/**/*un~
*/**/*.test
*un~
.DS_Store
*/**/.DS_Store
.ethtest
*/**/*tx_database*
*/**/*dapps*
build/_vendor/pkg
#*
.#*
*#
*~
.project
.settings
# used by the Makefile
/build/_workspace/
/build/cache/
/build/bin/
/geth*.zip
# travis
profile.tmp
profile.cov
# IdeaIDE
.idea
# VS Code
.vscode
# dashboard
/dashboard/assets/flow-typed
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/bundle.js
/dashboard/assets/bundle.js.map
/dashboard/assets/package-lock.json
**/yarn-error.log

8
vendor/github.com/ethereum/go-ethereum/.gitmodules generated vendored Normal file

@@ -0,0 +1,8 @@
[submodule "tests"]
path = tests/testdata
url = https://github.com/ethereum/tests
shallow = true
[submodule "evm-benchmarks"]
path = tests/evm-benchmarks
url = https://github.com/ipsilon/evm-benchmarks
shallow = true

64
vendor/github.com/ethereum/go-ethereum/.golangci.yml generated vendored Normal file

@@ -0,0 +1,64 @@
# This file configures github.com/golangci/golangci-lint.
run:
  timeout: 20m
  tests: true
  # default is true. Enables skipping of directories:
  # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
  skip-dirs-use-default: true
  skip-files:
    - core/genesis_alloc.go
linters:
  disable-all: true
  enable:
    - goconst
    - goimports
    - gosimple
    - govet
    - ineffassign
    - misspell
    - unconvert
    - typecheck
    - unused
    - staticcheck
    - bidichk
    - durationcheck
    - exportloopref
    - whitespace
    # - structcheck # lots of false positives
    # - errcheck #lot of false positives
    # - contextcheck
    # - errchkjson # lots of false positives
    # - errorlint # this check crashes
    # - exhaustive # silly check
    # - makezero # false positives
    # - nilerr # several intentional
linters-settings:
  gofmt:
    simplify: true
  goconst:
    min-len: 3 # minimum length of string constant
    min-occurrences: 6 # minimum number of occurrences
issues:
  exclude-rules:
    - path: crypto/bn256/cloudflare/optate.go
      linters:
        - deadcode
        - staticcheck
    - path: internal/build/pgp.go
      text: 'SA1019: "golang.org/x/crypto/openpgp" is deprecated: this package is unmaintained except for security fixes.'
    - path: core/vm/contracts.go
      text: 'SA1019: "golang.org/x/crypto/ripemd160" is deprecated: RIPEMD-160 is a legacy hash and should not be used for new applications.'
    - path: accounts/usbwallet/trezor.go
      text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
    - path: accounts/usbwallet/trezor/
      text: 'SA1019: "github.com/golang/protobuf/proto" is deprecated: Use the "google.golang.org/protobuf/proto" package instead.'
  exclude:
    - 'SA1019: event.TypeMux is deprecated: use Feed'
    - 'SA1019: strings.Title is deprecated'
    - 'SA1019: strings.Title has been deprecated since Go 1.18 and an alternative has been available since Go 1.0: The rule Title uses for word boundaries does not handle Unicode punctuation properly. Use golang.org/x/text/cases instead.'
    - 'SA1029: should not use built-in type string as key for value'

237
vendor/github.com/ethereum/go-ethereum/.mailmap generated vendored Normal file

@@ -0,0 +1,237 @@
Aaron Buchwald <aaron.buchwald56@gmail.com>
Aaron Kumavis <kumavis@users.noreply.github.com>
Abel Nieto <abel.nieto90@gmail.com>
Abel Nieto <abel.nieto90@gmail.com> <anietoro@uwaterloo.ca>
Afri Schoedon <58883403+q9f@users.noreply.github.com>
Afri Schoedon <5chdn@users.noreply.github.com> <58883403+q9f@users.noreply.github.com>
Alec Perseghin <aperseghin@gmail.com>
Aleksey Smyrnov <i@soar.name>
Alex Leverington <alex@ethdev.com>
Alex Leverington <alex@ethdev.com> <subtly@users.noreply.github.com>
Alex Pozhilenkov <alex_pozhilenkov@adoriasoft.com>
Alex Pozhilenkov <alex_pozhilenkov@adoriasoft.com> <leshiy12345678@gmail.com>
Alexey Akhunov <akhounov@gmail.com>
Alon Muroch <alonmuroch@gmail.com>
Andrey Petrov <shazow@gmail.com>
Andrey Petrov <shazow@gmail.com> <andrey.petrov@shazow.net>
Arkadiy Paronyan <arkadiy@ethdev.com>
Armin Braun <me@obrown.io>
Aron Fischer <github@aron.guru> <homotopycolimit@users.noreply.github.com>
Austin Roberts <code@ausiv.com>
Austin Roberts <code@ausiv.com> <git@ausiv.com>
Bas van Kervel <bas@ethdev.com>
Bas van Kervel <bas@ethdev.com> <basvankervel@ziggo.nl>
Bas van Kervel <bas@ethdev.com> <basvankervel@gmail.com>
Bas van Kervel <bas@ethdev.com> <bas-vk@users.noreply.github.com>
Boqin Qin <bobbqqin@bupt.edu.cn>
Boqin Qin <bobbqqin@bupt.edu.cn> <Bobbqqin@gmail.com>
Casey Detrio <cdetrio@gmail.com>
Cheng Li <lob4tt@gmail.com>
Chris Ziogas <ziogaschr@gmail.com>
Chris Ziogas <ziogaschr@gmail.com> <ziogas_chr@hotmail.com>
Christoph Jentzsch <jentzsch.software@gmail.com>
Diederik Loerakker <proto@protolambda.com>
Dimitry Khokhlov <winsvega@mail.ru>
Domino Valdano <dominoplural@gmail.com>
Domino Valdano <dominoplural@gmail.com> <jeff@okcupid.com>
Edgar Aroutiounian <edgar.factorial@gmail.com>
Elliot Shepherd <elliot@identitii.com>
Enrique Fynn <enriquefynn@gmail.com>
Enrique Fynn <me@enriquefynn.com>
Enrique Fynn <me@enriquefynn.com> <enriquefynn@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com> <ernestodeltoro@users.noreply.github.com>
Everton Fraga <ev@ethereum.org>
Felix Lange <fjl@twurst.com>
Felix Lange <fjl@twurst.com> <fjl@users.noreply.github.com>
Frank Wang <eternnoir@gmail.com>
Gary Rong <garyrong0905@gmail.com>
Gavin Wood <i@gavwood.com>
Gregg Dourgarian <greggd@tempworks.com>
Guillaume Ballet <gballet@gmail.com>
Guillaume Ballet <gballet@gmail.com> <3272758+gballet@users.noreply.github.com>
Guillaume Nicolas <guin56@gmail.com>
Hanjiang Yu <delacroix.yu@gmail.com>
Hanjiang Yu <delacroix.yu@gmail.com> <42531996+de1acr0ix@users.noreply.github.com>
Heiko Hees <heiko@heiko.org>
Henning Diedrich <hd@eonblast.com>
Henning Diedrich <hd@eonblast.com> Drake Burroughs <wildfyre@hotmail.com>
Hwanjo Heo <34005989+hwanjo@users.noreply.github.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com> <i.sharipov@corp.vk.com>
Jae Kwon <jkwon.work@gmail.com>
Janoš Guljaš <janos@resenje.org> <janos@users.noreply.github.com>
Janoš Guljaš <janos@resenje.org> Janos Guljas <janos@resenje.org>
Jared Wasinger <j-wasinger@hotmail.com>
Jason Carver <jacarver@linkedin.com>
Jason Carver <jacarver@linkedin.com> <ut96caarrs@snkmail.com>
Javier Peletier <jm@epiclabs.io>
Javier Peletier <jm@epiclabs.io> <jpeletier@users.noreply.github.com>
Jeffrey Wilcke <jeffrey@ethereum.org>
Jeffrey Wilcke <jeffrey@ethereum.org> <geffobscura@gmail.com>
Jeffrey Wilcke <jeffrey@ethereum.org> <obscuren@obscura.com>
Jeffrey Wilcke <jeffrey@ethereum.org> <obscuren@users.noreply.github.com>
Jens Agerberg <github@agerberg.me>
Joseph Chow <ethereum@outlook.com>
Joseph Chow <ethereum@outlook.com> ethers <TODO>
Joseph Goulden <joegoulden@gmail.com>
Justin Drake <drakefjustin@gmail.com>
Kenso Trabing <ktrabing@acm.org>
Kenso Trabing <ktrabing@acm.org> <kenso.trabing@bloomwebsite.com>
Liang Ma <liangma@liangbit.com>
Liang Ma <liangma@liangbit.com> <liangma.ul@gmail.com>
Louis Holbrook <dev@holbrook.no>
Louis Holbrook <dev@holbrook.no> <nolash@users.noreply.github.com>
Maran Hidskes <maran.hidskes@gmail.com>
Marian Oancea <contact@siteshop.ro>
Martin Becze <mjbecze@gmail.com>
Martin Becze <mjbecze@gmail.com> <wanderer@users.noreply.github.com>
Martin Lundfall <martin.lundfall@protonmail.com>
Matt Garnett <14004106+lightclient@users.noreply.github.com>
Matthew Halpern <matthalp@gmail.com>
Matthew Halpern <matthalp@gmail.com> <matthalp@google.com>
Michael Riabzev <michael@starkware.co>
Nchinda Nchinda <nchinda2@gmail.com>
Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net>
Nick Savers <nicksavers@gmail.com>
Nishant Das <nishdas93@gmail.com>
Nishant Das <nishdas93@gmail.com> <nish1993@hotmail.com>
Olivier Hervieu <olivier.hervieu@gmail.com>
Pascal Dierich <pascal@merkleplant.xyz>
Pascal Dierich <pascal@merkleplant.xyz> <pascal@pascaldierich.com>
RJ Catalano <catalanor0220@gmail.com>
RJ Catalano <catalanor0220@gmail.com> <rj@erisindustries.com>
Ralph Caraveo <deckarep@gmail.com>
Rene Lubov <41963722+renaynay@users.noreply.github.com>
Robert Zaremba <robert@zaremba.ch>
Robert Zaremba <robert@zaremba.ch> <robert.zaremba@scale-it.pl>
Roman Mandeleil <roman.mandeleil@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com>
Sorin Neacsu <sorin.neacsu@gmail.com> <sorin@users.noreply.github.com>
Sven Ehlert <sven@ethdev.com>
Taylor Gerring <taylor.gerring@gmail.com>
Taylor Gerring <taylor.gerring@gmail.com> <taylor.gerring@ethereum.org>
Thomas Bocek <tom@tomp2p.net>
Tim Cooijmans <timcooijmans@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com>
Valentin Wüstholz <wuestholz@gmail.com> <wuestholz@users.noreply.github.com>
Victor Tran <vu.tran54@gmail.com>
Viktor Trón <viktor.tron@gmail.com>
Ville Sundell <github@solarius.fi>
Vincent G <caktux@gmail.com>
Vitalik Buterin <v@buterin.com>
Vlad Gluhovsky <gluk256@gmail.com>
Vlad Gluhovsky <gluk256@gmail.com> <gluk256@users.noreply.github.com>
Wenshao Zhong <wzhong20@uic.edu>
Wenshao Zhong <wzhong20@uic.edu> <11510383@mail.sustc.edu.cn>
Wenshao Zhong <wzhong20@uic.edu> <374662347@qq.com>
Will Villanueva <hello@willvillanueva.com>
Xiaobing Jiang <s7v7nislands@gmail.com>
Xudong Liu <33193253+r1cs@users.noreply.github.com>
Yohann Léon <sybiload@gmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com> <zachinquarantine@yahoo.com>
Ziyuan Zhong <zzy.albert@163.com>
Zsolt Felföldi <zsfelfoldi@gmail.com>
meowsbits <b5c6@protonmail.com>
meowsbits <b5c6@protonmail.com> <45600330+meowsbits@users.noreply.github.com>
nedifi <103940716+nedifi@users.noreply.github.com>
Максим Чусовлянов <mchusovlianov@gmail.com>

247
vendor/github.com/ethereum/go-ethereum/.travis.yml generated vendored Normal file

@@ -0,0 +1,247 @@
language: go
go_import_path: github.com/ethereum/go-ethereum
sudo: false
jobs:
  allow_failures:
    - stage: build
      os: osx
      go: 1.17.x
      env:
        - azure-osx
        - azure-ios
        - cocoapods-ios
  include:
    # This builder only tests code linters on latest version of Go
    - stage: lint
      os: linux
      dist: bionic
      go: 1.19.x
      env:
        - lint
      git:
        submodules: false # avoid cloning ethereum/tests
      script:
        - go run build/ci.go lint
    # These builders create the Docker sub-images for multi-arch push and each
    # will attempt to push the multi-arch image if they are the last builder
    - stage: build
      if: type = push
      os: linux
      arch: amd64
      dist: bionic
      go: 1.19.x
      env:
        - docker
      services:
        - docker
      git:
        submodules: false # avoid cloning ethereum/tests
      before_install:
        - export DOCKER_CLI_EXPERIMENTAL=enabled
      script:
        - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go
    - stage: build
      if: type = push
      os: linux
      arch: arm64
      dist: bionic
      go: 1.19.x
      env:
        - docker
      services:
        - docker
      git:
        submodules: false # avoid cloning ethereum/tests
      before_install:
        - export DOCKER_CLI_EXPERIMENTAL=enabled
      script:
        - go run build/ci.go docker -image -manifest amd64,arm64 -upload ethereum/client-go
    # This builder does the Ubuntu PPA upload
    - stage: build
      if: type = push
      os: linux
      dist: bionic
      go: 1.19.x
      env:
        - ubuntu-ppa
        - GO111MODULE=on
      git:
        submodules: false # avoid cloning ethereum/tests
      addons:
        apt:
          packages:
            - devscripts
            - debhelper
            - dput
            - fakeroot
            - python-bzrlib
            - python-paramiko
      script:
        - echo '|1|7SiYPr9xl3uctzovOTj4gMwAC1M=|t6ReES75Bo/PxlOPJ6/GsGbTrM0= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA0aKz5UTUndYgIGG7dQBV+HaeuEZJ2xPHo2DS2iSKvUL4xNMSAY4UguNW+pX56nAQmZKIZZ8MaEvSj6zMEDiq6HFfn5JcTlM80UwlnyKe8B8p7Nk06PPQLrnmQt5fh0HmEcZx+JU9TZsfCHPnX7MNz4ELfZE6cFsclClrKim3BHUIGq//t93DllB+h4O9LHjEUsQ1Sr63irDLSutkLJD6RXchjROXkNirlcNVHH/jwLWR5RcYilNX7S5bIkK8NlWPjsn/8Ua5O7I9/YoE97PpO6i73DTGLh5H9JN/SITwCKBkgSDWUt61uPK3Y11Gty7o2lWsBjhBUm2Y38CBsoGmBw==' >> ~/.ssh/known_hosts
        - go run build/ci.go debsrc -upload ethereum/ethereum -sftp-user geth-ci -signer "Go Ethereum Linux Builder <geth-ci@ethereum.org>"
    # This builder does the Linux Azure uploads
    - stage: build
      if: type = push
      os: linux
      dist: bionic
      sudo: required
      go: 1.19.x
      env:
        - azure-linux
        - GO111MODULE=on
      git:
        submodules: false # avoid cloning ethereum/tests
      addons:
        apt:
          packages:
            - gcc-multilib
      script:
        # Build for the primary platforms that Trusty can manage
        - go run build/ci.go install -dlgo
        - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
        - go run build/ci.go install -dlgo -arch 386
        - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
        # Switch over GCC to cross compilation (breaks 386, hence why do it here only)
        - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
        - sudo ln -s /usr/include/asm-generic /usr/include/asm
        - GOARM=5 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc
        - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
        - GOARM=6 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc
        - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
        - GOARM=7 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabihf-gcc
        - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
        - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc
        - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
    # This builder does the Android Maven and Azure uploads
    - stage: build
      if: type = push
      os: linux
      dist: bionic
      addons:
        apt:
          packages:
            - openjdk-8-jdk
      env:
        - azure-android
        - maven-android
        - GO111MODULE=on
      git:
        submodules: false # avoid cloning ethereum/tests
      before_install:
        # Install Android and it's dependencies manually, Travis is stale
        - export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
        - curl https://dl.google.com/android/repository/commandlinetools-linux-6858069_latest.zip -o android.zip
        - unzip -q android.zip -d $HOME/sdk && rm android.zip
        - mv $HOME/sdk/cmdline-tools $HOME/sdk/latest && mkdir $HOME/sdk/cmdline-tools && mv $HOME/sdk/latest $HOME/sdk/cmdline-tools
        - export PATH=$PATH:$HOME/sdk/cmdline-tools/latest/bin
        - export ANDROID_HOME=$HOME/sdk
        - yes | sdkmanager --licenses >/dev/null
        - sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle"
        # Install Go to allow building with
        - curl https://dl.google.com/go/go1.18.linux-amd64.tar.gz | tar -xz
        - export PATH=`pwd`/go/bin:$PATH
        - export GOROOT=`pwd`/go
        - export GOPATH=$HOME/go
      script:
        # Build the Android archive and upload it to Maven Central and Azure
        - mkdir -p $GOPATH/src/github.com/ethereum
        - ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum
        - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -signify SIGNIFY_KEY -deploy https://oss.sonatype.org -upload gethstore/builds
    # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads
    - stage: build
      if: type = push
      os: osx
      go: 1.19.x
      env:
        - azure-osx
        - azure-ios
        - cocoapods-ios
        - GO111MODULE=on
      git:
        submodules: false # avoid cloning ethereum/tests
      script:
        - go run build/ci.go install -dlgo
        - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds
        # Build the iOS framework and upload it to CocoaPods and Azure
        - gem uninstall cocoapods -a -x
        - gem install cocoapods
        - mv ~/.cocoapods/repos/master ~/.cocoapods/repos/master.bak
        - sed -i '.bak' 's/repo.join/!repo.join/g' $(dirname `gem which cocoapods`)/cocoapods/sources_manager.rb
        - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then git clone --depth=1 https://github.com/CocoaPods/Specs.git ~/.cocoapods/repos/master && pod setup --verbose; fi
        - xctool -version
        - xcrun simctl list
        # Workaround for https://github.com/golang/go/issues/23749
        - export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc'
        - go run build/ci.go xcode -signer IOS_SIGNING_KEY -signify SIGNIFY_KEY -deploy trunk -upload gethstore/builds
    # These builders run the tests
    - stage: build
      os: linux
      arch: amd64
      dist: bionic
      go: 1.19.x
      env:
        - GO111MODULE=on
      script:
        - go run build/ci.go test -coverage $TEST_PACKAGES
    - stage: build
      if: type = pull_request
      os: linux
      arch: arm64
      dist: bionic
      go: 1.18.x
      env:
        - GO111MODULE=on
      script:
        - go run build/ci.go test -coverage $TEST_PACKAGES
    - stage: build
      os: linux
      dist: bionic
      go: 1.18.x
      env:
        - GO111MODULE=on
      script:
        - go run build/ci.go test -coverage $TEST_PACKAGES
    # This builder does the Azure archive purges to avoid accumulating junk
    - stage: build
      if: type = cron
      os: linux
      dist: bionic
      go: 1.19.x
      env:
        - azure-purge
        - GO111MODULE=on
      git:
        submodules: false # avoid cloning ethereum/tests
      script:
        - go run build/ci.go purge -store gethstore/builds -days 14
    # This builder executes race tests
    - stage: build
      if: type = cron
      os: linux
      dist: bionic
      go: 1.19.x
      env:
        - GO111MODULE=on
      script:
        - go run build/ci.go test -race -coverage $TEST_PACKAGES

591
vendor/github.com/ethereum/go-ethereum/AUTHORS generated vendored Normal file

@@ -0,0 +1,591 @@
# This is the official list of go-ethereum authors for copyright purposes.
6543 <6543@obermui.de>
a e r t h <aerth@users.noreply.github.com>
Aaron Buchwald <aaron.buchwald56@gmail.com>
Abel Nieto <abel.nieto90@gmail.com>
Adam Babik <a.babik@designfortress.com>
Adam Schmideg <adamschmideg@users.noreply.github.com>
Aditya <adityasripal@gmail.com>
Aditya Arora <arora.aditya520@gmail.com>
Adrià Cidre <adria.cidre@gmail.com>
Afanasii Kurakin <afanasy@users.noreply.github.com>
Afri Schoedon <5chdn@users.noreply.github.com>
Agustin Armellini Fischer <armellini13@gmail.com>
Ahyun <urbanart2251@gmail.com>
Airead <fgh1987168@gmail.com>
Alan Chen <alanchchen@users.noreply.github.com>
Alejandro Isaza <alejandro.isaza@gmail.com>
Aleksey Smyrnov <i@soar.name>
Ales Katona <ales@coinbase.com>
Alex Beregszaszi <alex@rtfs.hu>
Alex Leverington <alex@ethdev.com>
Alex Mazalov <mazalov@gmail.com>
Alex Pozhilenkov <alex_pozhilenkov@adoriasoft.com>
Alex Prut <1648497+alexprut@users.noreply.github.com>
Alex Wu <wuyiding@gmail.com>
Alexander van der Meij <alexandervdm@users.noreply.github.com>
Alexander Yastrebov <yastrebov.alex@gmail.com>
Alexandre Van de Sande <alex.vandesande@ethdev.com>
Alexey Akhunov <akhounov@gmail.com>
Alexey Shekhirin <a.shekhirin@gmail.com>
alexwang <39109351+dipingxian2@users.noreply.github.com>
Ali Atiia <42751398+aliatiia@users.noreply.github.com>
Ali Hajimirza <Ali92hm@users.noreply.github.com>
am2rican5 <am2rican5@gmail.com>
AmitBRD <60668103+AmitBRD@users.noreply.github.com>
Anatole <62328077+a2br@users.noreply.github.com>
Andrea Franz <andrea@gravityblast.com>
Andrei Maiboroda <andrei@ethereum.org>
Andrey Petrov <shazow@gmail.com>
ANOTHEL <anothel1@naver.com>
Antoine Rondelet <rondelet.antoine@gmail.com>
Antoine Toulme <atoulme@users.noreply.github.com>
Anton Evangelatov <anton.evangelatov@gmail.com>
Antonio Salazar Cardozo <savedfastcool@gmail.com>
Arba Sasmoyo <arba.sasmoyo@gmail.com>
Armani Ferrante <armaniferrante@berkeley.edu>
Armin Braun <me@obrown.io>
Aron Fischer <github@aron.guru>
atsushi-ishibashi <atsushi.ishibashi@finatext.com>
Austin Roberts <code@ausiv.com>
ayeowch <ayeowch@gmail.com>
b00ris <b00ris@mail.ru>
b1ackd0t <blackd0t@protonmail.com>
bailantaotao <Edwin@maicoin.com>
baizhenxuan <nkbai@163.com>
Balaji Shetty Pachai <32358081+balajipachai@users.noreply.github.com>
Balint Gabor <balint.g@gmail.com>
baptiste-b-pegasys <85155432+baptiste-b-pegasys@users.noreply.github.com>
Bas van Kervel <bas@ethdev.com>
Benjamin Brent <benjamin@benjaminbrent.com>
benma <mbencun@gmail.com>
Benoit Verkindt <benoit.verkindt@gmail.com>
Binacs <bin646891055@gmail.com>
bloonfield <bloonfield@163.com>
Bo <bohende@gmail.com>
Bo Ye <boy.e.computer.1982@outlook.com>
Bob Glickstein <bobg@users.noreply.github.com>
Boqin Qin <bobbqqin@bupt.edu.cn>
Brandon Harden <b.harden92@gmail.com>
Brent <bmperrea@gmail.com>
Brian Schroeder <bts@gmail.com>
Bruno Škvorc <bruno@skvorc.me>
C. Brown <hackdom@majoolr.io>
Caesar Chad <BLUE.WEB.GEEK@gmail.com>
Casey Detrio <cdetrio@gmail.com>
CDsigma <cdsigma271@gmail.com>
Ceelog <chenwei@ceelog.org>
Ceyhun Onur <ceyhun.onur@avalabs.org>
chabashilah <doumodoumo@gmail.com>
changhong <changhong.yu@shanbay.com>
Chase Wright <mysticryuujin@gmail.com>
Chen Quan <terasum@163.com>
Cheng Li <lob4tt@gmail.com>
chenglin <910372762@qq.com>
chenyufeng <yufengcode@gmail.com>
Chris Pacia <ctpacia@gmail.com>
Chris Ziogas <ziogaschr@gmail.com>
Christian Muehlhaeuser <muesli@gmail.com>
Christoph Jentzsch <jentzsch.software@gmail.com>
chuwt <weitaochu@gmail.com>
cong <ackratos@users.noreply.github.com>
Connor Stein <connor.stein@mail.mcgill.ca>
Corey Lin <514971757@qq.com>
courtier <derinilter@gmail.com>
cpusoft <cpusoft@live.com>
Crispin Flowerday <crispin@bitso.com>
croath <croathliu@gmail.com>
cui <523516579@qq.com>
Dan DeGreef <dan.degreef@gmail.com>
Dan Kinsley <dan@joincivil.com>
Dan Sosedoff <dan.sosedoff@gmail.com>
Daniel A. Nagy <nagy.da@gmail.com>
Daniel Perez <daniel@perez.sh>
Daniel Sloof <goapsychadelic@gmail.com>
Darioush Jalali <darioush.jalali@avalabs.org>
Darrel Herbst <dherbst@gmail.com>
Dave Appleton <calistralabs@gmail.com>
Dave McGregor <dave.s.mcgregor@gmail.com>
David Cai <davidcai1993@yahoo.com>
David Huie <dahuie@gmail.com>
Denver <aeharvlee@gmail.com>
Derek Chiang <me@derekchiang.com>
Derek Gottfrid <derek@codecubed.com>
Di Peng <pendyaaa@gmail.com>
Diederik Loerakker <proto@protolambda.com>
Diego Siqueira <DiSiqueira@users.noreply.github.com>
Diep Pham <mrfavadi@gmail.com>
dipingxian2 <39109351+dipingxian2@users.noreply.github.com>
divergencetech <94644849+divergencetech@users.noreply.github.com>
dm4 <sunrisedm4@gmail.com>
Dmitrij Koniajev <dimchansky@gmail.com>
Dmitry Shulyak <yashulyak@gmail.com>
Dmitry Zenovich <dzenovich@gmail.com>
Domino Valdano <dominoplural@gmail.com>
Dragan Milic <dragan@netice9.com>
dragonvslinux <35779158+dragononcrypto@users.noreply.github.com>
Edgar Aroutiounian <edgar.factorial@gmail.com>
Eduard S <eduardsanou@posteo.net>
Egon Elbre <egonelbre@gmail.com>
Elad <theman@elad.im>
Eli <elihanover@yahoo.com>
Elias Naur <elias.naur@gmail.com>
Elliot Shepherd <elliot@identitii.com>
Emil <mursalimovemeel@gmail.com>
emile <emile@users.noreply.github.com>
Emmanuel T Odeke <odeke@ualberta.ca>
Eng Zer Jun <engzerjun@gmail.com>
Enrique Fynn <me@enriquefynn.com>
Enrique Ortiz <hi@enriqueortiz.dev>
EOS Classic <info@eos-classic.io>
Erichin <erichinbato@gmail.com>
Ernesto del Toro <ernesto.deltoro@gmail.com>
Ethan Buchman <ethan@coinculture.info>
ethersphere <thesw@rm.eth>
Eugene Lepeico <eugenelepeico@gmail.com>
Eugene Valeyev <evgen.povt@gmail.com>
Evangelos Pappas <epappas@evalonlabs.com>
Everton Fraga <ev@ethereum.org>
Evgeny <awesome.observer@yandex.com>
Evgeny Danilenko <6655321@bk.ru>
evgk <evgeniy.kamyshev@gmail.com>
Evolution404 <35091674+Evolution404@users.noreply.github.com>
EXEC <execvy@gmail.com>
Fabian Vogelsteller <fabian@frozeman.de>
Fabio Barone <fabio.barone.co@gmail.com>
Fabio Berger <fabioberger1991@gmail.com>
FaceHo <facehoshi@gmail.com>
Felipe Strozberg <48066928+FelStroz@users.noreply.github.com>
Felix Lange <fjl@twurst.com>
Ferenc Szabo <frncmx@gmail.com>
ferhat elmas <elmas.ferhat@gmail.com>
Ferran Borreguero <ferranbt@protonmail.com>
Fiisio <liangcszzu@163.com>
Fire Man <55934298+basdevelop@users.noreply.github.com>
flowerofdream <775654398@qq.com>
fomotrader <82184770+fomotrader@users.noreply.github.com>
ForLina <471133417@qq.com>
Frank Szendzielarz <33515470+FrankSzendzielarz@users.noreply.github.com>
Frank Wang <eternnoir@gmail.com>
Franklin <mr_franklin@126.com>
Furkan KAMACI <furkankamaci@gmail.com>
Fuyang Deng <dengfuyang@outlook.com>
GagziW <leon.stanko@rwth-aachen.de>
Gary Rong <garyrong0905@gmail.com>
Gautam Botrel <gautam.botrel@gmail.com>
George Ornbo <george@shapeshed.com>
Giuseppe Bertone <bertone.giuseppe@gmail.com>
Greg Colvin <greg@colvin.org>
Gregg Dourgarian <greggd@tempworks.com>
Gregory Markou <16929357+GregTheGreek@users.noreply.github.com>
Guifel <toowik@gmail.com>
Guilherme Salgado <gsalgado@gmail.com>
Guillaume Ballet <gballet@gmail.com>
Guillaume Nicolas <guin56@gmail.com>
GuiltyMorishita <morilliantblue@gmail.com>
Guruprasad Kamath <48196632+gurukamath@users.noreply.github.com>
Gus <yo@soygus.com>
Gustav Simonsson <gustav.simonsson@gmail.com>
Gísli Kristjánsson <gislik@hamstur.is>
Ha ĐANG <dvietha@gmail.com>
HackyMiner <hackyminer@gmail.com>
hadv <dvietha@gmail.com>
Hanjiang Yu <delacroix.yu@gmail.com>
Hao Bryan Cheng <haobcheng@gmail.com>
Hao Duan <duanhao0814@gmail.com>
HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Harry Dutton <me@bytejedi.com>
haryu703 <34744512+haryu703@users.noreply.github.com>
Hendrik Hofstadt <hendrik@nexantic.com>
Henning Diedrich <hd@eonblast.com>
henopied <13500516+henopied@users.noreply.github.com>
hero5512 <lvshuaino@gmail.com>
holisticode <holistic.computing@gmail.com>
Hongbin Mao <hello2mao@gmail.com>
Hsien-Tang Kao <htkao@pm.me>
hsyodyssey <47173566+hsyodyssey@users.noreply.github.com>
Husam Ibrahim <39692071+HusamIbrahim@users.noreply.github.com>
Hwanjo Heo <34005989+hwanjo@users.noreply.github.com>
hydai <z54981220@gmail.com>
Hyung-Kyu Hqueue Choi <hyungkyu.choi@gmail.com>
Håvard Anda Estensen <haavard.ae@gmail.com>
Ian Macalinao <me@ian.pw>
Ian Norden <iannordenn@gmail.com>
icodezjb <icodezjb@163.com>
Ikko Ashimine <eltociear@gmail.com>
Ilan Gitter <8359193+gitteri@users.noreply.github.com>
ImanSharaf <78227895+ImanSharaf@users.noreply.github.com>
Isidoro Ghezzi <isidoro.ghezzi@icloud.com>
Iskander (Alex) Sharipov <quasilyte@gmail.com>
Ivan Bogatyy <bogatyi@gmail.com>
Ivan Daniluk <ivan.daniluk@gmail.com>
Ivo Georgiev <ivo@strem.io>
jacksoom <lifengliu1994@gmail.com>
Jae Kwon <jkwon.work@gmail.com>
James Prestwich <10149425+prestwich@users.noreply.github.com>
Jamie Pitts <james.pitts@gmail.com>
Janoš Guljaš <janos@resenje.org>
Jared Wasinger <j-wasinger@hotmail.com>
Jason Carver <jacarver@linkedin.com>
Javier Peletier <jm@epiclabs.io>
Javier Sagredo <jasataco@gmail.com>
Jay <codeholic.arena@gmail.com>
Jay Guo <guojiannan1101@gmail.com>
Jaynti Kanani <jdkanani@gmail.com>
Jeff Prestes <jeffprestes@gmail.com>
Jeff R. Allen <jra@nella.org>
Jeff Wentworth <jeff@curvegrid.com>
Jeffery Robert Walsh <rlxrlps@gmail.com>
Jeffrey Wilcke <jeffrey@ethereum.org>
Jens Agerberg <github@agerberg.me>
Jeremy McNevin <jeremy.mcnevin@optum.com>
Jeremy Schlatter <jeremy.schlatter@gmail.com>
Jerzy Lasyk <jerzylasyk@gmail.com>
Jesse Tane <jesse.tane@gmail.com>
Jia Chenhui <jiachenhui1989@gmail.com>
Jim McDonald <Jim@mcdee.net>
jk-jeongkyun <45347815+jeongkyun-oh@users.noreply.github.com>
jkcomment <jkcomment@gmail.com>
JoeGruffins <34998433+JoeGruffins@users.noreply.github.com>
Joel Burget <joelburget@gmail.com>
John C. Vernaleo <john@netpurgatory.com>
John Difool <johndifoolpi@gmail.com>
Johns Beharry <johns@peakshift.com>
Jonas <felberj@users.noreply.github.com>
Jonathan Brown <jbrown@bluedroplet.com>
Jonathan Chappelow <chappjc@users.noreply.github.com>
Jonathan Gimeno <jgimeno@gmail.com>
JoranHonig <JoranHonig@users.noreply.github.com>
Jordan Krage <jmank88@gmail.com>
Jorropo <jorropo.pgm@gmail.com>
Joseph Chow <ethereum@outlook.com>
Joshua Colvin <jcolvin@offchainlabs.com>
Joshua Gutow <jbgutow@gmail.com>
jovijovi <mageyul@hotmail.com>
jtakalai <juuso.takalainen@streamr.com>
JU HYEONG PARK <dkdkajej@gmail.com>
Julian Y <jyap808@users.noreply.github.com>
Justin Clark-Casey <justincc@justincc.org>
Justin Drake <drakefjustin@gmail.com>
Justus <jus@gtsbr.org>
Kawashima <91420903+sscodereth@users.noreply.github.com>
ken10100147 <sunhongping@kanjian.com>
Kenji Siu <kenji@isuntv.com>
Kenso Trabing <ktrabing@acm.org>
Kevin <denk.kevin@web.de>
kevin.xu <cming.xu@gmail.com>
KibGzr <kibgzr@gmail.com>
kiel barry <kiel.j.barry@gmail.com>
kilic <onurkilic1004@gmail.com>
kimmylin <30611210+kimmylin@users.noreply.github.com>
Kitten King <53072918+kittenking@users.noreply.github.com>
knarfeh <hejun1874@gmail.com>
Kobi Gurkan <kobigurk@gmail.com>
komika <komika@komika.org>
Konrad Feldmeier <konrad@brainbot.com>
Kris Shinn <raggamuffin.music@gmail.com>
Kristofer Peterson <svenski123@users.noreply.github.com>
Kumar Anirudha <mail@anirudha.dev>
Kurkó Mihály <kurkomisi@users.noreply.github.com>
Kushagra Sharma <ksharm01@gmail.com>
Kwuaint <34888408+kwuaint@users.noreply.github.com>
Kyuntae Ethan Kim <ethan.kyuntae.kim@gmail.com>
Lee Bousfield <ljbousfield@gmail.com>
Lefteris Karapetsas <lefteris@refu.co>
Leif Jurvetson <leijurv@gmail.com>
Leo Shklovskii <leo@thermopylae.net>
LeoLiao <leofantast@gmail.com>
Lewis Marshall <lewis@lmars.net>
lhendre <lhendre2@gmail.com>
Li Dongwei <lidw1988@126.com>
Liang Ma <liangma@liangbit.com>
Liang ZOU <liang.d.zou@gmail.com>
libby kent <viskovitzzz@gmail.com>
libotony <liboliqi@gmail.com>
LieutenantRoger <dijsky_2015@hotmail.com>
ligi <ligi@ligi.de>
Lio李欧 <lionello@users.noreply.github.com>
lmittmann <lmittmann@users.noreply.github.com>
Lorenzo Manacorda <lorenzo@kinvolk.io>
Louis Holbrook <dev@holbrook.no>
Luca Zeug <luclu@users.noreply.github.com>
Lucas Hendren <lhendre2@gmail.com>
lzhfromustc <43191155+lzhfromustc@users.noreply.github.com>
Magicking <s@6120.eu>
manlio <manlio.poltronieri@gmail.com>
Maran Hidskes <maran.hidskes@gmail.com>
Marek Kotewicz <marek.kotewicz@gmail.com>
Mariano Cortesi <mcortesi@gmail.com>
Marius van der Wijden <m.vanderwijden@live.de>
Mark <markya0616@gmail.com>
Mark Rushakoff <mark.rushakoff@gmail.com>
mark.lin <mark@maicoin.com>
Martin Alex Philip Dawson <u1356770@gmail.com>
Martin Holst Swende <martin@swende.se>
Martin Klepsch <martinklepsch@googlemail.com>
Martin Lundfall <martin.lundfall@protonmail.com>
Martin Michlmayr <tbm@cyrius.com>
Martin Redmond <21436+reds@users.noreply.github.com>
Mason Fischer <mason@kissr.co>
Mateusz Morusiewicz <11313015+Ruteri@users.noreply.github.com>
Mats Julian Olsen <mats@plysjbyen.net>
Matt Garnett <14004106+lightclient@users.noreply.github.com>
Matt K <1036969+mkrump@users.noreply.github.com>
Matthew Di Ferrante <mattdf@users.noreply.github.com>
Matthew Halpern <matthalp@gmail.com>
Matthew Wampler-Doty <matthew.wampler.doty@gmail.com>
Max Sistemich <mafrasi2@googlemail.com>
Maxim Zhiburt <zhiburt@gmail.com>
Maximilian Meister <mmeister@suse.de>
me020523 <me020523@gmail.com>
Melvin Junhee Woo <melvin.woo@groundx.xyz>
meowsbits <b5c6@protonmail.com>
Micah Zoltu <micah@zoltu.net>
Michael Forney <mforney@mforney.org>
Michael Riabzev <michael@starkware.co>
Michael Ruminer <michael.ruminer+github@gmail.com>
michael1011 <me@michael1011.at>
Miguel Mota <miguelmota2@gmail.com>
Mike Burr <mburr@nightmare.com>
Mikhail Mikheev <mmvsha73@gmail.com>
milesvant <milesvant@gmail.com>
Miro <mirokuratczyk@users.noreply.github.com>
Miya Chen <miyatlchen@gmail.com>
Mohanson <mohanson@outlook.com>
mr_franklin <mr_franklin@126.com>
Mudit Gupta <guptamudit@ymail.com>
Mymskmkt <1847234666@qq.com>
Nalin Bhardwaj <nalinbhardwaj@nibnalin.me>
Natsu Kagami <natsukagami@gmail.com>
Nchinda Nchinda <nchinda2@gmail.com>
nebojsa94 <nebojsa94@users.noreply.github.com>
necaremus <necaremus@gmail.com>
nedifi <103940716+nedifi@users.noreply.github.com>
needkane <604476380@qq.com>
Nguyen Kien Trung <trung.n.k@gmail.com>
Nguyen Sy Thanh Son <thanhson1085@gmail.com>
Nic Jansma <nic@nicj.net>
Nick Dodson <silentcicero@outlook.com>
Nick Johnson <arachnid@notdot.net>
Nicolas Feignon <nfeignon@gmail.com>
Nicolas Guillaume <gunicolas@sqli.com>
Nikita Kozhemyakin <enginegl.ec@gmail.com>
Nikola Madjarevic <nikola.madjarevic@gmail.com>
Nilesh Trivedi <nilesh@hypertrack.io>
Nimrod Gutman <nimrod.gutman@gmail.com>
Nishant Das <nishdas93@gmail.com>
njupt-moon <1015041018@njupt.edu.cn>
nkbai <nkbai@163.com>
noam-alchemy <76969113+noam-alchemy@users.noreply.github.com>
nobody <ddean2009@163.com>
Noman <noman@noman.land>
nujabes403 <nujabes403@gmail.com>
Nye Liu <nyet@nyet.org>
Oleg Kovalov <iamolegkovalov@gmail.com>
Oli Bye <olibye@users.noreply.github.com>
Oliver Tale-Yazdi <oliver@perun.network>
Olivier Hervieu <olivier.hervieu@gmail.com>
Or Neeman <oneeman@gmail.com>
Osoro Bironga <fanosoro@gmail.com>
Osuke <arget-fee.free.dgm@hotmail.co.jp>
Pantelis Peslis <pespantelis@gmail.com>
Pascal Dierich <pascal@merkleplant.xyz>
Patrick O'Grady <prohb125@gmail.com>
Pau <pau@dabax.net>
Paul Berg <hello@paulrberg.com>
Paul Litvak <litvakpol@012.net.il>
Paul-Armand Verhaegen <paularmand.verhaegen@gmail.com>
Paulo L F Casaretto <pcasaretto@gmail.com>
Paweł Bylica <chfast@gmail.com>
Pedro Gomes <otherview@gmail.com>
Pedro Pombeiro <PombeirP@users.noreply.github.com>
Peter Broadhurst <peter@themumbles.net>
peter cresswell <pcresswell@gmail.com>
Peter Pratscher <pratscher@gmail.com>
Peter Simard <petesimard56@gmail.com>
Petr Mikusek <petr@mikusek.info>
Philip Schlump <pschlump@gmail.com>
Pierre Neter <pierreneter@gmail.com>
Pierre R <p.rousset@gmail.com>
piersy <pierspowlesland@gmail.com>
PilkyuJung <anothel1@naver.com>
Piotr Dyraga <piotr.dyraga@keep.network>
ploui <64719999+ploui@users.noreply.github.com>
Preston Van Loon <preston@prysmaticlabs.com>
Prince Sinha <sinhaprince013@gmail.com>
Péter Szilágyi <peterke@gmail.com>
qd-ethan <31876119+qdgogogo@users.noreply.github.com>
Qian Bin <cola.tin.com@gmail.com>
Quest Henkart <qhenkart@gmail.com>
Rachel Franks <nfranks@protonmail.com>
Rafael Matias <rafael@skyle.net>
Raghav Sood <raghavsood@gmail.com>
Ralph Caraveo <deckarep@gmail.com>
Ramesh Nair <ram@hiddentao.com>
rangzen <public@l-homme.com>
reinerRubin <tolstov.georgij@gmail.com>
Rene Lubov <41963722+renaynay@users.noreply.github.com>
rhaps107 <dod-source@yandex.ru>
Ricardo Catalinas Jiménez <r@untroubled.be>
Ricardo Domingos <ricardohsd@gmail.com>
Richard Hart <richardhart92@gmail.com>
Rick <rick.no@groundx.xyz>
RJ Catalano <catalanor0220@gmail.com>
Rob <robert@rojotek.com>
Rob Mulholand <rmulholand@8thlight.com>
Robert Zaremba <robert@zaremba.ch>
Roc Yu <rociiu0112@gmail.com>
Roman Mazalov <83914728+gopherxyz@users.noreply.github.com>
Ross <9055337+Chadsr@users.noreply.github.com>
Runchao Han <elvisage941102@gmail.com>
Russ Cox <rsc@golang.org>
Ryan Schneider <ryanleeschneider@gmail.com>
ryanc414 <ryan@tokencard.io>
Rémy Roy <remyroy@remyroy.com>
S. Matthew English <s-matthew-english@users.noreply.github.com>
salanfe <salanfe@users.noreply.github.com>
Sam <39165351+Xia-Sam@users.noreply.github.com>
Sammy Libre <7374093+sammy007@users.noreply.github.com>
Samuel Marks <samuelmarks@gmail.com>
sanskarkhare <sanskarkhare47@gmail.com>
Sarlor <kinsleer@outlook.com>
Sasuke1964 <neilperry1964@gmail.com>
Satpal <28562234+SatpalSandhu61@users.noreply.github.com>
Saulius Grigaitis <saulius@necolt.com>
Sean <darcys22@gmail.com>
Serhat Şevki Dinçer <jfcgauss@gmail.com>
Shane Bammel <sjb933@gmail.com>
shawn <36943337+lxex@users.noreply.github.com>
shigeyuki azuchi <azuchi@chaintope.com>
Shihao Xia <charlesxsh@hotmail.com>
Shiming <codingmylife@gmail.com>
Shintaro Kaneko <kaneshin0120@gmail.com>
shiqinfeng1 <150627601@qq.com>
Shuai Qi <qishuai231@gmail.com>
Shude Li <islishude@gmail.com>
Shunsuke Watanabe <ww.shunsuke@gmail.com>
silence <wangsai.silence@qq.com>
Simon Jentzsch <simon@slock.it>
Sina Mahmoodi <1591639+s1na@users.noreply.github.com>
sixdays <lj491685571@126.com>
SjonHortensius <SjonHortensius@users.noreply.github.com>
Slava Karpenko <slavikus@gmail.com>
slumber1122 <slumber1122@gmail.com>
Smilenator <yurivanenko@yandex.ru>
soc1c <soc1c@users.noreply.github.com>
Sorin Neacsu <sorin.neacsu@gmail.com>
Sparty <vignesh.crysis@gmail.com>
Stein Dekker <dekker.stein@gmail.com>
Steve Gattuso <steve@stevegattuso.me>
Steve Ruckdashel <steve.ruckdashel@gmail.com>
Steve Waldman <swaldman@mchange.com>
Steven E. Harris <seh@panix.com>
Steven Roose <stevenroose@gmail.com>
stompesi <stompesi@gmail.com>
stormpang <jialinpeng@vip.qq.com>
sunxiaojun2014 <sunxiaojun-xy@360.cn>
Suriyaa Sundararuban <isc.suriyaa@gmail.com>
Sylvain Laurent <s@6120.eu>
Taeik Lim <sibera21@gmail.com>
tamirms <tamir@trello.com>
Tangui Clairet <tangui.clairet@gmail.com>
Tatsuya Shimoda <tacoo@users.noreply.github.com>
Taylor Gerring <taylor.gerring@gmail.com>
TColl <38299499+TColl@users.noreply.github.com>
terasum <terasum@163.com>
tgyKomgo <52910426+tgyKomgo@users.noreply.github.com>
Thad Guidry <thadguidry@gmail.com>
Thomas Bocek <tom@tomp2p.net>
thomasmodeneis <thomas.modeneis@gmail.com>
thumb8432 <thumb8432@gmail.com>
Ti Zhou <tizhou1986@gmail.com>
tia-99 <67107070+tia-99@users.noreply.github.com>
Tim Cooijmans <timcooijmans@gmail.com>
Tobias Hildebrandt <79341166+tobias-hildebrandt@users.noreply.github.com>
Tosh Camille <tochecamille@gmail.com>
tsarpaul <Litvakpol@012.net.il>
Tyler Chambers <2775339+tylerchambers@users.noreply.github.com>
tzapu <alex@tzapu.com>
ucwong <ucwong@126.com>
uji <49834542+uji@users.noreply.github.com>
ult-bobonovski <alex@ultiledger.io>
Valentin Trinqué <ValentinTrinque@users.noreply.github.com>
Valentin Wüstholz <wuestholz@gmail.com>
Vedhavyas Singareddi <vedhavyas.singareddi@gmail.com>
Victor Farazdagi <simple.square@gmail.com>
Victor Tran <vu.tran54@gmail.com>
Vie <yangchenzhong@gmail.com>
Viktor Trón <viktor.tron@gmail.com>
Ville Sundell <github@solarius.fi>
vim88 <vim88vim88@gmail.com>
Vincent G <caktux@gmail.com>
Vincent Serpoul <vincent@serpoul.com>
Vinod Damle <vdamle@users.noreply.github.com>
Vitalik Buterin <v@buterin.com>
Vitaly Bogdanov <vsbogd@gmail.com>
Vitaly V <vvelikodny@gmail.com>
Vivek Anand <vivekanand1101@users.noreply.github.com>
Vlad Bokov <razum2um@mail.ru>
Vlad Gluhovsky <gluk256@gmail.com>
Ward Bradt <wardbradt5@gmail.com>
Water <44689567+codeoneline@users.noreply.github.com>
wbt <wbt@users.noreply.github.com>
weimumu <934657014@qq.com>
Wenbiao Zheng <delweng@gmail.com>
Wenshao Zhong <wzhong20@uic.edu>
Will Villanueva <hello@willvillanueva.com>
William Morriss <wjmelements@gmail.com>
William Setzer <bootstrapsetzer@gmail.com>
williambannas <wrschwartz@wpi.edu>
wuff1996 <33193253+wuff1996@users.noreply.github.com>
Wuxiang <wuxiangzhou2010@gmail.com>
Xiaobing Jiang <s7v7nislands@gmail.com>
xiekeyang <xiekeyang@users.noreply.github.com>
xincaosu <xincaosu@126.com>
xinluyin <31590468+xinluyin@users.noreply.github.com>
Xudong Liu <33193253+r1cs@users.noreply.github.com>
xwjack <XWJACK@users.noreply.github.com>
yahtoo <yahtoo.ma@gmail.com>
Yang Hau <vulxj0j8j8@gmail.com>
YaoZengzeng <yaozengzeng@zju.edu.cn>
YH-Zhou <yanhong.zhou05@gmail.com>
Yihau Chen <a122092487@gmail.com>
Yohann Léon <sybiload@gmail.com>
Yoichi Hirai <i@yoichihirai.com>
Yole <007yuyue@gmail.com>
Yondon Fu <yondon.fu@gmail.com>
YOSHIDA Masanori <masanori.yoshida@gmail.com>
yoza <yoza.is12s@gmail.com>
yumiel yoomee1313 <yumiel.ko@groundx.xyz>
Yusup <awklsgrep@gmail.com>
yutianwu <wzxingbupt@gmail.com>
ywzqwwt <39263032+ywzqwwt@users.noreply.github.com>
zaccoding <zaccoding725@gmail.com>
Zach <zach.ramsay@gmail.com>
Zachinquarantine <Zachinquarantine@protonmail.com>
zah <zahary@gmail.com>
Zahoor Mohamed <zahoor@zahoor.in>
Zak Cole <zak@beattiecole.com>
zcheng9 <zcheng9@hawk.iit.edu>
zer0to0ne <36526113+zer0to0ne@users.noreply.github.com>
zgfzgf <48779939+zgfzgf@users.noreply.github.com>
Zhang Zhuo <mycinbrin@gmail.com>
zhangsoledad <787953403@qq.com>
zhaochonghe <41711151+zhaochonghe@users.noreply.github.com>
Zhenguo Niu <Niu.ZGlinux@gmail.com>
zhiqiangxu <652732310@qq.com>
Zhou Zhiyao <ZHOU0250@e.ntu.edu.sg>
Ziyuan Zhong <zzy.albert@163.com>
Zoe Nolan <github@zoenolan.org>
Zou Guangxian <zouguangxian@gmail.com>
Zsolt Felföldi <zsfelfoldi@gmail.com>
Łukasz Kurowski <crackcomm@users.noreply.github.com>
Łukasz Zimnoch <lukaszzimnoch1994@gmail.com>
ΞTHΞЯSPHΞЯΞ <{viktor.tron,nagydani,zsfelfoldi}@gmail.com>
Максим Чусовлянов <mchusovlianov@gmail.com>
大彬 <hz_stb@163.com>
沉风 <myself659@users.noreply.github.com>
贺鹏飞 <hpf@hackerful.cn>
陈佳 <chenjiablog@gmail.com>
유용환 <33824408+eric-yoo@users.noreply.github.com>

674
vendor/github.com/ethereum/go-ethereum/COPYING generated vendored Normal file

@@ -0,0 +1,674 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<https://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<https://www.gnu.org/licenses/why-not-lgpl.html>.

165
vendor/github.com/ethereum/go-ethereum/COPYING.LESSER generated vendored Normal file
View File

@@ -0,0 +1,165 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

33
vendor/github.com/ethereum/go-ethereum/Dockerfile generated vendored Normal file
View File

@@ -0,0 +1,33 @@
# Support setting various labels on the final image
ARG COMMIT=""
ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
FROM golang:1.18-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git
# Get dependencies - will also be cached if we don't change go.mod/go.sum
COPY go.mod /go-ethereum/
COPY go.sum /go-ethereum/
RUN cd /go-ethereum && go mod download
ADD . /go-ethereum
RUN cd /go-ethereum && go run build/ci.go install -static ./cmd/geth
# Pull Geth into a second stage deploy alpine container
FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/geth /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp
ENTRYPOINT ["geth"]
# Add some metadata labels to help programmatic image consumption
ARG COMMIT=""
ARG VERSION=""
ARG BUILDNUM=""
LABEL commit="$COMMIT" version="$VERSION" buildnum="$BUILDNUM"

View File

@@ -0,0 +1,32 @@
# Support setting various labels on the final image
ARG COMMIT=""
ARG VERSION=""
ARG BUILDNUM=""
# Build Geth in a stock Go builder container
FROM golang:1.18-alpine as builder
RUN apk add --no-cache gcc musl-dev linux-headers git
# Get dependencies - will also be cached if we don't change go.mod/go.sum
COPY go.mod /go-ethereum/
COPY go.sum /go-ethereum/
RUN cd /go-ethereum && go mod download
ADD . /go-ethereum
RUN cd /go-ethereum && go run build/ci.go install -static
# Pull all binaries into a second stage deploy alpine container
FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=builder /go-ethereum/build/bin/* /usr/local/bin/
EXPOSE 8545 8546 30303 30303/udp
# Add some metadata labels to help programmatic image consumption
ARG COMMIT=""
ARG VERSION=""
ARG BUILDNUM=""
LABEL commit="$COMMIT" version="$VERSION" buildnum="$BUILDNUM"

50
vendor/github.com/ethereum/go-ethereum/Makefile generated vendored Normal file
View File

@@ -0,0 +1,50 @@
# This Makefile is meant to be used by people that do not usually work
# with Go source code. If you know what GOPATH is then you probably
# don't need to bother with make.
.PHONY: geth android ios evm all test clean
GOBIN = ./build/bin
GO ?= latest
GORUN = env GO111MODULE=on go run
geth:
$(GORUN) build/ci.go install ./cmd/geth
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."
all:
$(GORUN) build/ci.go install
android:
$(GORUN) build/ci.go aar --local
@echo "Done building."
@echo "Import \"$(GOBIN)/geth.aar\" to use the library."
@echo "Import \"$(GOBIN)/geth-sources.jar\" to add javadocs"
@echo "For more info see https://stackoverflow.com/questions/20994336/android-studio-how-to-attach-javadoc"
ios:
$(GORUN) build/ci.go xcode --local
@echo "Done building."
@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."
test: all
$(GORUN) build/ci.go test
lint: ## Run linters.
$(GORUN) build/ci.go lint
clean:
env GO111MODULE=on go clean -cache
rm -fr build/_workspace/pkg/ $(GOBIN)/*
# The devtools target installs tools required for 'go generate'.
# You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'.
devtools:
env GOBIN= go install golang.org/x/tools/cmd/stringer@latest
env GOBIN= go install github.com/fjl/gencodec@latest
env GOBIN= go install github.com/golang/protobuf/protoc-gen-go@latest
env GOBIN= go install ./cmd/abigen
@type "solc" 2> /dev/null || echo 'Please install solc'
@type "protoc" 2> /dev/null || echo 'Please install protoc'

379
vendor/github.com/ethereum/go-ethereum/README.md generated vendored Normal file
View File

@@ -0,0 +1,379 @@
## Go Ethereum
Official Golang execution layer implementation of the Ethereum protocol.
[![API Reference](
https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
)](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc)
[![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum)
[![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum)
[![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv)
Automated builds are available for stable releases and the unstable master branch. Binary
archives are published at https://geth.ethereum.org/downloads/.
## Building the source
For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth).
Building `geth` requires both a Go (version 1.16 or later) and a C compiler. You can install
them using your favourite package manager. Once the dependencies are installed, run
```shell
make geth
```
or, to build the full suite of utilities:
```shell
make all
```
## Executables
The go-ethereum project comes with several wrappers/executables found in the `cmd`
directory.
| Command | Description |
| :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. See `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. |
| `clef` | Stand-alone signing tool, which can be used as a backend signer for `geth`. |
| `devp2p` | Utilities to interact with nodes on the networking layer, without running a full blockchain. |
| `abigen` | Source code generator to convert Ethereum contract definitions into easy-to-use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. |
| `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. |
| `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). |
| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://ethereum.org/en/developers/docs/data-structures-and-encoding/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). |
| `puppeth` | A CLI wizard that aids in creating a new Ethereum network. |
## Running `geth`
Going through all the possible command line flags is out of scope here (please consult our
[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)),
but we've enumerated a few common parameter combos to get you up to speed quickly
on how you can run your own `geth` instance.
### Hardware Requirements
Minimum:
* CPU with 2+ cores
* 4GB RAM
* 1TB free storage space to sync the Mainnet
* 8 MBit/sec download Internet service
Recommended:
* Fast CPU with 4+ cores
* 16GB+ RAM
* High-performance SSD with at least 1TB of free space
* 25+ MBit/sec download Internet service
### Full node on the main Ethereum network
By far the most common scenario is people wanting to simply interact with the Ethereum
network: create accounts; transfer funds; deploy and interact with contracts. For this
particular use case, the user doesn't care about years-old historical data, so we can
sync quickly to the current state of the network. To do so:
```shell
$ geth console
```
This command will:
* Start `geth` in snap sync mode (default, can be changed with the `--syncmode` flag),
causing it to download more data in exchange for avoiding processing the entire history
of the Ethereum network, which is very CPU intensive.
* Start the built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console),
(via the trailing `console` subcommand) through which you can interact using [`web3` methods](https://github.com/ChainSafe/web3.js/blob/0.20.7/DOCUMENTATION.md)
(note: the `web3` version bundled within `geth` is very old, and not up to date with official docs),
as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server).
This tool is optional; if you leave it out, you can always attach it to an already running
`geth` instance with `geth attach`.
### Full node on the Görli test network
Transitioning towards developers, if you'd like to play around with creating Ethereum
contracts, you almost certainly would like to do that without any real money involved until
you get the hang of the entire system. In other words, instead of attaching to the main
network, you want to join the **test** network with your node, which is fully equivalent to
the main network, but with play-Ether only.
```shell
$ geth --goerli console
```
The `console` subcommand has the same meaning as above and is equally
useful on the testnet too.
Specifying the `--goerli` flag, however, will reconfigure your `geth` instance a bit:
* Instead of connecting to the main Ethereum network, the client will connect to the Görli
test network, which uses different P2P bootnodes, different network IDs and genesis
states.
* Instead of using the default data directory (`~/.ethereum` on Linux for example), `geth`
will nest itself one level deeper into a `goerli` subfolder (`~/.ethereum/goerli` on
Linux). Note, on OSX and Linux this also means that attaching to a running testnet node
requires the use of a custom endpoint since `geth attach` will try to attach to a
production node endpoint by default, e.g.,
`geth attach <datadir>/goerli/geth.ipc`. Windows users are not affected by
this.
*Note: Although some internal protective measures prevent transactions from
crossing over between the main network and test network, you should always
use separate accounts for play and real money. Unless you manually move
accounts, `geth` will by default correctly separate the two networks and will not make any
accounts available between them.*
### Full node on the Rinkeby test network
Go Ethereum also supports connecting to the older proof-of-authority based test network
called [*Rinkeby*](https://www.rinkeby.io) which is operated by members of the community.
```shell
$ geth --rinkeby console
```
### Full node on the Ropsten test network
In addition to Görli and Rinkeby, Geth also supports the ancient Ropsten testnet. The
Ropsten test network is based on the Ethash proof-of-work consensus algorithm. As such,
it has certain extra overhead and is more susceptible to reorganization attacks due to the
network's low difficulty/security.
```shell
$ geth --ropsten console
```
*Note: Older Geth configurations store the Ropsten database in the `testnet` subdirectory.*
### Configuration
As an alternative to passing the numerous flags to the `geth` binary, you can also pass a
configuration file via:
```shell
$ geth --config /path/to/your_config.toml
```
To get an idea of what the file should look like you can use the `dumpconfig` subcommand to
export your existing configuration:
```shell
$ geth --your-favourite-flags dumpconfig
```
*Note: This works only with `geth` v1.6.0 and above.*
#### Docker quick start
One of the quickest ways to get Ethereum up and running on your machine is by using
Docker:
```shell
docker run -d --name ethereum-node -v /Users/alice/ethereum:/root \
-p 8545:8545 -p 30303:30303 \
ethereum/client-go
```
This will start `geth` in snap-sync mode with a DB memory allowance of 1GB, as the
above command does. It will also create a persistent volume in your home directory for
saving your blockchain as well as map the default ports. There is also an `alpine` tag
available for a slim version of the image.
Do not forget `--http.addr 0.0.0.0`, if you want to access RPC from other containers
and/or hosts. By default, `geth` binds to the local interface and RPC endpoints are not
accessible from the outside.
### Programmatically interfacing `geth` nodes
As a developer, sooner rather than later you'll want to start interacting with `geth` and the
Ethereum network via your own programs and not manually through the console. To aid
this, `geth` has built-in support for JSON-RPC based APIs ([standard APIs](https://ethereum.github.io/execution-apis/api-documentation/)
and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)).
These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based
platforms, and named pipes on Windows).
The IPC interface is enabled by default and exposes all the APIs supported by `geth`,
whereas the HTTP and WS interfaces need to be enabled manually and only expose a
subset of APIs due to security reasons. These can be turned on/off and configured as
you'd expect.
HTTP based JSON-RPC API options:
* `--http` Enable the HTTP-RPC server
* `--http.addr` HTTP-RPC server listening interface (default: `localhost`)
* `--http.port` HTTP-RPC server listening port (default: `8545`)
* `--http.api` APIs offered over the HTTP-RPC interface (default: `eth,net,web3`)
* `--http.corsdomain` Comma separated list of domains from which to accept cross origin requests (browser enforced)
* `--ws` Enable the WS-RPC server
* `--ws.addr` WS-RPC server listening interface (default: `localhost`)
* `--ws.port` WS-RPC server listening port (default: `8546`)
* `--ws.api` APIs offered over the WS-RPC interface (default: `eth,net,web3`)
* `--ws.origins` Origins from which to accept WebSocket requests
* `--ipcdisable` Disable the IPC-RPC server
* `--ipcapi` APIs offered over the IPC-RPC interface (default: `admin,debug,eth,miner,net,personal,txpool,web3`)
* `--ipcpath` Filename for IPC socket/pipe within the datadir (explicit paths escape it)
You'll need to use your own programming environment's capabilities (libraries, tools, etc.) to
connect via HTTP, WS or IPC to a `geth` node configured with the above flags and you'll
need to speak [JSON-RPC](https://www.jsonrpc.org/specification) on all transports. You
can reuse the same connection for multiple requests!
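As an illustration (not part of the upstream docs), the following minimal Go sketch speaks raw JSON-RPC over HTTP to a local node; it assumes the node was started with `--http` on the default `localhost:8545` endpoint and simply asks for `eth_blockNumber`.
```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// jsonrpcRequest is the standard JSON-RPC 2.0 request envelope.
type jsonrpcRequest struct {
	JSONRPC string        `json:"jsonrpc"`
	Method  string        `json:"method"`
	Params  []interface{} `json:"params"`
	ID      int           `json:"id"`
}

func main() {
	// Ask the node for the latest block number over the HTTP transport.
	req := jsonrpcRequest{JSONRPC: "2.0", Method: "eth_blockNumber", Params: []interface{}{}, ID: 1}
	payload, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}

	// Assumes a node started with --http on the default localhost:8545 endpoint.
	resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The result is the latest block number as a hex-encoded quantity, e.g. "0x10d4f".
	var out struct {
		Result string `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println("latest block:", out.Result)
}
```
The same request body can be sent over WS or IPC as well; only the transport changes, not the JSON-RPC payload.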
**Note: Please understand the security implications of opening up an HTTP/WS based
transport before doing so! Hackers on the internet are actively trying to subvert
Ethereum nodes with exposed APIs! Further, all browser tabs can access locally
running web servers, so malicious web pages could try to subvert locally available
APIs!**
### Operating a private network
Maintaining your own private network is more involved, as a lot of configuration taken for
granted in the official networks needs to be set up manually.
#### Defining the private genesis state
First, you'll need to create the genesis state of your networks, which all nodes need to be
aware of and agree upon. This consists of a small JSON file (e.g. call it `genesis.json`):
```json
{
"config": {
"chainId": <arbitrary positive integer>,
"homesteadBlock": 0,
"eip150Block": 0,
"eip155Block": 0,
"eip158Block": 0,
"byzantiumBlock": 0,
"constantinopleBlock": 0,
"petersburgBlock": 0,
"istanbulBlock": 0,
"berlinBlock": 0,
"londonBlock": 0
},
"alloc": {},
"coinbase": "0x0000000000000000000000000000000000000000",
"difficulty": "0x20000",
"extraData": "",
"gasLimit": "0x2fefd8",
"nonce": "0x0000000000000042",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"timestamp": "0x00"
}
```
The above fields should be fine for most purposes, although we'd recommend changing
the `nonce` to some random value so you prevent unknown remote nodes from being able
to connect to you. If you'd like to pre-fund some accounts for easier testing, create
the accounts and populate the `alloc` field with their addresses.
```json
"alloc": {
"0x0000000000000000000000000000000000000001": {
"balance": "111111111"
},
"0x0000000000000000000000000000000000000002": {
"balance": "222222222"
}
}
```
With the genesis state defined in the above JSON file, you'll need to initialize **every**
`geth` node with it prior to starting it up to ensure all blockchain parameters are correctly
set:
```shell
$ geth init path/to/genesis.json
```
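As an optional sanity check (an assumption of this write-up, not a required step), the genesis spec can be decoded with the `core.Genesis` type from this repository before running `geth init`; the file path used below is illustrative only.
```go
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	// Read the genesis spec created above (the path is only an example).
	data, err := os.ReadFile("path/to/genesis.json")
	if err != nil {
		panic(err)
	}

	// A spec that fails to decode here would also make `geth init` fail.
	var genesis core.Genesis
	if err := json.Unmarshal(data, &genesis); err != nil {
		panic(err)
	}

	if genesis.Config != nil {
		fmt.Println("chain id:", genesis.Config.ChainID)
	}
	fmt.Println("pre-funded accounts:", len(genesis.Alloc))
}
```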
#### Creating the rendezvous point
With all nodes that you want to run initialized to the desired genesis state, you'll need to
start a bootstrap node that others can use to find each other in your network and/or over
the internet. The clean way is to configure and run a dedicated bootnode:
```shell
$ bootnode --genkey=boot.key
$ bootnode --nodekey=boot.key
```
With the bootnode online, it will display an [`enode` URL](https://ethereum.org/en/developers/docs/networking-layer/network-addresses/#enode)
that other nodes can use to connect to it and exchange peer information. Make sure to
replace the displayed IP address information (most probably `[::]`) with your externally
accessible IP to get the actual `enode` URL.
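To check programmatically that the URL you copied is well-formed, a small Go sketch using this repository's `p2p/enode` package could look like the following (the program name and usage line are illustrative only).
```go
package main

import (
	"fmt"
	"os"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Println("usage: checkenode <enode-url>")
		os.Exit(1)
	}

	// Parse the URL printed by the bootnode (after replacing [::] with the
	// externally reachable IP address).
	n, err := enode.ParseV4(os.Args[1])
	if err != nil {
		fmt.Println("not a valid enode URL:", err)
		os.Exit(1)
	}
	fmt.Printf("node %s reachable at %s (tcp %d, udp %d)\n", n.ID(), n.IP(), n.TCP(), n.UDP())
}
```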
*Note: You could also use a full-fledged `geth` node as a bootnode, but it's a less
recommended approach.*
#### Starting up your member nodes
With the bootnode operational and externally reachable (you can try
`telnet <ip> <port>` to ensure it's indeed reachable), start every subsequent `geth`
node pointed to the bootnode for peer discovery via the `--bootnodes` flag. It will
probably also be desirable to keep the data directory of your private network separated, so
do also specify a custom `--datadir` flag.
```shell
$ geth --datadir=path/to/custom/data/folder --bootnodes=<bootnode-enode-url-from-above>
```
*Note: Since your network will be completely cut off from the main and test networks, you'll
also need to configure a miner to process transactions and create new blocks for you.*
#### Running a private miner
Mining on the public Ethereum network is a complex task as it's only feasible using GPUs,
requiring an OpenCL or CUDA enabled `ethminer` instance. For information on such a
setup, please consult the [EtherMining subreddit](https://www.reddit.com/r/EtherMining/)
and the [ethminer](https://github.com/ethereum-mining/ethminer) repository.
In a private network setting, however, a single CPU miner instance is more than enough for
practical purposes as it can produce a stable stream of blocks at the correct intervals
without needing heavy resources (consider running on a single thread, no need for multiple
ones either). To start a `geth` instance for mining, run it with all your usual flags, extended
by:
```shell
$ geth <usual-flags> --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000
```
This will start mining blocks and transactions on a single CPU thread, crediting all
proceeds to the account specified by `--miner.etherbase`. You can further tune the mining
by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price
transactions are accepted at (`--miner.gasprice`).
## Contribution
Thank you for considering helping out with the source code! We welcome contributions
from anyone on the internet, and are grateful for even the smallest of fixes!
If you'd like to contribute to go-ethereum, please fork, fix, commit and send a pull request
for the maintainers to review and merge into the main code base. If you wish to submit
more complex changes though, please check up with the core devs first on [our Discord Server](https://discord.gg/invite/nthXNEv)
to ensure those changes are in line with the general philosophy of the project and/or get
some early feedback which can make both your efforts much lighter as well as our review
and merge procedures quick and simple.
Please make sure your contributions adhere to our coding guidelines:
* Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting)
guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)).
* Code must be documented adhering to the official Go [commentary](https://golang.org/doc/effective_go.html#commentary)
guidelines.
* Pull requests need to be based on and opened against the `master` branch.
* Commit messages should be prefixed with the package(s) they modify.
* E.g. "eth, rpc: make trace configs optional"
Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide)
for more details on configuring your environment, managing project dependencies, and
testing procedures.
## License
The go-ethereum library (i.e. all code outside of the `cmd` directory) is licensed under the
[GNU Lesser General Public License v3.0](https://www.gnu.org/licenses/lgpl-3.0.en.html),
also included in our repository in the `COPYING.LESSER` file.
The go-ethereum binaries (i.e. all code inside of the `cmd` directory) are licensed under the
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also
included in our repository in the `COPYING` file.

175
vendor/github.com/ethereum/go-ethereum/SECURITY.md generated vendored Normal file
View File

@@ -0,0 +1,175 @@
# Security Policy
## Supported Versions
Please see [Releases](https://github.com/ethereum/go-ethereum/releases). We recommend using the [most recently released version](https://github.com/ethereum/go-ethereum/releases/latest).
## Audit reports
Audit reports are published in the `docs` folder: https://github.com/ethereum/go-ethereum/tree/master/docs/audits
| Scope | Date | Report Link |
| ------- | ------- | ----------- |
| `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) |
| `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) |
| `Discv5` | 20191015 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2019-10-15_Discv5_audit_LeastAuthority.pdf) |
| `Discv5` | 20200124 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2020-01-24_DiscV5_audit_Cure53.pdf) |
## Reporting a Vulnerability
**Please do not file a public ticket** mentioning the vulnerability.
To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publicly disclosed security vulnerabilities.
Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number.
The following key may be used to communicate sensitive information to developers.
Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A`
```
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: pgp.mit.edu
mQINBFgl3tgBEAC8A1tUBkD9YV+eLrOmtgy+/JS/H9RoZvkg3K1WZ8IYfj6iIRaYneAk3Bp1
82GUPVz/zhKr2g0tMXIScDR3EnaDsY+Qg+JqQl8NOG+Cikr1nnkG2on9L8c8yiqry1ZTCmYM
qCa2acTFqnyuXJ482aZNtB4QG2BpzfhW4k8YThpegk/EoRUim+y7buJDtoNf7YILlhDQXN8q
lHB02DWOVUihph9tUIFsPK6BvTr9SIr/eG6j6k0bfUo9pexOn7LS4SojoJmsm/5dp6AoKlac
48cZU5zwR9AYcq/nvkrfmf2WkObg/xRdEvKZzn05jRopmAIwmoC3CiLmqCHPmT5a29vEob/y
PFE335k+ujjZCPOu7OwjzDk7M0zMSfnNfDq8bXh16nn+ueBxJ0NzgD1oC6c2PhM+XRQCXCho
yI8vbfp4dGvCvYqvQAE1bWjqnumZ/7vUPgZN6gDfiAzG2mUxC2SeFBhacgzDvtQls+uuvm+F
nQOUgg2Hh8x2zgoZ7kqV29wjaUPFREuew7e+Th5BxielnzOfVycVXeSuvvIn6cd3g/s8mX1c
2kLSXJR7+KdWDrIrR5Az0kwAqFZt6B6QTlDrPswu3mxsm5TzMbny0PsbL/HBM+GZEZCjMXxB
8bqV2eSaktjnSlUNX1VXxyOxXA+ZG2jwpr51egi57riVRXokrQARAQABtDRFdGhlcmV1bSBG
b3VuZGF0aW9uIEJ1ZyBCb3VudHkgPGJvdW50eUBldGhlcmV1bS5vcmc+iQIcBBEBCAAGBQJa
FCY6AAoJEHoMA3Q0/nfveH8P+gJBPo9BXZL8isUfbUWjwLi81Yi70hZqIJUnz64SWTqBzg5b
mCZ69Ji5637THsxQetS2ARabz0DybQ779FhD/IWnqV9T3KuBM/9RzJtuhLzKCyMrAINPMo28
rKWdunHHarpuR4m3tL2zWJkle5QVYb+vkZXJJE98PJw+N4IYeKKeCs2ubeqZu636GA0sMzzB
Jn3m/dRRA2va+/zzbr6F6b51ynzbMxWKTsJnstjC8gs8EeI+Zcd6otSyelLtCUkk3h5sTvpV
Wv67BNSU0BYsMkxyFi9PUyy07Wixgeas89K5jG1oOtDva/FkpRHrTE/WA5OXDRcLrHJM+SwD
CwqcLQqJd09NxwUW1iKeBmPptTiOGu1Gv2o7aEyoaWrHRBO7JuYrQrj6q2B3H1Je0zjAd2qt
09ni2bLwLn4LA+VDpprNTO+eZDprv09s2oFSU6NwziHybovu0y7X4pADGkK2evOM7c86PohX
QRQ1M1T16xLj6wP8/Ykwl6v/LUk7iDPXP3GPILnh4YOkwBR3DsCOPn8098xy7FxEELmupRzt
Cj9oC7YAoweeShgUjBPzb+nGY1m6OcFfbUPBgFyMMfwF6joHbiVIO+39+Ut2g2ysZa7KF+yp
XqVDqyEkYXsOLb25OC7brt8IJEPgBPwcHK5GNag6RfLxnQV+iVZ9KNH1yQgSiQI+BBMBAgAo
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAUCWglh+gUJBaNgWAAKCRDojTM0+l9qCgQ2
D/4udJpV4zGIZW1yNaVvtd3vfKsTLi7GIRJLUBqVb2Yx/uhnN8jTl/tAhCVosCQ1pzvi9kMl
s8qO1vu2kw5EWFFkwK96roI8pTql3VIjwhRVQrCkR7oAk/eUd1U/nt2q6J4UTYeVgqbq4dsI
ZZTRyPJMD667YpuAIcaah+w9j/E5xksYQdMeprnDrQkkBCb4FIMqfDzBPKvEa8DcQr949K85
kxhr6LDq9i5l4Egxt2JdH8DaR4GLca6+oHy0MyPs/bZOsfmZUObfM2oZgPpqYM96JanhzO1j
dpnItyBii2pc+kNx5nMOf4eikE/MBv+WUJ0TttWzApGGmFUzDhtuEvRH9NBjtJ/pMrYspIGu
O/QNY5KKOKQTvVIlwGcm8dTsSkqtBDSUwZyWbfKfKOI1/RhM9dC3gj5/BOY57DYYV4rdTK01
ZtYjuhdfs2bhuP1uF/cgnSSZlv8azvf7Egh7tHPnYxvLjfq1bJAhCIX0hNg0a81/ndPAEFky
fSko+JPKvdSvsUcSi2QQ4U2HX//jNBjXRfG4F0utgbJnhXzEckz6gqt7wSDZH2oddVuO8Ssc
T7sK+CdXthSKnRyuI+sGUpG+6glpKWIfYkWFKNZWuQ+YUatY3QEDHXTIioycSmV8p4d/g/0S
V6TegidLxY8bXMkbqz+3n6FArRffv5MH7qt3cYkCPgQTAQIAKAUCWCXhOwIbAwUJAeEzgAYL
CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQ6I0zNPpfagrN/w/+Igp3vtYdNunikw3yHnYf
Jkm0MmaMDUM9mtsaXVN6xb9n25N3Xa3GWCpmdsbYZ8334tI/oQ4/NHq/bEI5WFH5F1aFkMkm
5AJVLuUkipCtmCZ5NkbRPJA9l0uNUUE6uuFXBhf4ddu7jb0jMetRF/kifJHVCCo5fISUNhLp
7bwcWq9qgDQNZNYMOo4s9WX5Tl+5x4gTZdd2/cAYt49h/wnkw+huM+Jm0GojpLqIQ1jZiffm
otf5rF4L+JhIIdW0W4IIh1v9BhHVllXw+z9oj0PALstT5h8/DuKoIiirFJ4DejU85GR1KKAS
DeO19G/lSpWj1rSgFv2N2gAOxq0X+BbQTua2jdcY6JpHR4H1JJ2wzfHsHPgDQcgY1rGlmjVF
aqU73WV4/hzXc/HshK/k4Zd8uD4zypv6rFsZ3UemK0aL2zXLVpV8SPWQ61nS03x675SmDlYr
A80ENfdqvsn00JQuBVIv4Tv0Ub7NfDraDGJCst8rObjBT/0vnBWTBCebb2EsnS2iStIFkWdz
/WXs4L4Yzre1iJwqRjiuqahZR5jHsjAUf2a0O29HVHE7zlFtCFmLPClml2lGQfQOpm5klGZF
rmvus+qZ9rt35UgWHPZezykkwtWrFOwspwuCWaPDto6tgbRJZ4ftitpdYYM3dKW9IGJXBwrt
BQrMsu+lp0vDF+yJAlUEEwEIAD8CGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAFiEErpbt
lp5HmwCE8+F/6I0zNPpfagoFAmEAEJwFCQycmLgACgkQ6I0zNPpfagpWoBAAhOcbMAUw6Zt0
GYzT3sR5/c0iatezPzXEXJf9ebzR8M5uPElXcxcnMx1dvXZmGPXPJKCPa99WCu1NZYy8F+Wj
GTOY9tfIkvSxhys1p/giPAmvid6uQmD+bz7ivktnyzCkDWfMA+l8lsCSEqVlaq6y5T+a6SWB
6TzC2S0MPb/RrC/7DpwyrNYWumvyVJh09adm1Mw/UGgst/sZ8eMaRYEd3X0yyT1CBpX4zp2E
qQj9IEOTizvzv1x2jkHe5ZUeU3+nTBNlhSA+WFHUi0pfBdo2qog3Mv2EC1P2qMKoSdD5tPbA
zql1yKoHHnXOMsqdftGwbiv2sYXWvrYvmaCd3Ys/viOyt3HOy9uV2ZEtBd9Yqo9x/NZj8QMA
nY5k8jjrIXbUC89MqrJsQ6xxWQIg5ikMT7DvY0Ln89ev4oJyVvwIQAwCm4jUzFNm9bZLYDOP
5lGJCV7tF5NYVU7NxNM8vescKc40mVNK/pygS5mxhK9QYOUjZsIv8gddrl1TkqrFMuxFnTyN
WvzE29wFu/n4N1DkF+ZBqS70SlRvB+Hjz5LrDgEzF1Wf1eA/wq1dZbvMjjDVIc2VGlYp8Cp2
8ob23c1seTtYXTNYgSR5go4EpH+xi+bIWv01bQQ9xGwBbT5sm4WUeWOcmX4QewzLZ3T/wK9+
N4Ye/hmU9O34FwWJOY58EIe0OUV0aGVyZXVtIEZvdW5kYXRpb24gU2VjdXJpdHkgVGVhbSA8
c2VjdXJpdHlAZXRoZXJldW0ub3JnPokCHAQRAQgABgUCWhQmOgAKCRB6DAN0NP5372LSEACT
wZk1TASWZj5QF7rmkIM1GEyBxLE+PundNcMgM9Ktj1315ED8SmiukNI4knVS1MY99OIgXhQl
D1foF2GKdTomrwwC4012zTNyUYCY60LnPZ6Z511HG+rZgZtZrbkz0IiUpwAlhGQND77lBqem
J3K+CFX2XpDA/ojui/kqrY4cwMT5P8xPJkwgpRgw/jgdcZyJTsXdHblV9IGU4H1Vd1SgcfAf
Db3YxDUlBtzlp0NkZqxen8irLIXUQvsfuIfRUbUSkWoK/n3U/gOCajAe8ZNF07iX4OWjH4Sw
NDA841WhFWcGE+d8+pfMVfPASU3UPKH72uw86b2VgR46Av6voyMFd1pj+yCA+YAhJuOpV4yL
QaGg2Z0kVOjuNWK/kBzp1F58DWGh4YBatbhE/UyQOqAAtR7lNf0M3QF9AdrHTxX8oZeqVW3V
Fmi2mk0NwCIUv8SSrZr1dTchp04OtyXe5gZBXSfzncCSRQIUDC8OgNWaOzAaUmK299v4bvye
uSCxOysxC7Q1hZtjzFPKdljS81mRlYeUL4fHlJU9R57bg8mriSXLmn7eKrSEDm/EG5T8nRx7
TgX2MqJs8sWFxD2+bboVEu75yuFmZ//nmCBApAit9Hr2/sCshGIEpa9MQ6xJCYUxyqeJH+Cc
Aja0UfXhnK2uvPClpJLIl4RE3gm4OXeE1IkCPgQTAQIAKAIbAwYLCQgHAwIGFQgCCQoLBBYC
AwECHgECF4AFAloJYfoFCQWjYFgACgkQ6I0zNPpfagr4MQ//cfp3GSbSG8dkqgctW67Fy7cQ
diiTmx3cwxY+tlI3yrNmdjtrIQMzGdqtY6LNz7aN87F8mXNf+DyVHX9+wd1Y8U+E+hVCTzKC
sefUfxTz6unD9TTcGqaoelgIPMn4IiKz1RZE6eKpfDWe6q78W1Y6x1bE0qGNSjqT/QSxpezF
E/OAm/t8RRxVxDtqz8LfH2zLea5zaC+ADj8EqgY9vX9TQa4DyVV8MgOyECCCadJQCD5O5hIA
B2gVDWwrAUw+KBwskXZ7Iq4reJTKLEmt5z9zgtJ/fABwaCFt66ojwg0/RjbO9cNA3ZwHLGwU
C6hkb6bRzIoZoMfYxVS84opiqf/Teq+t/XkBYCxbSXTJDA5MKjcVuw3N6YKWbkGP/EfQThe7
BfAKFwwIw5YmsWjHK8IQj6R6hBxzTz9rz8y1Lu8EAAFfA7OJKaboI2qbOlauH98OuOUmVtr1
TczHO+pTcgWVN0ytq2/pX5KBf4vbmULNbg3HFRq+gHx8CW+jyXGkcqjbgU/5FwtDxeqRTdGJ
SyBGNBEU6pBNolyynyaKaaJjJ/biY27pvjymL5rlz95BH3Dn16Z4RRmqwlT6eq/wFYginujg
CCE1icqOSE+Vjl7V8tV8AcgANkXKdbBE+Q8wlKsGI/kS1w4XFAYcaNHFT8qNeS8TSFXFhvU8
HylYxO79t56JAj4EEwECACgFAlgl3tgCGwMFCQHhM4AGCwkIBwMCBhUIAgkKCwQWAgMBAh4B
AheAAAoJEOiNMzT6X2oKmUMP/0hnaL6bVyepAq2LIdvIUbHfagt/Oo/KVfZs4bkM+xJOitJR
0kwZV9PTihXFdzhL/YNWc2+LtEBtKItqkJZKmWC0E6OPXGVuU6hfFPebuzVccYJfm0Q3Ej19
VJI9Uomf59Bpak8HYyEED7WVQjoYn7XVPsonwus/9+LDX+c5vutbrUdbjga3KjHbewD93X4O
wVVoXyHEmU2Plyg8qvzFbNDylCWO7N2McO6SN6+7DitGZGr2+jO+P2R4RT1cnl2V3IRVcWZ0
OTspPSnRGVr2fFiHN/+v8G/wHPLQcJZFvYPfUGNdcYbTmhWdiY0bEYXFiNrgzCCsyad7eKUR
WN9QmxqmyqLDjUEDJCAh19ES6Vg3tqGwXk+uNUCoF30ga0TxQt6UXZJDEQFAGeASQ/RqE/q1
EAuLv8IGM8o7IqKO2pWfLuqsY6dTbKBwDzz9YOJt7EOGuPPQbHxaYStTushZmJnm7hi8lhVG
jT7qsEJdE95Il+I/mHWnXsCevaXjZugBiyV9yvOq4Hwwe2s1zKfrnQ4u0cadvGAh2eIqum7M
Y3o6nD47aJ3YmEPX/WnhI56bACa2GmWvUwjI4c0/er3esSPYnuHnM9L8Am4qQwMVSmyU80tC
MI7A9e13Mvv+RRkYFLJ7PVPdNpbW5jqX1doklFpKf6/XM+B+ngYneU+zgCUBiQJVBBMBCAA/
AhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgBYhBK6W7ZaeR5sAhPPhf+iNMzT6X2oKBQJh
ABCQBQkMnJi4AAoJEOiNMzT6X2oKAv0P+gJ3twBp5efNWyVLcIg4h4cOo9uD0NPvz8/fm2gX
FoOJL3MeigtPuSVfE9kuTaTuRbArzuFtdvH6G/kcRQvOlO4zyiIRHCk1gDHoIvvtn6RbRhVm
/Xo4uGIsFHst7n4A7BjicwEK5Op6Ih5Hoq19xz83YSBgBVk2fYEJIRyJiKFbyPjH0eSYe8v+
Ra5/F85ugLx1P6mMVkW+WPzULns89riW7BGTnZmXFHZp8nO2pkUlcI7F3KRG7l4kmlC50ox6
DiG/6AJCVulbAClky9C68TmJ/R1RazQxU/9IqVywsydq66tbJQbm5Z7GEti0C5jjbSRJL2oT
1xC7Rilr85PMREkPL3vegJdgj5PKlffZ/MocD/0EohiQ7wFpejFD4iTljeh0exRUwCRb6655
9ib34JSQgU8Hl4JJu+mEgd9v0ZHD0/1mMD6fnAR84zca+O3cdASbnQmzTOKcGzLIrkE8TEnU
+2UZ8Ol7SAAqmBgzY1gKOilUho6dkyCAwNL+QDpvrITDPLEFPsjyB/M2KudZSVEn+Rletju1
qkMW31qFMNlsbwzMZw+0USeGcs31Cs0B2/WQsro99CExlhS9auUFkmoVjJmYVTIYOM0zuPa4
OyGspqPhRu5hEsmMDPDWD7Aad5k4GTqogQNnuKyRliZjXXrDZqFD5nfsJSL8Ky/sJGEMuQIN
BFgl3tgBEACbgq6HTN5gEBi0lkD/MafInmNi+59U5gRGYqk46WlfRjhHudXjDpgD0lolGb4h
YontkMaKRlCg2Rvgjvk3Zve0PKWjKw7gr8YBa9fMFY8BhAXI32OdyI9rFhxEZFfWAfwKVmT1
9BdeAQRFvcfd+8w8f1XVc+zddULMJFBTr+xKDlIRWwTkdLPQeWbjo0eHl/g4tuLiLrTxVbnj
26bf+2+1DbM/w5VavzPrkviHqvKe/QP/gay4QDViWvFgLb90idfAHIdsPgflp0VDS5rVHFL6
D73rSRdIRo3I8c8mYoNjSR4XDuvgOkAKW9LR3pvouFHHjp6Fr0GesRbrbb2EG66iPsR99MQ7
FqIL9VMHPm2mtR+XvbnKkH2rYyEqaMbSdk29jGapkAWle4sIhSKk749A4tGkHl08KZ2N9o6G
rfUehP/V2eJLaph2DioFL1HxRryrKy80QQKLMJRekxigq8greW8xB4zuf9Mkuou+RHNmo8Pe
bHjFstLigiD6/zP2e+4tUmrT0/JTGOShoGMl8Rt0VRxdPImKun+4LOXbfOxArOSkY6i35+gs
gkkSy1gTJE0BY3S9auT6+YrglY/TWPQ9IJxWVOKlT+3WIp5wJu2bBKQ420VLqDYzkoWytel/
bM1ACUtipMiIVeUs2uFiRjpzA1Wy0QHKPTdSuGlJPRrfcQARAQABiQIlBBgBAgAPAhsMBQJa
CWIIBQkFo2BYAAoJEOiNMzT6X2oKgSwQAKKs7BGF8TyZeIEO2EUK7R2bdQDCdSGZY06tqLFg
3IHMGxDMb/7FVoa2AEsFgv6xpoebxBB5zkhUk7lslgxvKiSLYjxfNjTBltfiFJ+eQnf+OTs8
KeR51lLa66rvIH2qUzkNDCCTF45H4wIDpV05AXhBjKYkrDCrtey1rQyFp5fxI+0IQ1UKKXvz
ZK4GdxhxDbOUSd38MYy93nqcmclGSGK/gF8XiyuVjeifDCM6+T1NQTX0K9lneidcqtBDvlgg
JTLJtQPO33o5EHzXSiud+dKth1uUhZOFEaYRZoye1YE3yB0TNOOE8fXlvu8iuIAMBSDL9ep6
sEIaXYwoD60I2gHdWD0lkP0DOjGQpi4ouXM3Edsd5MTi0MDRNTij431kn8T/D0LCgmoUmYYM
BgbwFhXr67axPZlKjrqR0z3F/Elv0ZPPcVg1tNznsALYQ9Ovl6b5M3cJ5GapbbvNWC7yEE1q
Scl9HiMxjt/H6aPastH63/7wcN0TslW+zRBy05VNJvpWGStQXcngsSUeJtI1Gd992YNjUJq4
/Lih6Z1TlwcFVap+cTcDptoUvXYGg/9mRNNPZwErSfIJ0Ibnx9wPVuRN6NiCLOt2mtKp2F1p
M6AOQPpZ85vEh6I8i6OaO0w/Z0UHBwvpY6jDUliaROsWUQsqz78Z34CVj4cy6vPW2EF4iQIl
BBgBAgAPBQJYJd7YAhsMBQkB4TOAAAoJEOiNMzT6X2oKTjgP/1ojCVyGyvHMLUgnX0zwrR5Q
1M5RKFz6kHwKjODVLR3Isp8I935oTQt3DY7yFDI4t0GqbYRQMtxcNEb7maianhK2trCXfhPs
6/L04igjDf5iTcmzamXN6xnh5xkz06hZJJCMuu4MvKxC9MQHCVKAwjswl/9H9JqIBXAY3E2l
LpX5P+5jDZuPxS86p3+k4Rrdp9KTGXjiuEleM3zGlz5BLWydqovOck7C2aKh27ETFpDYY0z3
yQ5AsPJyk1rAr0wrH6+ywmwWlzuQewavnrLnJ2M8iMFXpIhyHeEIU/f7o8f+dQk72rZ9CGzd
cqig2za/BS3zawZWgbv2vB2elNsIllYLdir45jxBOxx2yvJvEuu4glz78y4oJTCTAYAbMlle
5gVdPkVcGyvvVS9tinnSaiIzuvWrYHKWll1uYPm2Q1CDs06P5I7bUGAXpgQLUh/XQguy/0sX
GWqW3FS5JzP+XgcR/7UASvwBdHylubKbeqEpB7G1s+m+8C67qOrc7EQv3Jmy1YDOkhEyNig1
rmjplLuir3tC1X+D7dHpn7NJe7nMwFx2b2MpMkLA9jPPAGPp/ekcu5sxCe+E0J/4UF++K+CR
XIxgtzU2UJfp8p9x+ygbx5qHinR0tVRdIzv3ZnGsXrfxnWfSOaB582cU3VRN9INzHHax8ETa
QVDnGO5uQa+FiQI8BBgBCAAmAhsMFiEErpbtlp5HmwCE8+F/6I0zNPpfagoFAmEAELYFCQyc
mN4ACgkQ6I0zNPpfagoqAQ/+MnDjBx8JWMd/XjeFoYKx/Oo0ntkInV+ME61JTBls4PdVk+TB
8PWZdPQHw9SnTvRmykFeznXIRzuxkowjrZYXdPXBxY2b1WyD5V3Ati1TM9vqpaR4osyPs2xy
I4dzDssh9YvUsIRL99O04/65lGiYeBNuACq+yK/7nD/ErzBkDYJHhMCdadbVWUACxvVIDvro
yQeVLKMsHqMCd8BTGD7VDs79NXskPnN77pAFnkzS4Z2b8SNzrlgTc5pUiuZHIXPIpEYmsYzh
ucTU6uI3dN1PbSFHK5tG2pHb4ZrPxY3L20Dgc2Tfu5/SDApZzwvvKTqjdO891MEJ++H+ssOz
i4O1UeWKs9owWttan9+PI47ozBSKOTxmMqLSQ0f56Np9FJsV0ilGxRKfjhzJ4KniOMUBA7mP
+m+TmXfVtthJred4sHlJMTJNpt+sCcT6wLMmyc3keIEAu33gsJj3LTpkEA2q+V+ZiP6Q8HRB
402ITklABSArrPSE/fQU9L8hZ5qmy0Z96z0iyILgVMLuRCCfQOMWhwl8yQWIIaf1yPI07xur
epy6lH7HmxjjOR7eo0DaSxQGQpThAtFGwkWkFh8yki8j3E42kkrxvEyyYZDXn2YcI3bpqhJx
PtwCMZUJ3kc/skOrs6bOI19iBNaEoNX5Dllm7UHjOgWNDQkcCuOCxucKano=
=arte
-----END PGP PUBLIC KEY BLOCK-----
```

View File

@@ -0,0 +1,255 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
// The ABI holds information about a contract's context and available
// invokable methods. It allows you to type check function calls and
// pack data accordingly.
type ABI struct {
Constructor Method
Methods map[string]Method
Events map[string]Event
Errors map[string]Error
// Additional "special" functions introduced in solidity v0.6.0.
// It's separated from the original default fallback. Each contract
// can only define one fallback and receive function.
Fallback Method // Note it's also used to represent legacy fallback before v0.6.0
Receive Method
}
// JSON returns a parsed ABI interface and error if it failed.
func JSON(reader io.Reader) (ABI, error) {
dec := json.NewDecoder(reader)
var abi ABI
if err := dec.Decode(&abi); err != nil {
return ABI{}, err
}
return abi, nil
}
// Pack the given method name to conform to the ABI. The method call's data
// will consist of method_id, arg0, arg1, ... argN. The method id consists
// of 4 bytes and arguments are all 32 bytes.
// Method ids are created from the first 4 bytes of the hash of the
// methods string signature. (signature = baz(uint32,string32))
func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) {
// Fetch the ABI of the requested method
if name == "" {
// constructor
arguments, err := abi.Constructor.Inputs.Pack(args...)
if err != nil {
return nil, err
}
return arguments, nil
}
method, exist := abi.Methods[name]
if !exist {
return nil, fmt.Errorf("method '%s' not found", name)
}
arguments, err := method.Inputs.Pack(args...)
if err != nil {
return nil, err
}
// Pack up the method ID too if not a constructor and return
return append(method.ID, arguments...), nil
}
func (abi ABI) getArguments(name string, data []byte) (Arguments, error) {
// since there can't be naming collisions with contracts and events,
// we need to decide whether we're calling a method or an event
var args Arguments
if method, ok := abi.Methods[name]; ok {
if len(data)%32 != 0 {
return nil, fmt.Errorf("abi: improperly formatted output: %s - Bytes: [%+v]", string(data), data)
}
args = method.Outputs
}
if event, ok := abi.Events[name]; ok {
args = event.Inputs
}
if args == nil {
return nil, fmt.Errorf("abi: could not locate named method or event: %s", name)
}
return args, nil
}
// Unpack unpacks the output according to the abi specification.
func (abi ABI) Unpack(name string, data []byte) ([]interface{}, error) {
args, err := abi.getArguments(name, data)
if err != nil {
return nil, err
}
return args.Unpack(data)
}
// UnpackIntoInterface unpacks the output in v according to the abi specification.
// It performs an additional copy. Use it only if you want to unpack into a
// structure that does not strictly conform to the ABI structure (e.g. one with additional arguments).
func (abi ABI) UnpackIntoInterface(v interface{}, name string, data []byte) error {
args, err := abi.getArguments(name, data)
if err != nil {
return err
}
unpacked, err := args.Unpack(data)
if err != nil {
return err
}
return args.Copy(v, unpacked)
}
// UnpackIntoMap unpacks a log into the provided map[string]interface{}.
func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte) (err error) {
args, err := abi.getArguments(name, data)
if err != nil {
return err
}
return args.UnpackIntoMap(v, data)
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (abi *ABI) UnmarshalJSON(data []byte) error {
var fields []struct {
Type string
Name string
Inputs []Argument
Outputs []Argument
// Status indicator which can be: "pure", "view",
// "nonpayable" or "payable".
StateMutability string
// Deprecated status indicators, removed in Solidity v0.6.0.
Constant bool // True if function is either pure or view
Payable bool // True if function is payable
// Event relevant indicator represents the event is
// declared as anonymous.
Anonymous bool
}
if err := json.Unmarshal(data, &fields); err != nil {
return err
}
abi.Methods = make(map[string]Method)
abi.Events = make(map[string]Event)
abi.Errors = make(map[string]Error)
for _, field := range fields {
switch field.Type {
case "constructor":
abi.Constructor = NewMethod("", "", Constructor, field.StateMutability, field.Constant, field.Payable, field.Inputs, nil)
case "function":
name := ResolveNameConflict(field.Name, func(s string) bool { _, ok := abi.Methods[s]; return ok })
abi.Methods[name] = NewMethod(name, field.Name, Function, field.StateMutability, field.Constant, field.Payable, field.Inputs, field.Outputs)
case "fallback":
// Function type introduced in Solidity v0.6.0; see
// https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function for details.
if abi.HasFallback() {
return errors.New("only single fallback is allowed")
}
abi.Fallback = NewMethod("", "", Fallback, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "receive":
// Function type introduced in Solidity v0.6.0; see
// https://solidity.readthedocs.io/en/v0.6.0/contracts.html#fallback-function for details.
if abi.HasReceive() {
return errors.New("only single receive is allowed")
}
if field.StateMutability != "payable" {
return errors.New("the statemutability of receive can only be payable")
}
abi.Receive = NewMethod("", "", Receive, field.StateMutability, field.Constant, field.Payable, nil, nil)
case "event":
name := ResolveNameConflict(field.Name, func(s string) bool { _, ok := abi.Events[s]; return ok })
abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs)
case "error":
// Errors cannot be overloaded or overridden but are inherited,
// no need to resolve the name conflict here.
abi.Errors[field.Name] = NewError(field.Name, field.Inputs)
default:
return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name)
}
}
return nil
}
// MethodById looks up a method by the 4-byte id,
// returns nil if none found.
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
if len(sigdata) < 4 {
return nil, fmt.Errorf("data too short (%d bytes) for abi method lookup", len(sigdata))
}
for _, method := range abi.Methods {
if bytes.Equal(method.ID, sigdata[:4]) {
return &method, nil
}
}
return nil, fmt.Errorf("no method with id: %#x", sigdata[:4])
}
// EventByID looks an event up by its topic hash in the
// ABI and returns nil if none found.
func (abi *ABI) EventByID(topic common.Hash) (*Event, error) {
for _, event := range abi.Events {
if bytes.Equal(event.ID.Bytes(), topic.Bytes()) {
return &event, nil
}
}
return nil, fmt.Errorf("no event with id: %#x", topic.Hex())
}
// HasFallback returns an indicator whether a fallback function is included.
func (abi *ABI) HasFallback() bool {
return abi.Fallback.Type == Fallback
}
// HasReceive returns an indicator whether a receive function is included.
func (abi *ABI) HasReceive() bool {
return abi.Receive.Type == Receive
}
// revertSelector is a special function selector for revert reason unpacking.
var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4]
// UnpackRevert resolves the abi-encoded revert reason. According to the solidity
// spec https://solidity.readthedocs.io/en/latest/control-structures.html#revert,
// the provided revert reason is abi-encoded as if it were a call to the function
// `Error(string)`, which is exactly what this helper decodes.
func UnpackRevert(data []byte) (string, error) {
if len(data) < 4 {
return "", errors.New("invalid data for unpacking")
}
if !bytes.Equal(data[:4], revertSelector) {
return "", errors.New("invalid data for unpacking")
}
typ, _ := NewType("string", "", nil)
unpacked, err := (Arguments{{Type: typ}}).Unpack(data[4:])
if err != nil {
return "", err
}
return unpacked[0].(string), nil
}
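
The file above is the core ABI codec. As a quick illustration, here is a minimal, hypothetical sketch of how the `ABI` type is typically used: parse a JSON fragment with `JSON`, encode a `transfer(address,uint256)` call with `Pack`, and decode a boolean return value with `Unpack`. The JSON fragment, recipient address and amount below are illustrative placeholders only.

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

// A hand-written ERC-20-style fragment, used only for this sketch.
const transferABI = `[{"type":"function","name":"transfer","stateMutability":"nonpayable",
  "inputs":[{"name":"to","type":"address"},{"name":"amount","type":"uint256"}],
  "outputs":[{"name":"","type":"bool"}]}]`

func main() {
	parsed, err := abi.JSON(strings.NewReader(transferABI))
	if err != nil {
		panic(err)
	}
	// Pack produces the 4-byte selector followed by the ABI-encoded arguments.
	to := common.HexToAddress("0x0000000000000000000000000000000000000001") // placeholder recipient
	calldata, err := parsed.Pack("transfer", to, big.NewInt(1))
	if err != nil {
		panic(err)
	}
	fmt.Printf("calldata: %x\n", calldata)

	// Unpack decodes return data; here a single ABI-encoded bool (true).
	ret := common.LeftPadBytes([]byte{1}, 32)
	out, err := parsed.Unpack("transfer", ret)
	if err != nil {
		panic(err)
	}
	fmt.Println("returned:", out[0].(bool))
}
```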

View File

@@ -0,0 +1,273 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"strings"
)
// Argument holds the name of the argument and the corresponding type.
// Types are used when packing and testing arguments.
type Argument struct {
Name string
Type Type
Indexed bool // indexed is only used by events
}
type Arguments []Argument
type ArgumentMarshaling struct {
Name string
Type string
InternalType string
Components []ArgumentMarshaling
Indexed bool
}
// UnmarshalJSON implements json.Unmarshaler interface.
func (argument *Argument) UnmarshalJSON(data []byte) error {
var arg ArgumentMarshaling
err := json.Unmarshal(data, &arg)
if err != nil {
return fmt.Errorf("argument json err: %v", err)
}
argument.Type, err = NewType(arg.Type, arg.InternalType, arg.Components)
if err != nil {
return err
}
argument.Name = arg.Name
argument.Indexed = arg.Indexed
return nil
}
// NonIndexed returns the arguments with indexed arguments filtered out.
func (arguments Arguments) NonIndexed() Arguments {
var ret []Argument
for _, arg := range arguments {
if !arg.Indexed {
ret = append(ret, arg)
}
}
return ret
}
// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[].
func (arguments Arguments) isTuple() bool {
return len(arguments) > 1
}
// Unpack performs the operation hexdata -> Go format.
func (arguments Arguments) Unpack(data []byte) ([]interface{}, error) {
if len(data) == 0 {
if len(arguments.NonIndexed()) != 0 {
return nil, errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
}
return make([]interface{}, 0), nil
}
return arguments.UnpackValues(data)
}
// UnpackIntoMap performs the operation hexdata -> mapping of argument name to argument value.
func (arguments Arguments) UnpackIntoMap(v map[string]interface{}, data []byte) error {
// Make sure map is not nil
if v == nil {
return errors.New("abi: cannot unpack into a nil map")
}
if len(data) == 0 {
if len(arguments.NonIndexed()) != 0 {
return errors.New("abi: attempting to unmarshall an empty string while arguments are expected")
}
return nil // Nothing to unmarshal, return
}
marshalledValues, err := arguments.UnpackValues(data)
if err != nil {
return err
}
for i, arg := range arguments.NonIndexed() {
v[arg.Name] = marshalledValues[i]
}
return nil
}
// Copy performs the operation go format -> provided struct.
func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
// make sure the passed value is arguments pointer
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
if len(values) == 0 {
if len(arguments.NonIndexed()) != 0 {
return errors.New("abi: attempting to copy no values while arguments are expected")
}
return nil // Nothing to copy, return
}
if arguments.isTuple() {
return arguments.copyTuple(v, values)
}
return arguments.copyAtomic(v, values[0])
}
// copyAtomic copies ( hexdata -> go ) a single value
func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error {
dst := reflect.ValueOf(v).Elem()
src := reflect.ValueOf(marshalledValues)
if dst.Kind() == reflect.Struct {
return set(dst.Field(0), src)
}
return set(dst, src)
}
// copyTuple copies a batch of values from marshalledValues to v.
func (arguments Arguments) copyTuple(v interface{}, marshalledValues []interface{}) error {
value := reflect.ValueOf(v).Elem()
nonIndexedArgs := arguments.NonIndexed()
switch value.Kind() {
case reflect.Struct:
argNames := make([]string, len(nonIndexedArgs))
for i, arg := range nonIndexedArgs {
argNames[i] = arg.Name
}
var err error
abi2struct, err := mapArgNamesToStructFields(argNames, value)
if err != nil {
return err
}
for i, arg := range nonIndexedArgs {
field := value.FieldByName(abi2struct[arg.Name])
if !field.IsValid() {
return fmt.Errorf("abi: field %s can't be found in the given value", arg.Name)
}
if err := set(field, reflect.ValueOf(marshalledValues[i])); err != nil {
return err
}
}
case reflect.Slice, reflect.Array:
if value.Len() < len(marshalledValues) {
return fmt.Errorf("abi: insufficient number of arguments for unpack, want %d, got %d", len(arguments), value.Len())
}
for i := range nonIndexedArgs {
if err := set(value.Index(i), reflect.ValueOf(marshalledValues[i])); err != nil {
return err
}
}
default:
return fmt.Errorf("abi:[2] cannot unmarshal tuple in to %v", value.Type())
}
return nil
}
// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
// without supplying a struct to unpack into. Instead, this method returns a list containing the
// values. An atomic argument will be a list with one element.
func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
nonIndexedArgs := arguments.NonIndexed()
retval := make([]interface{}, 0, len(nonIndexedArgs))
virtualArgs := 0
for index, arg := range nonIndexedArgs {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
if err != nil {
return nil, err
}
if arg.Type.T == ArrayTy && !isDynamicType(arg.Type) {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
// we count the index from now on.
//
// Array values nested multiple levels deep are also encoded inline:
// [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
virtualArgs += getTypeSize(arg.Type)/32 - 1
} else if arg.Type.T == TupleTy && !isDynamicType(arg.Type) {
// If we have a static tuple, like (uint256, bool, uint256), these are
// coded as just like uint256,bool,uint256
virtualArgs += getTypeSize(arg.Type)/32 - 1
}
retval = append(retval, marshalledValue)
}
return retval, nil
}
// PackValues performs the operation Go format -> Hexdata.
// It is the semantic opposite of UnpackValues.
func (arguments Arguments) PackValues(args []interface{}) ([]byte, error) {
return arguments.Pack(args...)
}
// Pack performs the operation Go format -> Hexdata.
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// Make sure arguments match up and pack them
abiArgs := arguments
if len(args) != len(abiArgs) {
return nil, fmt.Errorf("argument count mismatch: got %d for %d", len(args), len(abiArgs))
}
// variable input is the output appended at the end of packed
// output. This is used for strings and bytes types input.
var variableInput []byte
// input offset is the bytes offset for packed output
inputOffset := 0
for _, abiArg := range abiArgs {
inputOffset += getTypeSize(abiArg.Type)
}
var ret []byte
for i, a := range args {
input := abiArgs[i]
// pack the input
packed, err := input.Type.pack(reflect.ValueOf(a))
if err != nil {
return nil, err
}
// check for dynamic types
if isDynamicType(input.Type) {
// set the offset
ret = append(ret, packNum(reflect.ValueOf(inputOffset))...)
// calculate next offset
inputOffset += len(packed)
// append to variable input
variableInput = append(variableInput, packed...)
} else {
// append the packed value to the input
ret = append(ret, packed...)
}
}
// append the variable input at the end of the packed input
ret = append(ret, variableInput...)
return ret, nil
}
// ToCamelCase converts an under-score string to a camel-case string
func ToCamelCase(input string) string {
parts := strings.Split(input, "_")
for i, s := range parts {
if len(s) > 0 {
parts[i] = strings.ToUpper(s[:1]) + s[1:]
}
}
return strings.Join(parts, "")
}
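
Assuming the `Arguments` helpers above, the following hypothetical sketch builds an argument list by hand with `NewType`, packs a `(uint256, bool)` pair, and unpacks it again with `UnpackValues`; the names and values are placeholders.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	// Errors from NewType are ignored here only because the type strings are fixed.
	uint256T, _ := abi.NewType("uint256", "", nil)
	boolT, _ := abi.NewType("bool", "", nil)

	args := abi.Arguments{
		{Name: "amount", Type: uint256T},
		{Name: "ok", Type: boolT},
	}

	// Pack: Go values -> ABI-encoded bytes (two 32-byte words here).
	encoded, err := args.Pack(big.NewInt(42), true)
	if err != nil {
		panic(err)
	}

	// UnpackValues: ABI-encoded bytes -> one Go value per non-indexed argument.
	values, err := args.UnpackValues(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(values[0].(*big.Int), values[1].(bool)) // 42 true
}
```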

View File

@@ -0,0 +1,179 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bind
import (
"context"
"crypto/ecdsa"
"errors"
"io"
"math/big"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/external"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
)
// ErrNoChainID is returned whenever the user failed to specify a chain id.
var ErrNoChainID = errors.New("no chain id specified")
// ErrNotAuthorized is returned when an account is not properly unlocked.
var ErrNotAuthorized = errors.New("not authorized to sign this account")
// NewTransactor is a utility method to easily create a transaction signer from
// an encrypted json key stream and the associated passphrase.
//
// Deprecated: Use NewTransactorWithChainID instead.
func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) {
log.Warn("WARNING: NewTransactor has been deprecated in favour of NewTransactorWithChainID")
json, err := io.ReadAll(keyin)
if err != nil {
return nil, err
}
key, err := keystore.DecryptKey(json, passphrase)
if err != nil {
return nil, err
}
return NewKeyedTransactor(key.PrivateKey), nil
}
// NewKeyStoreTransactor is a utility method to easily create a transaction signer from
// a decrypted key from a keystore.
//
// Deprecated: Use NewKeyStoreTransactorWithChainID instead.
func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account) (*TransactOpts, error) {
log.Warn("WARNING: NewKeyStoreTransactor has been deprecated in favour of NewTransactorWithChainID")
signer := types.HomesteadSigner{}
return &TransactOpts{
From: account.Address,
Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) {
if address != account.Address {
return nil, ErrNotAuthorized
}
signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes())
if err != nil {
return nil, err
}
return tx.WithSignature(signer, signature)
},
Context: context.Background(),
}, nil
}
// NewKeyedTransactor is a utility method to easily create a transaction signer
// from a single private key.
//
// Deprecated: Use NewKeyedTransactorWithChainID instead.
func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts {
log.Warn("WARNING: NewKeyedTransactor has been deprecated in favour of NewKeyedTransactorWithChainID")
keyAddr := crypto.PubkeyToAddress(key.PublicKey)
signer := types.HomesteadSigner{}
return &TransactOpts{
From: keyAddr,
Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) {
if address != keyAddr {
return nil, ErrNotAuthorized
}
signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key)
if err != nil {
return nil, err
}
return tx.WithSignature(signer, signature)
},
Context: context.Background(),
}
}
// NewTransactorWithChainID is a utility method to easily create a transaction signer from
// an encrypted json key stream and the associated passphrase.
func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.Int) (*TransactOpts, error) {
json, err := io.ReadAll(keyin)
if err != nil {
return nil, err
}
key, err := keystore.DecryptKey(json, passphrase)
if err != nil {
return nil, err
}
return NewKeyedTransactorWithChainID(key.PrivateKey, chainID)
}
// NewKeyStoreTransactorWithChainID is a utility method to easily create a transaction signer from
// a decrypted key from a keystore.
func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accounts.Account, chainID *big.Int) (*TransactOpts, error) {
if chainID == nil {
return nil, ErrNoChainID
}
signer := types.LatestSignerForChainID(chainID)
return &TransactOpts{
From: account.Address,
Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) {
if address != account.Address {
return nil, ErrNotAuthorized
}
signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes())
if err != nil {
return nil, err
}
return tx.WithSignature(signer, signature)
},
Context: context.Background(),
}, nil
}
// NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer
// from a single private key.
func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) {
keyAddr := crypto.PubkeyToAddress(key.PublicKey)
if chainID == nil {
return nil, ErrNoChainID
}
signer := types.LatestSignerForChainID(chainID)
return &TransactOpts{
From: keyAddr,
Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) {
if address != keyAddr {
return nil, ErrNotAuthorized
}
signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key)
if err != nil {
return nil, err
}
return tx.WithSignature(signer, signature)
},
Context: context.Background(),
}, nil
}
// NewClefTransactor is a utility method to easily create a transaction signer
// with a clef backend.
func NewClefTransactor(clef *external.ExternalSigner, account accounts.Account) *TransactOpts {
return &TransactOpts{
From: account.Address,
Signer: func(address common.Address, transaction *types.Transaction) (*types.Transaction, error) {
if address != account.Address {
return nil, ErrNotAuthorized
}
return clef.SignTx(account, transaction, nil) // Clef enforces its own chain id
},
Context: context.Background(),
}
}
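
Since the constructors above are the usual entry point for signing, here is a small hedged sketch of creating `TransactOpts` with `NewKeyedTransactorWithChainID`; the freshly generated key and chain id 1 are placeholders.

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey() // throwaway key, for illustration only
	if err != nil {
		panic(err)
	}
	auth, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1)) // example chain id
	if err != nil {
		panic(err)
	}
	// The returned options carry the sender address and a Signer bound to the
	// chain id; gas fields are left nil so they are filled in at transact time.
	fmt.Println("sender:", auth.From.Hex())
}
```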

View File

@@ -0,0 +1,126 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bind
import (
"context"
"errors"
"math/big"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
)
var (
// ErrNoCode is returned by call and transact operations for which the requested
// recipient contract to operate on does not exist in the state db or does not
// have any code associated with it (i.e. suicided).
ErrNoCode = errors.New("no contract code at given address")
// ErrNoPendingState is raised when attempting to perform a pending state action
// on a backend that doesn't implement PendingContractCaller.
ErrNoPendingState = errors.New("backend does not support pending state")
// ErrNoCodeAfterDeploy is returned by WaitDeployed if contract creation leaves
// an empty contract behind.
ErrNoCodeAfterDeploy = errors.New("no contract code after deployment")
)
// ContractCaller defines the methods needed to allow operating with a contract on a read
// only basis.
type ContractCaller interface {
// CodeAt returns the code of the given account. This is needed to differentiate
// between contract internal errors and the local chain being out of sync.
CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error)
// CallContract executes an Ethereum contract call with the specified data as the
// input.
CallContract(ctx context.Context, call ethereum.CallMsg, blockNumber *big.Int) ([]byte, error)
}
// PendingContractCaller defines methods to perform contract calls on the pending state.
// Call will try to discover this interface when access to the pending state is requested.
// If the backend does not support the pending state, Call returns ErrNoPendingState.
type PendingContractCaller interface {
// PendingCodeAt returns the code of the given account in the pending state.
PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error)
// PendingCallContract executes an Ethereum contract call against the pending state.
PendingCallContract(ctx context.Context, call ethereum.CallMsg) ([]byte, error)
}
// ContractTransactor defines the methods needed to allow operating with a contract
// on a write only basis. Besides the transacting method, the remainder are helpers
// used when the user does not provide some needed values, but rather leaves it up
// to the transactor to decide.
type ContractTransactor interface {
// HeaderByNumber returns a block header from the current canonical chain. If
// number is nil, the latest known header is returned.
HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error)
// PendingCodeAt returns the code of the given account in the pending state.
PendingCodeAt(ctx context.Context, account common.Address) ([]byte, error)
// PendingNonceAt retrieves the current pending nonce associated with an account.
PendingNonceAt(ctx context.Context, account common.Address) (uint64, error)
// SuggestGasPrice retrieves the currently suggested gas price to allow a timely
// execution of a transaction.
SuggestGasPrice(ctx context.Context) (*big.Int, error)
// SuggestGasTipCap retrieves the currently suggested 1559 priority fee to allow
// a timely execution of a transaction.
SuggestGasTipCap(ctx context.Context) (*big.Int, error)
// EstimateGas tries to estimate the gas needed to execute a specific
// transaction based on the current pending state of the backend blockchain.
// There is no guarantee that this is the true gas limit requirement as other
// transactions may be added or removed by miners, but it should provide a basis
// for setting a reasonable default.
EstimateGas(ctx context.Context, call ethereum.CallMsg) (gas uint64, err error)
// SendTransaction injects the transaction into the pending pool for execution.
SendTransaction(ctx context.Context, tx *types.Transaction) error
}
// ContractFilterer defines the methods needed to access log events using one-off
// queries or continuous event subscriptions.
type ContractFilterer interface {
// FilterLogs executes a log filter operation, blocking during execution and
// returning all the results in one batch.
//
// TODO(karalabe): Deprecate when the subscription one can return past data too.
FilterLogs(ctx context.Context, query ethereum.FilterQuery) ([]types.Log, error)
// SubscribeFilterLogs creates a background log filtering operation, returning
// a subscription immediately, which can be used to stream the found events.
SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error)
}
// DeployBackend wraps the operations needed by WaitMined and WaitDeployed.
type DeployBackend interface {
TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error)
CodeAt(ctx context.Context, account common.Address, blockNumber *big.Int) ([]byte, error)
}
// ContractBackend defines the methods needed to work with contracts on a read-write basis.
type ContractBackend interface {
ContractCaller
ContractTransactor
ContractFilterer
}
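
These interfaces are what the generated bindings are written against. In practice they are usually satisfied by `ethclient.Client` from this same vendored tree; the sketch below only compile-checks that assumption and runs nothing.

```go
package main

import (
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/ethclient"
)

// Compile-time assertions that *ethclient.Client can be used wherever the
// bind package expects a caller, transactor, filterer or full backend.
var (
	_ bind.ContractCaller     = (*ethclient.Client)(nil)
	_ bind.ContractTransactor = (*ethclient.Client)(nil)
	_ bind.ContractFilterer   = (*ethclient.Client)(nil)
	_ bind.ContractBackend    = (*ethclient.Client)(nil)
)

func main() {}
```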

View File

@@ -0,0 +1,536 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bind
import (
"context"
"errors"
"fmt"
"math/big"
"strings"
"sync"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
)
const basefeeWiggleMultiplier = 2
// SignerFn is a signer function callback when a contract requires a method to
// sign the transaction before submission.
type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error)
// CallOpts is the collection of options to fine tune a contract call request.
type CallOpts struct {
Pending bool // Whether to operate on the pending state or the last known one
From common.Address // Optional: the sender address, otherwise the first account is used
BlockNumber *big.Int // Optional: the block number on which the call should be performed
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// TransactOpts is the collection of authorization data required to create a
// valid Ethereum transaction.
type TransactOpts struct {
From common.Address // Ethereum account to send the transaction from
Nonce *big.Int // Nonce to use for the transaction execution (nil = use pending state)
Signer SignerFn // Method to use for signing the transaction (mandatory)
Value *big.Int // Funds to transfer along the transaction (nil = 0 = no funds)
GasPrice *big.Int // Gas price to use for the transaction execution (nil = gas price oracle)
GasFeeCap *big.Int // Gas fee cap to use for the 1559 transaction execution (nil = gas price oracle)
GasTipCap *big.Int // Gas priority fee cap to use for the 1559 transaction execution (nil = gas price oracle)
GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
NoSign bool // Do all transact steps but do not sign or send the transaction
NoSend bool // Do all transact steps but do not send the transaction
}
// FilterOpts is the collection of options to fine tune filtering for events
// within a bound contract.
type FilterOpts struct {
Start uint64 // Start of the queried range
End *uint64 // End of the range (nil = latest)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// WatchOpts is the collection of options to fine tune subscribing for events
// within a bound contract.
type WatchOpts struct {
Start *uint64 // Start of the queried range (nil = latest)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
// MetaData collects all metadata for a bound contract.
type MetaData struct {
mu sync.Mutex
Sigs map[string]string
Bin string
ABI string
ab *abi.ABI
}
func (m *MetaData) GetAbi() (*abi.ABI, error) {
m.mu.Lock()
defer m.mu.Unlock()
if m.ab != nil {
return m.ab, nil
}
if parsed, err := abi.JSON(strings.NewReader(m.ABI)); err != nil {
return nil, err
} else {
m.ab = &parsed
}
return m.ab, nil
}
// BoundContract is the base wrapper object that reflects a contract on the
// Ethereum network. It contains a collection of methods that are used by the
// higher level contract bindings to operate.
type BoundContract struct {
address common.Address // Deployment address of the contract on the Ethereum blockchain
abi abi.ABI // Reflect based ABI to access the correct Ethereum methods
caller ContractCaller // Read interface to interact with the blockchain
transactor ContractTransactor // Write interface to interact with the blockchain
filterer ContractFilterer // Event filtering to interact with the blockchain
}
// NewBoundContract creates a low level contract interface through which calls
// and transactions may be made.
func NewBoundContract(address common.Address, abi abi.ABI, caller ContractCaller, transactor ContractTransactor, filterer ContractFilterer) *BoundContract {
return &BoundContract{
address: address,
abi: abi,
caller: caller,
transactor: transactor,
filterer: filterer,
}
}
// DeployContract deploys a contract onto the Ethereum blockchain and binds the
// deployment address with a Go wrapper.
func DeployContract(opts *TransactOpts, abi abi.ABI, bytecode []byte, backend ContractBackend, params ...interface{}) (common.Address, *types.Transaction, *BoundContract, error) {
// Otherwise try to deploy the contract
c := NewBoundContract(common.Address{}, abi, backend, backend, backend)
input, err := c.abi.Pack("", params...)
if err != nil {
return common.Address{}, nil, nil, err
}
tx, err := c.transact(opts, nil, append(bytecode, input...))
if err != nil {
return common.Address{}, nil, nil, err
}
c.address = crypto.CreateAddress(opts.From, tx.Nonce())
return c.address, tx, c, nil
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (c *BoundContract) Call(opts *CallOpts, results *[]interface{}, method string, params ...interface{}) error {
// Don't crash on a lazy user
if opts == nil {
opts = new(CallOpts)
}
if results == nil {
results = new([]interface{})
}
// Pack the input, call and unpack the results
input, err := c.abi.Pack(method, params...)
if err != nil {
return err
}
var (
msg = ethereum.CallMsg{From: opts.From, To: &c.address, Data: input}
ctx = ensureContext(opts.Context)
code []byte
output []byte
)
if opts.Pending {
pb, ok := c.caller.(PendingContractCaller)
if !ok {
return ErrNoPendingState
}
output, err = pb.PendingCallContract(ctx, msg)
if err != nil {
return err
}
if len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
if code, err = pb.PendingCodeAt(ctx, c.address); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode
}
}
} else {
output, err = c.caller.CallContract(ctx, msg, opts.BlockNumber)
if err != nil {
return err
}
if len(output) == 0 {
// Make sure we have a contract to operate on, and bail out otherwise.
if code, err = c.caller.CodeAt(ctx, c.address, opts.BlockNumber); err != nil {
return err
} else if len(code) == 0 {
return ErrNoCode
}
}
}
if len(*results) == 0 {
res, err := c.abi.Unpack(method, output)
*results = res
return err
}
res := *results
return c.abi.UnpackIntoInterface(res[0], method, output)
}
// Transact invokes the (paid) contract method with params as input values.
func (c *BoundContract) Transact(opts *TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
// Otherwise pack up the parameters and invoke the contract
input, err := c.abi.Pack(method, params...)
if err != nil {
return nil, err
}
// todo(rjl493456442) check the method is payable or not,
// reject invalid transaction at the first place
return c.transact(opts, &c.address, input)
}
// RawTransact initiates a transaction with the given raw calldata as the input.
// It's usually used to initiate transactions that invoke the **Fallback** function.
func (c *BoundContract) RawTransact(opts *TransactOpts, calldata []byte) (*types.Transaction, error) {
// todo(rjl493456442) check the method is payable or not,
// reject invalid transaction at the first place
return c.transact(opts, &c.address, calldata)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (c *BoundContract) Transfer(opts *TransactOpts) (*types.Transaction, error) {
// todo(rjl493456442) check the payable fallback or receive is defined
// or not, reject invalid transaction at the first place
return c.transact(opts, &c.address, nil)
}
func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Address, input []byte, head *types.Header) (*types.Transaction, error) {
// Normalize value
value := opts.Value
if value == nil {
value = new(big.Int)
}
// Estimate TipCap
gasTipCap := opts.GasTipCap
if gasTipCap == nil {
tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context))
if err != nil {
return nil, err
}
gasTipCap = tip
}
// Estimate FeeCap
gasFeeCap := opts.GasFeeCap
if gasFeeCap == nil {
gasFeeCap = new(big.Int).Add(
gasTipCap,
new(big.Int).Mul(head.BaseFee, big.NewInt(basefeeWiggleMultiplier)),
)
}
if gasFeeCap.Cmp(gasTipCap) < 0 {
return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", gasFeeCap, gasTipCap)
}
// Estimate GasLimit
gasLimit := opts.GasLimit
if opts.GasLimit == 0 {
var err error
gasLimit, err = c.estimateGasLimit(opts, contract, input, nil, gasTipCap, gasFeeCap, value)
if err != nil {
return nil, err
}
}
// create the transaction
nonce, err := c.getNonce(opts)
if err != nil {
return nil, err
}
baseTx := &types.DynamicFeeTx{
To: contract,
Nonce: nonce,
GasFeeCap: gasFeeCap,
GasTipCap: gasTipCap,
Gas: gasLimit,
Value: value,
Data: input,
}
return types.NewTx(baseTx), nil
}
func (c *BoundContract) createLegacyTx(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
if opts.GasFeeCap != nil || opts.GasTipCap != nil {
return nil, errors.New("maxFeePerGas or maxPriorityFeePerGas specified but london is not active yet")
}
// Normalize value
value := opts.Value
if value == nil {
value = new(big.Int)
}
// Estimate GasPrice
gasPrice := opts.GasPrice
if gasPrice == nil {
price, err := c.transactor.SuggestGasPrice(ensureContext(opts.Context))
if err != nil {
return nil, err
}
gasPrice = price
}
// Estimate GasLimit
gasLimit := opts.GasLimit
if opts.GasLimit == 0 {
var err error
gasLimit, err = c.estimateGasLimit(opts, contract, input, gasPrice, nil, nil, value)
if err != nil {
return nil, err
}
}
// create the transaction
nonce, err := c.getNonce(opts)
if err != nil {
return nil, err
}
baseTx := &types.LegacyTx{
To: contract,
Nonce: nonce,
GasPrice: gasPrice,
Gas: gasLimit,
Value: value,
Data: input,
}
return types.NewTx(baseTx), nil
}
func (c *BoundContract) estimateGasLimit(opts *TransactOpts, contract *common.Address, input []byte, gasPrice, gasTipCap, gasFeeCap, value *big.Int) (uint64, error) {
if contract != nil {
// Gas estimation cannot succeed without code for method invocations.
if code, err := c.transactor.PendingCodeAt(ensureContext(opts.Context), c.address); err != nil {
return 0, err
} else if len(code) == 0 {
return 0, ErrNoCode
}
}
msg := ethereum.CallMsg{
From: opts.From,
To: contract,
GasPrice: gasPrice,
GasTipCap: gasTipCap,
GasFeeCap: gasFeeCap,
Value: value,
Data: input,
}
return c.transactor.EstimateGas(ensureContext(opts.Context), msg)
}
func (c *BoundContract) getNonce(opts *TransactOpts) (uint64, error) {
if opts.Nonce == nil {
return c.transactor.PendingNonceAt(ensureContext(opts.Context), opts.From)
} else {
return opts.Nonce.Uint64(), nil
}
}
// transact executes an actual transaction invocation, first deriving any missing
// authorization fields, and then scheduling the transaction for execution.
func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, input []byte) (*types.Transaction, error) {
if opts.GasPrice != nil && (opts.GasFeeCap != nil || opts.GasTipCap != nil) {
return nil, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified")
}
// Create the transaction
var (
rawTx *types.Transaction
err error
)
if opts.GasPrice != nil {
rawTx, err = c.createLegacyTx(opts, contract, input)
} else {
// Only query for basefee if gasPrice not specified
if head, errHead := c.transactor.HeaderByNumber(ensureContext(opts.Context), nil); errHead != nil {
return nil, errHead
} else if head.BaseFee != nil {
rawTx, err = c.createDynamicTx(opts, contract, input, head)
} else {
// Chain is not London ready -> use legacy transaction
rawTx, err = c.createLegacyTx(opts, contract, input)
}
}
if err != nil {
return nil, err
}
if opts.NoSign {
return rawTx, nil
}
// Sign the transaction and schedule it for execution
if opts.Signer == nil {
return nil, errors.New("no signer to authorize the transaction with")
}
signedTx, err := opts.Signer(opts.From, rawTx)
if err != nil {
return nil, err
}
if opts.NoSend {
return signedTx, nil
}
if err := c.transactor.SendTransaction(ensureContext(opts.Context), signedTx); err != nil {
return nil, err
}
return signedTx, nil
}
// FilterLogs filters contract logs for past blocks, returning the necessary
// channels to construct a strongly typed bound iterator on top of them.
func (c *BoundContract) FilterLogs(opts *FilterOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
// Don't crash on a lazy user
if opts == nil {
opts = new(FilterOpts)
}
// Append the event selector to the query parameters and construct the topic set
query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)
topics, err := abi.MakeTopics(query...)
if err != nil {
return nil, nil, err
}
// Start the background filtering
logs := make(chan types.Log, 128)
config := ethereum.FilterQuery{
Addresses: []common.Address{c.address},
Topics: topics,
FromBlock: new(big.Int).SetUint64(opts.Start),
}
if opts.End != nil {
config.ToBlock = new(big.Int).SetUint64(*opts.End)
}
/* TODO(karalabe): Replace the rest of the method below with this when supported
sub, err := c.filterer.SubscribeFilterLogs(ensureContext(opts.Context), config, logs)
*/
buff, err := c.filterer.FilterLogs(ensureContext(opts.Context), config)
if err != nil {
return nil, nil, err
}
sub, err := event.NewSubscription(func(quit <-chan struct{}) error {
for _, log := range buff {
select {
case logs <- log:
case <-quit:
return nil
}
}
return nil
}), nil
if err != nil {
return nil, nil, err
}
return logs, sub, nil
}
// WatchLogs subscribes to contract logs for future blocks, returning a
// subscription object that can be used to tear down the watcher.
func (c *BoundContract) WatchLogs(opts *WatchOpts, name string, query ...[]interface{}) (chan types.Log, event.Subscription, error) {
// Don't crash on a lazy user
if opts == nil {
opts = new(WatchOpts)
}
// Append the event selector to the query parameters and construct the topic set
query = append([][]interface{}{{c.abi.Events[name].ID}}, query...)
topics, err := abi.MakeTopics(query...)
if err != nil {
return nil, nil, err
}
// Start the background filtering
logs := make(chan types.Log, 128)
config := ethereum.FilterQuery{
Addresses: []common.Address{c.address},
Topics: topics,
}
if opts.Start != nil {
config.FromBlock = new(big.Int).SetUint64(*opts.Start)
}
sub, err := c.filterer.SubscribeFilterLogs(ensureContext(opts.Context), config, logs)
if err != nil {
return nil, nil, err
}
return logs, sub, nil
}
// UnpackLog unpacks a retrieved log into the provided output structure.
func (c *BoundContract) UnpackLog(out interface{}, event string, log types.Log) error {
if log.Topics[0] != c.abi.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoInterface(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.abi.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopics(out, indexed, log.Topics[1:])
}
// UnpackLogIntoMap unpacks a retrieved log into the provided map.
func (c *BoundContract) UnpackLogIntoMap(out map[string]interface{}, event string, log types.Log) error {
if log.Topics[0] != c.abi.Events[event].ID {
return fmt.Errorf("event signature mismatch")
}
if len(log.Data) > 0 {
if err := c.abi.UnpackIntoMap(out, event, log.Data); err != nil {
return err
}
}
var indexed abi.Arguments
for _, arg := range c.abi.Events[event].Inputs {
if arg.Indexed {
indexed = append(indexed, arg)
}
}
return abi.ParseTopicsIntoMap(out, indexed, log.Topics[1:])
}
// ensureContext is a helper method to ensure a context is not nil, even if the
// user specified it as such.
func ensureContext(ctx context.Context) context.Context {
if ctx == nil {
return context.Background()
}
return ctx
}
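
Putting `BoundContract` together with the pieces above, here is a hedged sketch of a read-only `Call` against an assumed ERC-20-style `balanceOf(address)` method; the RPC endpoint, token address and owner address are placeholders, not real deployments.

```go
package main

import (
	"fmt"
	"math/big"
	"strings"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/accounts/abi/bind"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"
)

// Minimal hand-written ABI fragment for balanceOf, used only for this sketch.
const balanceOfABI = `[{"type":"function","name":"balanceOf","stateMutability":"view",
  "inputs":[{"name":"owner","type":"address"}],
  "outputs":[{"name":"","type":"uint256"}]}]`

func main() {
	client, err := ethclient.Dial("https://rpc.example.invalid") // placeholder endpoint
	if err != nil {
		panic(err)
	}
	parsed, err := abi.JSON(strings.NewReader(balanceOfABI))
	if err != nil {
		panic(err)
	}
	token := common.HexToAddress("0x0000000000000000000000000000000000000002") // placeholder token
	contract := bind.NewBoundContract(token, parsed, client, client, client)

	// A nil *CallOpts is tolerated by Call; it defaults to the latest block.
	var out []interface{}
	owner := common.HexToAddress("0x0000000000000000000000000000000000000001") // placeholder owner
	if err := contract.Call(nil, &out, "balanceOf", owner); err != nil {
		panic(err)
	}
	fmt.Println("balance:", out[0].(*big.Int))
}
```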

View File

@@ -0,0 +1,649 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package bind generates Ethereum contract Go bindings.
//
// Detailed usage document and tutorial available on the go-ethereum Wiki page:
// https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts
package bind
import (
"bytes"
"errors"
"fmt"
"go/format"
"regexp"
"strings"
"text/template"
"unicode"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/log"
)
// Lang is a target programming language selector to generate bindings for.
type Lang int
const (
LangGo Lang = iota
LangJava
LangObjC
)
func isKeyWord(arg string) bool {
switch arg {
case "break":
case "case":
case "chan":
case "const":
case "continue":
case "default":
case "defer":
case "else":
case "fallthrough":
case "for":
case "func":
case "go":
case "goto":
case "if":
case "import":
case "interface":
case "iota":
case "map":
case "make":
case "new":
case "package":
case "range":
case "return":
case "select":
case "struct":
case "switch":
case "type":
case "var":
default:
return false
}
return true
}
// Bind generates a Go wrapper around a contract ABI. This wrapper isn't meant
// to be used as is in client code, but rather as an intermediate struct which
// enforces compile time type safety and naming conventions, as opposed to having to
// manually maintain hard coded strings that break at runtime.
func Bind(types []string, abis []string, bytecodes []string, fsigs []map[string]string, pkg string, lang Lang, libs map[string]string, aliases map[string]string) (string, error) {
var (
// contracts is the map of each individual contract requested binding
contracts = make(map[string]*tmplContract)
// structs is the map of all redeclared structs shared by passed contracts.
structs = make(map[string]*tmplStruct)
// isLib is the map used to flag each encountered library as such
isLib = make(map[string]struct{})
)
for i := 0; i < len(types); i++ {
// Parse the actual ABI to generate the binding for
evmABI, err := abi.JSON(strings.NewReader(abis[i]))
if err != nil {
return "", err
}
// Strip any whitespace from the JSON ABI
strippedABI := strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return -1
}
return r
}, abis[i])
// Extract the call and transact methods; events, struct definitions; and sort them alphabetically
var (
calls = make(map[string]*tmplMethod)
transacts = make(map[string]*tmplMethod)
events = make(map[string]*tmplEvent)
fallback *tmplMethod
receive *tmplMethod
// identifiers are used to detect duplicated identifiers of functions
// and events. For all calls, transacts and events, abigen will generate
// corresponding bindings. However we have to ensure there is no
// identifier collisions in the bindings of these categories.
callIdentifiers = make(map[string]bool)
transactIdentifiers = make(map[string]bool)
eventIdentifiers = make(map[string]bool)
)
for _, input := range evmABI.Constructor.Inputs {
if hasStruct(input.Type) {
bindStructType[lang](input.Type, structs)
}
}
for _, original := range evmABI.Methods {
// Normalize the method for capital cases and non-anonymous inputs/outputs
normalized := original
normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
// Ensure there is no duplicated identifier
var identifiers = callIdentifiers
if !original.IsConstant() {
identifiers = transactIdentifiers
}
if identifiers[normalizedName] {
return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName)
}
identifiers[normalizedName] = true
normalized.Name = normalizedName
normalized.Inputs = make([]abi.Argument, len(original.Inputs))
copy(normalized.Inputs, original.Inputs)
for j, input := range normalized.Inputs {
if input.Name == "" || isKeyWord(input.Name) {
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
}
if hasStruct(input.Type) {
bindStructType[lang](input.Type, structs)
}
}
normalized.Outputs = make([]abi.Argument, len(original.Outputs))
copy(normalized.Outputs, original.Outputs)
for j, output := range normalized.Outputs {
if output.Name != "" {
normalized.Outputs[j].Name = capitalise(output.Name)
}
if hasStruct(output.Type) {
bindStructType[lang](output.Type, structs)
}
}
// Append the methods to the call or transact lists
if original.IsConstant() {
calls[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
} else {
transacts[original.Name] = &tmplMethod{Original: original, Normalized: normalized, Structured: structured(original.Outputs)}
}
}
for _, original := range evmABI.Events {
// Skip anonymous events as they don't support explicit filtering
if original.Anonymous {
continue
}
// Normalize the event for capital cases and non-anonymous outputs
normalized := original
// Ensure there is no duplicated identifier
normalizedName := methodNormalizer[lang](alias(aliases, original.Name))
if eventIdentifiers[normalizedName] {
return "", fmt.Errorf("duplicated identifier \"%s\"(normalized \"%s\"), use --alias for renaming", original.Name, normalizedName)
}
eventIdentifiers[normalizedName] = true
normalized.Name = normalizedName
used := make(map[string]bool)
normalized.Inputs = make([]abi.Argument, len(original.Inputs))
copy(normalized.Inputs, original.Inputs)
for j, input := range normalized.Inputs {
if input.Name == "" || isKeyWord(input.Name) {
normalized.Inputs[j].Name = fmt.Sprintf("arg%d", j)
}
// Event is a bit special, we need to define event struct in binding,
// ensure there is no camel-case-style name conflict.
for index := 0; ; index++ {
if !used[capitalise(normalized.Inputs[j].Name)] {
used[capitalise(normalized.Inputs[j].Name)] = true
break
}
normalized.Inputs[j].Name = fmt.Sprintf("%s%d", normalized.Inputs[j].Name, index)
}
if hasStruct(input.Type) {
bindStructType[lang](input.Type, structs)
}
}
// Append the event to the accumulator list
events[original.Name] = &tmplEvent{Original: original, Normalized: normalized}
}
// Add two special fallback functions if they exist
if evmABI.HasFallback() {
fallback = &tmplMethod{Original: evmABI.Fallback}
}
if evmABI.HasReceive() {
receive = &tmplMethod{Original: evmABI.Receive}
}
// There is no easy way to pass arbitrary java objects to the Go side.
if len(structs) > 0 && lang == LangJava {
return "", errors.New("java binding for tuple arguments is not supported yet")
}
contracts[types[i]] = &tmplContract{
Type: capitalise(types[i]),
InputABI: strings.ReplaceAll(strippedABI, "\"", "\\\""),
InputBin: strings.TrimPrefix(strings.TrimSpace(bytecodes[i]), "0x"),
Constructor: evmABI.Constructor,
Calls: calls,
Transacts: transacts,
Fallback: fallback,
Receive: receive,
Events: events,
Libraries: make(map[string]string),
}
// Function 4-byte signatures are stored in the same sequence
// as types, if available.
if len(fsigs) > i {
contracts[types[i]].FuncSigs = fsigs[i]
}
// Parse library references.
for pattern, name := range libs {
matched, err := regexp.Match("__\\$"+pattern+"\\$__", []byte(contracts[types[i]].InputBin))
if err != nil {
log.Error("Could not search for pattern", "pattern", pattern, "contract", contracts[types[i]], "err", err)
}
if matched {
contracts[types[i]].Libraries[pattern] = name
// Keep track of the fact that this type is a library
if _, ok := isLib[name]; !ok {
isLib[name] = struct{}{}
}
}
}
}
// Check if that type has already been identified as a library
for i := 0; i < len(types); i++ {
_, ok := isLib[types[i]]
contracts[types[i]].Library = ok
}
// Generate the contract template data content and render it
data := &tmplData{
Package: pkg,
Contracts: contracts,
Libraries: libs,
Structs: structs,
}
buffer := new(bytes.Buffer)
funcs := map[string]interface{}{
"bindtype": bindType[lang],
"bindtopictype": bindTopicType[lang],
"namedtype": namedType[lang],
"capitalise": capitalise,
"decapitalise": decapitalise,
}
tmpl := template.Must(template.New("").Funcs(funcs).Parse(tmplSource[lang]))
if err := tmpl.Execute(buffer, data); err != nil {
return "", err
}
// For Go bindings pass the code through gofmt to clean it up
if lang == LangGo {
code, err := format.Source(buffer.Bytes())
if err != nil {
return "", fmt.Errorf("%v\n%s", err, buffer)
}
return string(code), nil
}
// For all others just return as is for now
return buffer.String(), nil
}
// bindType is a set of type binders that convert Solidity types to some supported
// programming language types.
var bindType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
LangGo: bindTypeGo,
LangJava: bindTypeJava,
}
// bindBasicTypeGo converts basic Solidity types (except array, slice and tuple) to Go ones.
func bindBasicTypeGo(kind abi.Type) string {
switch kind.T {
case abi.AddressTy:
return "common.Address"
case abi.IntTy, abi.UintTy:
parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String())
switch parts[2] {
case "8", "16", "32", "64":
return fmt.Sprintf("%sint%s", parts[1], parts[2])
}
return "*big.Int"
case abi.FixedBytesTy:
return fmt.Sprintf("[%d]byte", kind.Size)
case abi.BytesTy:
return "[]byte"
case abi.FunctionTy:
return "[24]byte"
default:
// string, bool types
return kind.String()
}
}
// bindTypeGo converts Solidity types to Go ones. Since there is no clear mapping
// from all Solidity types to Go ones (e.g. uint17), those that cannot be exactly
// mapped will use an upscaled type (e.g. *big.Int).
func bindTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
switch kind.T {
case abi.TupleTy:
return structs[kind.TupleRawName+kind.String()].Name
case abi.ArrayTy:
return fmt.Sprintf("[%d]", kind.Size) + bindTypeGo(*kind.Elem, structs)
case abi.SliceTy:
return "[]" + bindTypeGo(*kind.Elem, structs)
default:
return bindBasicTypeGo(kind)
}
}
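// exampleBindTypeGo is a hypothetical, illustrative helper (not part of the public
// API) sketching what bindTypeGo produces for a few common Solidity types, assuming
// it lives in this package. abi.NewType is the exported constructor from the
// accounts/abi package.
//
//	uint8   -> uint8
//	uint256 -> *big.Int
//	address -> common.Address
//	bytes32 -> [32]byte
//	bytes   -> []byte
func exampleBindTypeGo() {
    structs := make(map[string]*tmplStruct)
    for _, sol := range []string{"uint8", "uint256", "address", "bytes32", "bytes"} {
        typ, err := abi.NewType(sol, "", nil)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%-8s -> %s\n", sol, bindTypeGo(typ, structs))
    }
}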
// bindBasicTypeJava converts basic Solidity types (except array, slice and tuple) to Java ones.
func bindBasicTypeJava(kind abi.Type) string {
switch kind.T {
case abi.AddressTy:
return "Address"
case abi.IntTy, abi.UintTy:
// Note that uint and int (without digits) are also matched,
// these are size 256, and will translate to BigInt (the default).
parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(kind.String())
if len(parts) != 3 {
return kind.String()
}
// All unsigned integers should be translated to BigInt since gomobile doesn't
// support them.
if parts[1] == "u" {
return "BigInt"
}
namedSize := map[string]string{
"8": "byte",
"16": "short",
"32": "int",
"64": "long",
}[parts[2]]
// default to BigInt
if namedSize == "" {
namedSize = "BigInt"
}
return namedSize
case abi.FixedBytesTy, abi.BytesTy:
return "byte[]"
case abi.BoolTy:
return "boolean"
case abi.StringTy:
return "String"
case abi.FunctionTy:
return "byte[24]"
default:
return kind.String()
}
}
// pluralizeJavaType explicitly converts multidimensional types to the predefined
// types on the Go side.
func pluralizeJavaType(typ string) string {
switch typ {
case "boolean":
return "Bools"
case "String":
return "Strings"
case "Address":
return "Addresses"
case "byte[]":
return "Binaries"
case "BigInt":
return "BigInts"
}
return typ + "[]"
}
// bindTypeJava converts a Solidity type to a Java one. Since there is no clear mapping
// from all Solidity types to Java ones (e.g. uint17), those that cannot be exactly
// mapped will use an upscaled type (e.g. BigInt).
func bindTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
switch kind.T {
case abi.TupleTy:
return structs[kind.TupleRawName+kind.String()].Name
case abi.ArrayTy, abi.SliceTy:
return pluralizeJavaType(bindTypeJava(*kind.Elem, structs))
default:
return bindBasicTypeJava(kind)
}
}
// bindTopicType is a set of type binders that convert Solidity types to some
// supported programming language topic types.
var bindTopicType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
LangGo: bindTopicTypeGo,
LangJava: bindTopicTypeJava,
}
// bindTopicTypeGo converts a Solidity topic type to a Go one. It is almost the same
// functionality as for simple types, but dynamic types get converted to hashes.
func bindTopicTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
bound := bindTypeGo(kind, structs)
// todo(rjl493456442) according to the Solidity documentation, indexed event
// parameters that are not value types, i.e. arrays and structs, are not
// stored directly; instead a keccak256 hash of their encoding is stored.
//
// We only convert strings and bytes to hashes; arrays (both fixed-size and
// dynamic-size) and structs still need to be handled.
if bound == "string" || bound == "[]byte" {
bound = "common.Hash"
}
return bound
}
// bindTopicTypeJava converts a Solidity topic type to a Java one. It is almost the same
// functionality as for simple types, but dynamic types get converted to hashes.
func bindTopicTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
bound := bindTypeJava(kind, structs)
// todo(rjl493456442) according to the Solidity documentation, indexed event
// parameters that are not value types, i.e. arrays and structs, are not
// stored directly; instead a keccak256 hash of their encoding is stored.
//
// We only convert strings and bytes to hashes; arrays (both fixed-size and
// dynamic-size) and structs still need to be handled.
if bound == "String" || bound == "byte[]" {
bound = "Hash"
}
return bound
}
// bindStructType is a set of type binders that convert Solidity tuple types to some supported
// programming language struct definition.
var bindStructType = map[Lang]func(kind abi.Type, structs map[string]*tmplStruct) string{
LangGo: bindStructTypeGo,
LangJava: bindStructTypeJava,
}
// bindStructTypeGo converts a Solidity tuple type to a Go one and records the mapping
// in the given map.
// Notably, this function will resolve and record nested structs recursively.
func bindStructTypeGo(kind abi.Type, structs map[string]*tmplStruct) string {
switch kind.T {
case abi.TupleTy:
// We compose a raw struct name and a canonical parameter expression
// together here. The reason is that before Solidity v0.5.11, kind.TupleRawName
// is empty, so we use the canonical parameter expression to distinguish
// different struct definitions. For backward compatibility, we concatenate
// the two so that when kind.TupleRawName is not empty it still yields a
// unique id.
id := kind.TupleRawName + kind.String()
if s, exist := structs[id]; exist {
return s.Name
}
var (
names = make(map[string]bool)
fields []*tmplField
)
for i, elem := range kind.TupleElems {
name := capitalise(kind.TupleRawNames[i])
name = abi.ResolveNameConflict(name, func(s string) bool { return names[s] })
names[name] = true
fields = append(fields, &tmplField{Type: bindStructTypeGo(*elem, structs), Name: name, SolKind: *elem})
}
name := kind.TupleRawName
if name == "" {
name = fmt.Sprintf("Struct%d", len(structs))
}
name = capitalise(name)
structs[id] = &tmplStruct{
Name: name,
Fields: fields,
}
return name
case abi.ArrayTy:
return fmt.Sprintf("[%d]", kind.Size) + bindStructTypeGo(*kind.Elem, structs)
case abi.SliceTy:
return "[]" + bindStructTypeGo(*kind.Elem, structs)
default:
return bindBasicTypeGo(kind)
}
}
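// exampleBindStructTypeGo is a hypothetical, illustrative helper (not part of the
// public API) sketching how a Solidity struct such as
//
//	struct Point { uint256 x; uint256 y; }
//
// is registered by bindStructTypeGo, assuming it lives in this package. The generated
// binding will then roughly contain:
//
//	type Point struct {
//	    X *big.Int
//	    Y *big.Int
//	}
func exampleBindStructTypeGo() {
    point, err := abi.NewType("tuple", "struct Point", []abi.ArgumentMarshaling{
        {Name: "x", Type: "uint256"},
        {Name: "y", Type: "uint256"},
    })
    if err != nil {
        panic(err)
    }
    structs := make(map[string]*tmplStruct)
    fmt.Println(bindStructTypeGo(point, structs)) // "Point", now recorded in structs
}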
// bindStructTypeJava converts a Solidity tuple type to a Java one and records the mapping
// in the given map.
// Notably, this function will resolve and record nested structs recursively.
func bindStructTypeJava(kind abi.Type, structs map[string]*tmplStruct) string {
switch kind.T {
case abi.TupleTy:
// We compose a raw struct name and a canonical parameter expression
// together here. The reason is that before Solidity v0.5.11, kind.TupleRawName
// is empty, so we use the canonical parameter expression to distinguish
// different struct definitions. For backward compatibility, we concatenate
// the two so that when kind.TupleRawName is not empty it still yields a
// unique id.
id := kind.TupleRawName + kind.String()
if s, exist := structs[id]; exist {
return s.Name
}
var fields []*tmplField
for i, elem := range kind.TupleElems {
field := bindStructTypeJava(*elem, structs)
fields = append(fields, &tmplField{Type: field, Name: decapitalise(kind.TupleRawNames[i]), SolKind: *elem})
}
name := kind.TupleRawName
if name == "" {
name = fmt.Sprintf("Class%d", len(structs))
}
structs[id] = &tmplStruct{
Name: name,
Fields: fields,
}
return name
case abi.ArrayTy, abi.SliceTy:
return pluralizeJavaType(bindStructTypeJava(*kind.Elem, structs))
default:
return bindBasicTypeJava(kind)
}
}
// namedType is a set of functions that transform language specific types to
// named versions that may be used inside method names.
var namedType = map[Lang]func(string, abi.Type) string{
LangGo: func(string, abi.Type) string { panic("this shouldn't be needed") },
LangJava: namedTypeJava,
}
// namedTypeJava converts some primitive data types to named variants that can
// be used as parts of method names.
func namedTypeJava(javaKind string, solKind abi.Type) string {
switch javaKind {
case "byte[]":
return "Binary"
case "boolean":
return "Bool"
default:
parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String())
if len(parts) != 4 {
return javaKind
}
switch parts[2] {
case "8", "16", "32", "64":
if parts[3] == "" {
return capitalise(fmt.Sprintf("%sint%s", parts[1], parts[2]))
}
return capitalise(fmt.Sprintf("%sint%ss", parts[1], parts[2]))
default:
return javaKind
}
}
}
// alias returns an alias of the given string based on the aliasing rules
// or returns itself if no rule is matched.
func alias(aliases map[string]string, n string) string {
if alias, exist := aliases[n]; exist {
return alias
}
return n
}
// methodNormalizer is a name transformer that modifies Solidity method names to
// conform to target language naming conventions.
var methodNormalizer = map[Lang]func(string) string{
LangGo: abi.ToCamelCase,
LangJava: decapitalise,
}
// capitalise makes a camel-case string which starts with an upper case character.
var capitalise = abi.ToCamelCase
// decapitalise makes a camel-case string which starts with a lower case character.
func decapitalise(input string) string {
if len(input) == 0 {
return input
}
goForm := abi.ToCamelCase(input)
return strings.ToLower(goForm[:1]) + goForm[1:]
}
// structured checks whether a list of ABI data types has enough information to
// operate through a proper Go struct or if flat returns are needed.
func structured(args abi.Arguments) bool {
if len(args) < 2 {
return false
}
exists := make(map[string]bool)
for _, out := range args {
// If the name is anonymous, we can't organize into a struct
if out.Name == "" {
return false
}
// If the field name is empty when normalized or collides (var, Var, _var, _Var),
// we can't organize into a struct
field := capitalise(out.Name)
if field == "" || exists[field] {
return false
}
exists[field] = true
}
return true
}
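// exampleStructured is a hypothetical, illustrative helper (not part of the public
// API) sketching which output lists end up wrapped in a named Go struct versus
// returned flat, assuming it lives in this package.
func exampleStructured() {
    uint256T, err := abi.NewType("uint256", "", nil)
    if err != nil {
        panic(err)
    }
    named := abi.Arguments{{Name: "balance", Type: uint256T}, {Name: "nonce", Type: uint256T}}
    anon := abi.Arguments{{Name: "", Type: uint256T}, {Name: "", Type: uint256T}}
    single := abi.Arguments{{Name: "balance", Type: uint256T}}

    fmt.Println(structured(named))  // true:  two named outputs -> struct return
    fmt.Println(structured(anon))   // false: unnamed outputs -> flat returns
    fmt.Println(structured(single)) // false: fewer than two outputs -> flat return
}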
// hasStruct reports whether the given type is a struct, struct slice
// or struct array.
func hasStruct(t abi.Type) bool {
switch t.T {
case abi.SliceTy:
return hasStruct(*t.Elem)
case abi.ArrayTy:
return hasStruct(*t.Elem)
case abi.TupleTy:
return true
default:
return false
}
}
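// exampleBind is a hypothetical, illustrative helper (not part of the public API)
// sketching how abigen-style tooling typically invokes Bind for a single contract.
// The ABI JSON and bytecode below are placeholders; the call assumes the Bind
// signature defined earlier in this file.
func exampleBind() (string, error) {
    abis := []string{`[{"type":"function","name":"get","stateMutability":"view","inputs":[],"outputs":[{"name":"","type":"uint256"}]}]`}
    bins := []string{"0x6080604052"} // placeholder deployment bytecode
    names := []string{"Storage"}     // Go type name for the binding
    // Generates the Go source of package "storage" containing the Storage binding.
    return Bind(names, abis, bins, nil, "storage", LangGo, nil, nil)
}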

View File

@@ -0,0 +1,709 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bind
import "github.com/ethereum/go-ethereum/accounts/abi"
// tmplData is the data structure required to fill the binding template.
type tmplData struct {
Package string // Name of the package to place the generated file in
Contracts map[string]*tmplContract // List of contracts to generate into this file
Libraries map[string]string // Map the bytecode's link pattern to the library name
Structs map[string]*tmplStruct // Contract struct type definitions
}
// tmplContract contains the data needed to generate an individual contract binding.
type tmplContract struct {
Type string // Type name of the main contract binding
InputABI string // JSON ABI used as the input to generate the binding from
InputBin string // Optional EVM bytecode used to generate deploy code from
FuncSigs map[string]string // Optional map: string signature -> 4-byte signature
Constructor abi.Method // Contract constructor for deploy parametrization
Calls map[string]*tmplMethod // Contract calls that only read state data
Transacts map[string]*tmplMethod // Contract calls that write state data
Fallback *tmplMethod // Additional special fallback function
Receive *tmplMethod // Additional special receive function
Events map[string]*tmplEvent // Contract events accessors
Libraries map[string]string // Same as tmplData, but filtered to only keep what the contract needs
Library bool // Indicator whether the contract is a library
}
// tmplMethod is a wrapper around an abi.Method that contains a few preprocessed
// and cached data fields.
type tmplMethod struct {
Original abi.Method // Original method as parsed by the abi package
Normalized abi.Method // Normalized version of the parsed method (capitalized names, non-anonymous args/returns)
Structured bool // Whether the returns should be accumulated into a struct
}
// tmplEvent is a wrapper around an abi.Event that contains a few preprocessed
// and cached data fields.
type tmplEvent struct {
Original abi.Event // Original event as parsed by the abi package
Normalized abi.Event // Normalized version of the parsed fields
}
// tmplField is a wrapper around a struct field with the binding-language
// struct type definition and the corresponding field name.
type tmplField struct {
Type string // Field type representation depends on target binding language
Name string // Field name converted from the raw user-defined field name
SolKind abi.Type // Raw abi type information
}
// tmplStruct is a wrapper around an abi.tuple and contains an auto-generated
// struct name.
type tmplStruct struct {
Name string // Auto-generated struct name (before Solidity v0.5.11) or raw name.
Fields []*tmplField // Struct fields definition depends on the binding language.
}
// tmplSource is language to template mapping containing all the supported
// programming languages the package can generate to.
var tmplSource = map[Lang]string{
LangGo: tmplSourceGo,
LangJava: tmplSourceJava,
}
// tmplSourceGo is the Go source template that the generated Go contract binding
// is based on.
const tmplSourceGo = `
// Code generated - DO NOT EDIT.
// This file is a generated binding and any manual changes will be lost.
package {{.Package}}
import (
"math/big"
"strings"
"errors"
ethereum "github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
)
// Reference imports to suppress errors if they are not otherwise used.
var (
_ = errors.New
_ = big.NewInt
_ = strings.NewReader
_ = ethereum.NotFound
_ = bind.Bind
_ = common.Big1
_ = types.BloomLookup
_ = event.NewSubscription
_ = abi.ConvertType
)
{{$structs := .Structs}}
{{range $structs}}
// {{.Name}} is an auto generated low-level Go binding around a user-defined struct.
type {{.Name}} struct {
{{range $field := .Fields}}
{{$field.Name}} {{$field.Type}}{{end}}
}
{{end}}
{{range $contract := .Contracts}}
// {{.Type}}MetaData contains all meta data concerning the {{.Type}} contract.
var {{.Type}}MetaData = &bind.MetaData{
ABI: "{{.InputABI}}",
{{if $contract.FuncSigs -}}
Sigs: map[string]string{
{{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}",
{{end}}
},
{{end -}}
{{if .InputBin -}}
Bin: "0x{{.InputBin}}",
{{end}}
}
// {{.Type}}ABI is the input ABI used to generate the binding from.
// Deprecated: Use {{.Type}}MetaData.ABI instead.
var {{.Type}}ABI = {{.Type}}MetaData.ABI
{{if $contract.FuncSigs}}
// Deprecated: Use {{.Type}}MetaData.Sigs instead.
// {{.Type}}FuncSigs maps the 4-byte function signature to its string representation.
var {{.Type}}FuncSigs = {{.Type}}MetaData.Sigs
{{end}}
{{if .InputBin}}
// {{.Type}}Bin is the compiled bytecode used for deploying new contracts.
// Deprecated: Use {{.Type}}MetaData.Bin instead.
var {{.Type}}Bin = {{.Type}}MetaData.Bin
// Deploy{{.Type}} deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
func Deploy{{.Type}}(auth *bind.TransactOpts, backend bind.ContractBackend {{range .Constructor.Inputs}}, {{.Name}} {{bindtype .Type $structs}}{{end}}) (common.Address, *types.Transaction, *{{.Type}}, error) {
parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil {
return common.Address{}, nil, nil, err
}
if parsed == nil {
return common.Address{}, nil, nil, errors.New("GetABI returned nil")
}
{{range $pattern, $name := .Libraries}}
{{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend)
{{$contract.Type}}Bin = strings.ReplaceAll({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:])
{{end}}
address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}})
if err != nil {
return common.Address{}, nil, nil, err
}
return address, tx, &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
}
{{end}}
// {{.Type}} is an auto generated Go binding around an Ethereum contract.
type {{.Type}} struct {
{{.Type}}Caller // Read-only binding to the contract
{{.Type}}Transactor // Write-only binding to the contract
{{.Type}}Filterer // Log filterer for contract events
}
// {{.Type}}Caller is an auto generated read-only Go binding around an Ethereum contract.
type {{.Type}}Caller struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// {{.Type}}Transactor is an auto generated write-only Go binding around an Ethereum contract.
type {{.Type}}Transactor struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// {{.Type}}Filterer is an auto generated log filtering Go binding around Ethereum contract events.
type {{.Type}}Filterer struct {
contract *bind.BoundContract // Generic contract wrapper for the low level calls
}
// {{.Type}}Session is an auto generated Go binding around an Ethereum contract,
// with pre-set call and transact options.
type {{.Type}}Session struct {
Contract *{{.Type}} // Generic contract binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// {{.Type}}CallerSession is an auto generated read-only Go binding around an Ethereum contract,
// with pre-set call options.
type {{.Type}}CallerSession struct {
Contract *{{.Type}}Caller // Generic contract caller binding to set the session for
CallOpts bind.CallOpts // Call options to use throughout this session
}
// {{.Type}}TransactorSession is an auto generated write-only Go binding around an Ethereum contract,
// with pre-set transact options.
type {{.Type}}TransactorSession struct {
Contract *{{.Type}}Transactor // Generic contract transactor binding to set the session for
TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
}
// {{.Type}}Raw is an auto generated low-level Go binding around an Ethereum contract.
type {{.Type}}Raw struct {
Contract *{{.Type}} // Generic contract binding to access the raw methods on
}
// {{.Type}}CallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
type {{.Type}}CallerRaw struct {
Contract *{{.Type}}Caller // Generic read-only contract binding to access the raw methods on
}
// {{.Type}}TransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
type {{.Type}}TransactorRaw struct {
Contract *{{.Type}}Transactor // Generic write-only contract binding to access the raw methods on
}
// New{{.Type}} creates a new instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}(address common.Address, backend bind.ContractBackend) (*{{.Type}}, error) {
contract, err := bind{{.Type}}(address, backend, backend, backend)
if err != nil {
return nil, err
}
return &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
}
// New{{.Type}}Caller creates a new read-only instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}Caller(address common.Address, caller bind.ContractCaller) (*{{.Type}}Caller, error) {
contract, err := bind{{.Type}}(address, caller, nil, nil)
if err != nil {
return nil, err
}
return &{{.Type}}Caller{contract: contract}, nil
}
// New{{.Type}}Transactor creates a new write-only instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}Transactor(address common.Address, transactor bind.ContractTransactor) (*{{.Type}}Transactor, error) {
contract, err := bind{{.Type}}(address, nil, transactor, nil)
if err != nil {
return nil, err
}
return &{{.Type}}Transactor{contract: contract}, nil
}
// New{{.Type}}Filterer creates a new log filterer instance of {{.Type}}, bound to a specific deployed contract.
func New{{.Type}}Filterer(address common.Address, filterer bind.ContractFilterer) (*{{.Type}}Filterer, error) {
contract, err := bind{{.Type}}(address, nil, nil, filterer)
if err != nil {
return nil, err
}
return &{{.Type}}Filterer{contract: contract}, nil
}
// bind{{.Type}} binds a generic wrapper to an already deployed contract.
func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
parsed, err := {{.Type}}MetaData.GetAbi()
if err != nil {
return nil, err
}
return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _{{$contract.Type}}.Contract.{{$contract.Type}}Caller.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{$contract.Type}}Transactor.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_{{$contract.Type}} *{{$contract.Type}}Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{$contract.Type}}Transactor.contract.Transact(opts, method, params...)
}
// Call invokes the (constant) contract method with params as input values and
// sets the output to result. The result type might be a single field for simple
// returns, a slice of interfaces for anonymous returns and a struct for named
// returns.
func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
return _{{$contract.Type}}.Contract.contract.Call(opts, result, method, params...)
}
// Transfer initiates a plain transaction to move funds to the contract, calling
// its default method if one is available.
func (_{{$contract.Type}} *{{$contract.Type}}TransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.contract.Transfer(opts)
}
// Transact invokes the (paid) contract method with params as input values.
func (_{{$contract.Type}} *{{$contract.Type}}TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.contract.Transact(opts, method, params...)
}
{{range .Calls}}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) {
var out []interface{}
err := _{{$contract.Type}}.contract.Call(opts, &out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
{{if .Structured}}
outstruct := new(struct{ {{range .Normalized.Outputs}} {{.Name}} {{bindtype .Type $structs}}; {{end}} })
if err != nil {
return *outstruct, err
}
{{range $i, $t := .Normalized.Outputs}}
outstruct.{{.Name}} = *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}}
return *outstruct, err
{{else}}
if err != nil {
return {{range $i, $_ := .Normalized.Outputs}}*new({{bindtype .Type $structs}}), {{end}} err
}
{{range $i, $t := .Normalized.Outputs}}
out{{$i}} := *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}}
return {{range $i, $t := .Normalized.Outputs}}out{{$i}}, {{end}} err
{{end}}
}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}CallerSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
{{end}}
{{range .Transacts}}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) {{.Normalized.Name}}(opts *bind.TransactOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.Transact(opts, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
}
{{end}}
{{if .Fallback}}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.RawTransact(opts, calldata)
}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
}
{{end}}
{{if .Receive}}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) {
return _{{$contract.Type}}.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (*types.Transaction, error) {
return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
}
{{end}}
{{range .Events}}
// {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract.
type {{$contract.Type}}{{.Normalized.Name}}Iterator struct {
Event *{{$contract.Type}}{{.Normalized.Name}} // Event containing the contract specifics and raw log
contract *bind.BoundContract // Generic contract to use for unpacking event data
event string // Event name to use for unpacking event data
logs chan types.Log // Log channel receiving the found contract events
sub ethereum.Subscription // Subscription for errors, completion and termination
done bool // Whether the subscription completed delivering logs
fail error // Occurred error to stop iteration
}
// Next advances the iterator to the subsequent event, returning whether there
// are any more events found. In case of a retrieval or parsing error, false is
// returned and Error() can be queried for the exact failure.
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Next() bool {
// If the iterator failed, stop iterating
if (it.fail != nil) {
return false
}
// If the iterator completed, deliver directly whatever's available
if (it.done) {
select {
case log := <-it.logs:
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
default:
return false
}
}
// Iterator still in progress, wait for either a data or an error event
select {
case log := <-it.logs:
it.Event = new({{$contract.Type}}{{.Normalized.Name}})
if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
it.fail = err
return false
}
it.Event.Raw = log
return true
case err := <-it.sub.Err():
it.done = true
it.fail = err
return it.Next()
}
}
// Error returns any retrieval or parsing error occurred during filtering.
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Error() error {
return it.fail
}
// Close terminates the iteration process, releasing any pending underlying
// resources.
func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Close() error {
it.sub.Unsubscribe()
return nil
}
// {{$contract.Type}}{{.Normalized.Name}} represents a {{.Normalized.Name}} event raised by the {{$contract.Type}} contract.
type {{$contract.Type}}{{.Normalized.Name}} struct { {{range .Normalized.Inputs}}
{{capitalise .Name}} {{if .Indexed}}{{bindtopictype .Type $structs}}{{else}}{{bindtype .Type $structs}}{{end}}; {{end}}
Raw types.Log // Blockchain specific contextual infos
}
// Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) {
{{range .Normalized.Inputs}}
{{if .Indexed}}var {{.Name}}Rule []interface{}
for _, {{.Name}}Item := range {{.Name}} {
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
}{{end}}{{end}}
logs, sub, err := _{{$contract.Type}}.contract.FilterLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
if err != nil {
return nil, err
}
return &{{$contract.Type}}{{.Normalized.Name}}Iterator{contract: _{{$contract.Type}}.contract, event: "{{.Original.Name}}", logs: logs, sub: sub}, nil
}
// Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (event.Subscription, error) {
{{range .Normalized.Inputs}}
{{if .Indexed}}var {{.Name}}Rule []interface{}
for _, {{.Name}}Item := range {{.Name}} {
{{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
}{{end}}{{end}}
logs, sub, err := _{{$contract.Type}}.contract.WatchLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
if err != nil {
return nil, err
}
return event.NewSubscription(func(quit <-chan struct{}) error {
defer sub.Unsubscribe()
for {
select {
case log := <-logs:
// New log arrived, parse the event and forward to the user
event := new({{$contract.Type}}{{.Normalized.Name}})
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
return err
}
event.Raw = log
select {
case sink <- event:
case err := <-sub.Err():
return err
case <-quit:
return nil
}
case err := <-sub.Err():
return err
case <-quit:
return nil
}
}
}), nil
}
// Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
event := new({{$contract.Type}}{{.Normalized.Name}})
if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
return nil, err
}
event.Raw = log
return event, nil
}
{{end}}
{{end}}
`
// tmplSourceJava is the Java source template that the generated Java contract binding
// is based on.
const tmplSourceJava = `
// This file is an automatically generated Java binding. Do not modify as any
// change will likely be lost upon the next re-generation!
package {{.Package}};
import org.ethereum.geth.*;
import java.util.*;
{{$structs := .Structs}}
{{range $contract := .Contracts}}
{{if not .Library}}public {{end}}class {{.Type}} {
// ABI is the input ABI used to generate the binding from.
public final static String ABI = "{{.InputABI}}";
{{if $contract.FuncSigs}}
// {{.Type}}FuncSigs maps the 4-byte function signature to its string representation.
public final static Map<String, String> {{.Type}}FuncSigs;
static {
Hashtable<String, String> temp = new Hashtable<String, String>();
{{range $strsig, $binsig := .FuncSigs}}temp.put("{{$binsig}}", "{{$strsig}}");
{{end}}
{{.Type}}FuncSigs = Collections.unmodifiableMap(temp);
}
{{end}}
{{if .InputBin}}
// BYTECODE is the compiled bytecode used for deploying new contracts.
public final static String BYTECODE = "0x{{.InputBin}}";
// deploy deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
public static {{.Type}} deploy(TransactOpts auth, EthereumClient client{{range .Constructor.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
Interfaces args = Geth.newInterfaces({{(len .Constructor.Inputs)}});
String bytecode = BYTECODE;
{{if .Libraries}}
// "link" contract to dependent libraries by deploying them first.
{{range $pattern, $name := .Libraries}}
{{capitalise $name}} {{decapitalise $name}}Inst = {{capitalise $name}}.deploy(auth, client);
bytecode = bytecode.replace("__${{$pattern}}$__", {{decapitalise $name}}Inst.Address.getHex().substring(2));
{{end}}
{{end}}
{{range $index, $element := .Constructor.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
{{end}}
return new {{.Type}}(Geth.deployContract(auth, ABI, Geth.decodeFromHex(bytecode), client, args));
}
// Internal constructor used by contract deployment.
private {{.Type}}(BoundContract deployment) {
this.Address = deployment.getAddress();
this.Deployer = deployment.getDeployer();
this.Contract = deployment;
}
{{end}}
// Ethereum address where this contract is located.
public final Address Address;
// Ethereum transaction in which this contract was deployed (if known!).
public final Transaction Deployer;
// Contract instance bound to a blockchain address.
private final BoundContract Contract;
// Creates a new instance of {{.Type}}, bound to a specific deployed contract.
public {{.Type}}(Address address, EthereumClient client) throws Exception {
this(Geth.bindContract(address, ABI, client));
}
{{range .Calls}}
{{if gt (len .Normalized.Outputs) 1}}
// {{capitalise .Normalized.Name}}Results is the output of a call to {{.Normalized.Name}}.
public class {{capitalise .Normalized.Name}}Results {
{{range $index, $item := .Normalized.Outputs}}public {{bindtype .Type $structs}} {{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}};
{{end}}
}
{{end}}
// {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
public {{if gt (len .Normalized.Outputs) 1}}{{capitalise .Normalized.Name}}Results{{else if eq (len .Normalized.Outputs) 0}}void{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}}{{end}}{{end}} {{.Normalized.Name}}(CallOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
{{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
{{end}}
Interfaces results = Geth.newInterfaces({{(len .Normalized.Outputs)}});
{{range $index, $item := .Normalized.Outputs}}Interface result{{$index}} = Geth.newInterface(); result{{$index}}.setDefault{{namedtype (bindtype .Type $structs) .Type}}(); results.set({{$index}}, result{{$index}});
{{end}}
if (opts == null) {
opts = Geth.newCallOpts();
}
this.Contract.call(opts, results, "{{.Original.Name}}", args);
{{if gt (len .Normalized.Outputs) 1}}
{{capitalise .Normalized.Name}}Results result = new {{capitalise .Normalized.Name}}Results();
{{range $index, $item := .Normalized.Outputs}}result.{{if ne .Name ""}}{{.Name}}{{else}}Return{{$index}}{{end}} = results.get({{$index}}).get{{namedtype (bindtype .Type $structs) .Type}}();
{{end}}
return result;
{{else}}{{range .Normalized.Outputs}}return results.get(0).get{{namedtype (bindtype .Type $structs) .Type}}();{{end}}
{{end}}
}
{{end}}
{{range .Transacts}}
// {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
//
// Solidity: {{.Original.String}}
public Transaction {{.Normalized.Name}}(TransactOpts opts{{range .Normalized.Inputs}}, {{bindtype .Type $structs}} {{.Name}}{{end}}) throws Exception {
Interfaces args = Geth.newInterfaces({{(len .Normalized.Inputs)}});
{{range $index, $item := .Normalized.Inputs}}Interface arg{{$index}} = Geth.newInterface();arg{{$index}}.set{{namedtype (bindtype .Type $structs) .Type}}({{.Name}});args.set({{$index}},arg{{$index}});
{{end}}
return this.Contract.transact(opts, "{{.Original.Name}}" , args);
}
{{end}}
{{if .Fallback}}
// Fallback is a paid mutator transaction binding the contract fallback function.
//
// Solidity: {{.Fallback.Original.String}}
public Transaction Fallback(TransactOpts opts, byte[] calldata) throws Exception {
return this.Contract.rawTransact(opts, calldata);
}
{{end}}
{{if .Receive}}
// Receive is a paid mutator transaction binding the contract receive function.
//
// Solidity: {{.Receive.Original.String}}
public Transaction Receive(TransactOpts opts) throws Exception {
return this.Contract.rawTransact(opts, null);
}
{{end}}
}
{{end}}
`

View File

@@ -0,0 +1,79 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bind
import (
"context"
"errors"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// WaitMined waits for tx to be mined on the blockchain.
// It stops waiting when the context is canceled.
func WaitMined(ctx context.Context, b DeployBackend, tx *types.Transaction) (*types.Receipt, error) {
queryTicker := time.NewTicker(time.Second)
defer queryTicker.Stop()
logger := log.New("hash", tx.Hash())
for {
receipt, err := b.TransactionReceipt(ctx, tx.Hash())
if err == nil {
return receipt, nil
}
if errors.Is(err, ethereum.NotFound) {
logger.Trace("Transaction not yet mined")
} else {
logger.Trace("Receipt retrieval failed", "err", err)
}
// Wait for the next round.
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-queryTicker.C:
}
}
}
// WaitDeployed waits for a contract deployment transaction and returns the on-chain
// contract address when it is mined. It stops waiting when ctx is canceled.
func WaitDeployed(ctx context.Context, b DeployBackend, tx *types.Transaction) (common.Address, error) {
if tx.To() != nil {
return common.Address{}, errors.New("tx is not contract creation")
}
receipt, err := WaitMined(ctx, b, tx)
if err != nil {
return common.Address{}, err
}
if receipt.ContractAddress == (common.Address{}) {
return common.Address{}, errors.New("zero address")
}
// Check that code has indeed been deployed at the address.
// This matters on pre-Homestead chains: OOG in the constructor
// could leave an empty account behind.
code, err := b.CodeAt(ctx, receipt.ContractAddress, nil)
if err == nil && len(code) == 0 {
err = ErrNoCodeAfterDeploy
}
return receipt.ContractAddress, err
}
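// exampleWaitDeployed is a hypothetical, illustrative helper (not part of the public
// API) sketching how callers typically wait for a contract-creation transaction. The
// backend can be any DeployBackend implementation (for instance an ethclient.Client),
// and tx is the creation transaction returned by a deploy helper.
func exampleWaitDeployed(b DeployBackend, tx *types.Transaction) (common.Address, error) {
    // Give up if the transaction is not mined within two minutes.
    ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
    defer cancel()

    addr, err := WaitDeployed(ctx, b, tx)
    if err != nil {
        return common.Address{}, err
    }
    log.Info("Contract deployed", "address", addr)
    return addr, nil
}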

View File

@@ -0,0 +1,26 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package abi implements the Ethereum ABI (Application Binary
// Interface).
//
// The Ethereum ABI is strongly typed, known at compile time
// and static. This ABI will handle basic type casting; unsigned
// to signed and vice versa. It does not handle slice casting such
// as unsigned slice to signed slice. Bit-size type casting is also
// handled: ints with a bit size of 32 will be properly cast to int256,
// etc.
package abi
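// An illustrative sketch (not a definitive usage guide) of parsing a contract ABI
// and packing a call with this package; the JSON definition and the address used
// below are placeholders:
//
//	def := `[{"type":"function","name":"balanceOf","stateMutability":"view","inputs":[{"name":"owner","type":"address"}],"outputs":[{"name":"","type":"uint256"}]}]`
//	parsed, err := JSON(strings.NewReader(def))
//	if err != nil {
//	    // handle a malformed ABI definition
//	}
//	calldata, err := parsed.Pack("balanceOf", common.HexToAddress("0x0000000000000000000000000000000000000001"))
//	// calldata now holds the 4-byte selector followed by the ABI-encoded argument.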

View File

@@ -0,0 +1,93 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"bytes"
"errors"
"fmt"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
type Error struct {
Name string
Inputs Arguments
str string
// Sig contains the string signature according to the ABI spec.
// e.g. error foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substituted by its canonical representation "int256".
Sig string
// ID returns the canonical representation of the error's signature used by the
// abi definition to identify error names and types.
ID common.Hash
}
func NewError(name string, inputs Arguments) Error {
// sanitize inputs to remove inputs without names
// and precompute string and sig representation.
names := make([]string, len(inputs))
types := make([]string, len(inputs))
for i, input := range inputs {
if input.Name == "" {
inputs[i] = Argument{
Name: fmt.Sprintf("arg%d", i),
Indexed: input.Indexed,
Type: input.Type,
}
} else {
inputs[i] = input
}
// string representation
names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name)
if input.Indexed {
names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name)
}
// sig representation
types[i] = input.Type.String()
}
str := fmt.Sprintf("error %v(%v)", name, strings.Join(names, ", "))
sig := fmt.Sprintf("%v(%v)", name, strings.Join(types, ","))
id := common.BytesToHash(crypto.Keccak256([]byte(sig)))
return Error{
Name: name,
Inputs: inputs,
str: str,
Sig: sig,
ID: id,
}
}
func (e *Error) String() string {
return e.str
}
func (e *Error) Unpack(data []byte) (interface{}, error) {
if len(data) < 4 {
return "", errors.New("invalid data for unpacking")
}
if !bytes.Equal(data[:4], e.ID[:4]) {
return "", errors.New("invalid data for unpacking")
}
return e.Inputs.Unpack(data[4:])
}
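// exampleCustomError is a hypothetical, illustrative helper (not part of the public
// API) sketching how a Solidity custom error such as
//
//	error InsufficientBalance(uint256 available, uint256 required);
//
// maps onto this type: the first four bytes of ID form the revert selector, and
// Unpack decodes the data that follows it.
func exampleCustomError(revertData []byte) (interface{}, error) {
    uint256T, err := NewType("uint256", "", nil)
    if err != nil {
        return nil, err
    }
    e := NewError("InsufficientBalance", Arguments{
        {Name: "available", Type: uint256T},
        {Name: "required", Type: uint256T},
    })
    // e.Sig == "InsufficientBalance(uint256,uint256)"
    return e.Unpack(revertData)
}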

View File

@@ -0,0 +1,81 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"errors"
"fmt"
"reflect"
)
var (
errBadBool = errors.New("abi: improperly encoded boolean value")
)
// formatSliceString formats the reflection kind with the given slice size
// and returns a formatted string representation.
func formatSliceString(kind reflect.Kind, sliceSize int) string {
if sliceSize == -1 {
return fmt.Sprintf("[]%v", kind)
}
return fmt.Sprintf("[%d]%v", sliceSize, kind)
}
// sliceTypeCheck checks that the given slice can be assigned to the reflection
// type in t.
func sliceTypeCheck(t Type, val reflect.Value) error {
if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
return typeErr(formatSliceString(t.GetType().Kind(), t.Size), val.Type())
}
if t.T == ArrayTy && val.Len() != t.Size {
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), formatSliceString(val.Type().Elem().Kind(), val.Len()))
}
if t.Elem.T == SliceTy || t.Elem.T == ArrayTy {
if val.Len() > 0 {
return sliceTypeCheck(*t.Elem, val.Index(0))
}
}
if val.Type().Elem().Kind() != t.Elem.GetType().Kind() {
return typeErr(formatSliceString(t.Elem.GetType().Kind(), t.Size), val.Type())
}
return nil
}
// typeCheck checks that the given reflection value can be assigned to the reflection
// type in t.
func typeCheck(t Type, value reflect.Value) error {
if t.T == SliceTy || t.T == ArrayTy {
return sliceTypeCheck(t, value)
}
// Check base type validity. Element types will be checked later on.
if t.GetType().Kind() != value.Kind() {
return typeErr(t.GetType().Kind(), value.Kind())
} else if t.T == FixedBytesTy && t.Size != value.Len() {
return typeErr(t.GetType(), value.Type())
} else {
return nil
}
}
// typeErr returns a formatted type casting error.
func typeErr(expected, got interface{}) error {
return fmt.Errorf("abi: cannot use %v as type %v as argument", got, expected)
}
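// exampleTypeCheck is a hypothetical, illustrative helper (not part of the public
// API) sketching how typeCheck accepts and rejects Go values for an ABI bool type.
func exampleTypeCheck() {
    boolT, err := NewType("bool", "", nil)
    if err != nil {
        panic(err)
    }
    fmt.Println(typeCheck(boolT, reflect.ValueOf(true))) // <nil>: a Go bool matches an ABI bool
    fmt.Println(typeCheck(boolT, reflect.ValueOf(1)))    // error: cannot use int as type bool
}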

View File

@@ -0,0 +1,103 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"fmt"
"strings"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
// Event is an event potentially triggered by the EVM's LOG mechanism. The Event
// holds type information (inputs) about the yielded output. Anonymous events
// don't get the signature canonical representation as the first LOG topic.
type Event struct {
// Name is the event name used for internal representation. It's derived from
// the raw name and a suffix will be added in the case of event overloading.
//
// e.g.
// These are two events that have the same name:
// * foo(int,int)
// * foo(uint,uint)
// The event name of the first one will be resolved as foo while the second one
// will be resolved as foo0.
Name string
// RawName is the raw event name parsed from ABI.
RawName string
Anonymous bool
Inputs Arguments
str string
// Sig contains the string signature according to the ABI spec.
// e.g. event foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substituted by its canonical representation "int256".
Sig string
// ID returns the canonical representation of the event's signature used by the
// abi definition to identify event names and types.
ID common.Hash
}
// NewEvent creates a new Event.
// It sanitizes the input arguments to remove unnamed arguments.
// It also precomputes the id, signature and string representation
// of the event.
func NewEvent(name, rawName string, anonymous bool, inputs Arguments) Event {
// sanitize inputs to remove inputs without names
// and precompute string and sig representation.
names := make([]string, len(inputs))
types := make([]string, len(inputs))
for i, input := range inputs {
if input.Name == "" {
inputs[i] = Argument{
Name: fmt.Sprintf("arg%d", i),
Indexed: input.Indexed,
Type: input.Type,
}
} else {
inputs[i] = input
}
// string representation
names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name)
if input.Indexed {
names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name)
}
// sig representation
types[i] = input.Type.String()
}
str := fmt.Sprintf("event %v(%v)", rawName, strings.Join(names, ", "))
sig := fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
id := common.BytesToHash(crypto.Keccak256([]byte(sig)))
return Event{
Name: name,
RawName: rawName,
Anonymous: anonymous,
Inputs: inputs,
str: str,
Sig: sig,
ID: id,
}
}
func (e Event) String() string {
return e.str
}
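// exampleTransferEvent is a hypothetical, illustrative helper (not part of the public
// API) sketching how the canonical signature and topic-0 hash of the ERC-20 Transfer
// event fall out of NewEvent.
func exampleTransferEvent() Event {
    addressT, _ := NewType("address", "", nil)
    uint256T, _ := NewType("uint256", "", nil)
    ev := NewEvent("Transfer", "Transfer", false, Arguments{
        {Name: "from", Type: addressT, Indexed: true},
        {Name: "to", Type: addressT, Indexed: true},
        {Name: "value", Type: uint256T},
    })
    fmt.Println(ev.Sig) // "Transfer(address,address,uint256)"
    fmt.Println(ev.ID)  // keccak256 of the signature, used as the log's first topic
    return ev
}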

View File

@@ -0,0 +1,167 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"fmt"
"strings"
"github.com/ethereum/go-ethereum/crypto"
)
// FunctionType represents different types of functions a contract might have.
type FunctionType int
const (
// Constructor represents the constructor of the contract.
// The constructor function is called while deploying a contract.
Constructor FunctionType = iota
// Fallback represents the fallback function.
// This function is executed if no other function matches the given function
// signature and no receive function is specified.
Fallback
// Receive represents the receive function.
// This function is executed on plain Ether transfers.
Receive
// Function represents a normal function.
Function
)
// Method represents a callable given a `Name` and whether the method is a constant.
// If the method is `Const` no transaction needs to be created for this
// particular Method call. It can easily be simulated using a local VM.
// For example a `Balance()` method only needs to retrieve something
// from the storage and therefore requires no Tx to be sent to the
// network. A method such as `Transact` does require a Tx and thus will
// be flagged `false`.
// Input specifies the required input parameters for the given method.
type Method struct {
// Name is the method name used for internal representation. It's derived from
// the raw name and a suffix will be added in the case of a function overload.
//
// e.g.
// These are two functions that have the same name:
// * foo(int,int)
// * foo(uint,uint)
// The method name of the first one will be resolved as foo while the second one
// will be resolved as foo0.
Name string
RawName string // RawName is the raw method name parsed from ABI
// Type indicates whether the method is a
// special fallback introduced in solidity v0.6.0
Type FunctionType
// StateMutability indicates the mutability state of the method;
// the default value is nonpayable. It can be empty if the abi
// is generated by a legacy compiler.
StateMutability string
// Legacy indicators generated by compilers before v0.6.0
Constant bool
Payable bool
Inputs Arguments
Outputs Arguments
str string
// Sig returns the method's string signature according to the ABI spec.
// e.g. function foo(uint32 a, int b) = "foo(uint32,int256)"
// Please note that "int" is substituted by its canonical representation "int256"
Sig string
// ID returns the canonical representation of the method's signature used by the
// abi definition to identify method names and types.
ID []byte
}
// NewMethod creates a new Method.
// A method should always be created using NewMethod.
// It also precomputes the sig representation and the string representation
// of the method.
func NewMethod(name string, rawName string, funType FunctionType, mutability string, isConst, isPayable bool, inputs Arguments, outputs Arguments) Method {
var (
types = make([]string, len(inputs))
inputNames = make([]string, len(inputs))
outputNames = make([]string, len(outputs))
)
for i, input := range inputs {
inputNames[i] = fmt.Sprintf("%v %v", input.Type, input.Name)
types[i] = input.Type.String()
}
for i, output := range outputs {
outputNames[i] = output.Type.String()
if len(output.Name) > 0 {
outputNames[i] += fmt.Sprintf(" %v", output.Name)
}
}
// calculate the signature and method id. Note only function
// has meaningful signature and id.
var (
sig string
id []byte
)
if funType == Function {
sig = fmt.Sprintf("%v(%v)", rawName, strings.Join(types, ","))
id = crypto.Keccak256([]byte(sig))[:4]
}
// Extract meaningful state mutability of solidity method.
// If it's default value, never print it.
state := mutability
if state == "nonpayable" {
state = ""
}
if state != "" {
state = state + " "
}
identity := fmt.Sprintf("function %v", rawName)
if funType == Fallback {
identity = "fallback"
} else if funType == Receive {
identity = "receive"
} else if funType == Constructor {
identity = "constructor"
}
str := fmt.Sprintf("%v(%v) %sreturns(%v)", identity, strings.Join(inputNames, ", "), state, strings.Join(outputNames, ", "))
return Method{
Name: name,
RawName: rawName,
Type: funType,
StateMutability: mutability,
Constant: isConst,
Payable: isPayable,
Inputs: inputs,
Outputs: outputs,
str: str,
Sig: sig,
ID: id,
}
}
func (method Method) String() string {
return method.str
}
// IsConstant returns the indicator whether the method is read-only.
func (method Method) IsConstant() bool {
return method.StateMutability == "view" || method.StateMutability == "pure" || method.Constant
}
// IsPayable returns the indicator whether the method can process
// plain ether transfers.
func (method Method) IsPayable() bool {
return method.StateMutability == "payable" || method.Payable
}
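
A small, hedged sketch of NewMethod computing a selector; the transfer(address,uint256) signature used here is illustrative:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	addressT, _ := abi.NewType("address", "", nil)
	uint256T, _ := abi.NewType("uint256", "", nil)
	boolT, _ := abi.NewType("bool", "", nil)

	// NewMethod precomputes Sig, ID and the string form; only Function types get an ID.
	m := abi.NewMethod("transfer", "transfer", abi.Function, "nonpayable", false, false,
		abi.Arguments{{Name: "to", Type: addressT}, {Name: "amount", Type: uint256T}},
		abi.Arguments{{Type: boolT}},
	)
	fmt.Println(m.Sig)          // "transfer(address,uint256)"
	fmt.Printf("0x%x\n", m.ID)  // 0xa9059cbb, the well-known ERC-20 transfer selector
	fmt.Println(m.IsConstant()) // false
}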

View File

@@ -0,0 +1,85 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"errors"
"fmt"
"math/big"
"reflect"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
)
// packBytesSlice packs the given bytes as [L, V], the canonical representation of a
// bytes slice.
func packBytesSlice(bytes []byte, l int) []byte {
len := packNum(reflect.ValueOf(l))
return append(len, common.RightPadBytes(bytes, (l+31)/32*32)...)
}
// packElement packs the given reflect value according to the abi specification in
// t.
func packElement(t Type, reflectValue reflect.Value) ([]byte, error) {
switch t.T {
case IntTy, UintTy:
return packNum(reflectValue), nil
case StringTy:
return packBytesSlice([]byte(reflectValue.String()), reflectValue.Len()), nil
case AddressTy:
if reflectValue.Kind() == reflect.Array {
reflectValue = mustArrayToByteSlice(reflectValue)
}
return common.LeftPadBytes(reflectValue.Bytes(), 32), nil
case BoolTy:
if reflectValue.Bool() {
return math.PaddedBigBytes(common.Big1, 32), nil
}
return math.PaddedBigBytes(common.Big0, 32), nil
case BytesTy:
if reflectValue.Kind() == reflect.Array {
reflectValue = mustArrayToByteSlice(reflectValue)
}
if reflectValue.Type() != reflect.TypeOf([]byte{}) {
return []byte{}, errors.New("Bytes type is neither slice nor array")
}
return packBytesSlice(reflectValue.Bytes(), reflectValue.Len()), nil
case FixedBytesTy, FunctionTy:
if reflectValue.Kind() == reflect.Array {
reflectValue = mustArrayToByteSlice(reflectValue)
}
return common.RightPadBytes(reflectValue.Bytes(), 32), nil
default:
return []byte{}, fmt.Errorf("Could not pack element, unknown type: %v", t.T)
}
}
// packNum packs the given number (using the reflect value) and will cast it to appropriate number representation.
func packNum(value reflect.Value) []byte {
switch kind := value.Kind(); kind {
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return math.U256Bytes(new(big.Int).SetUint64(value.Uint()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return math.U256Bytes(big.NewInt(value.Int()))
case reflect.Ptr:
return math.U256Bytes(new(big.Int).Set(value.Interface().(*big.Int)))
default:
panic("abi: fatal error")
}
}
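
Because packBytesSlice and packNum are unexported, a usage sketch has to live inside the abi package (for instance in a _test.go file); the demoPackBytesSlice helper below is hypothetical:

package abi

import "fmt"

// demoPackBytesSlice is an illustrative in-package sketch, not part of the vendored file.
func demoPackBytesSlice() {
	out := packBytesSlice([]byte("hi"), 2)
	// First 32 bytes hold the big-endian length word (2); the next 32 bytes hold
	// "hi" right-padded with zeros to a full word, so the result is 64 bytes long.
	fmt.Println(len(out), out[31]) // 64 2
}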

View File

@@ -0,0 +1,264 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"errors"
"fmt"
"math/big"
"reflect"
"strings"
)
// ConvertType converts an interface of a runtime type into an interface of the
// given type, e.g. turn this code:
//
// var fields []reflect.StructField
//
// fields = append(fields, reflect.StructField{
// Name: "X",
// Type: reflect.TypeOf(new(big.Int)),
// Tag: reflect.StructTag("json:\"" + "x" + "\""),
// }
//
// into:
//
// type TupleT struct { X *big.Int }
func ConvertType(in interface{}, proto interface{}) interface{} {
protoType := reflect.TypeOf(proto)
if reflect.TypeOf(in).ConvertibleTo(protoType) {
return reflect.ValueOf(in).Convert(protoType).Interface()
}
// Use set as a last ditch effort
if err := set(reflect.ValueOf(proto), reflect.ValueOf(in)); err != nil {
panic(err)
}
return proto
}
// indirect recursively dereferences the value until it either gets the value
// or finds a big.Int
func indirect(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Ptr && v.Elem().Type() != reflect.TypeOf(big.Int{}) {
return indirect(v.Elem())
}
return v
}
// reflectIntType returns the reflect type of an integer with the given size and
// signedness.
func reflectIntType(unsigned bool, size int) reflect.Type {
if unsigned {
switch size {
case 8:
return reflect.TypeOf(uint8(0))
case 16:
return reflect.TypeOf(uint16(0))
case 32:
return reflect.TypeOf(uint32(0))
case 64:
return reflect.TypeOf(uint64(0))
}
}
switch size {
case 8:
return reflect.TypeOf(int8(0))
case 16:
return reflect.TypeOf(int16(0))
case 32:
return reflect.TypeOf(int32(0))
case 64:
return reflect.TypeOf(int64(0))
}
return reflect.TypeOf(&big.Int{})
}
// mustArrayToByteSlice creates a new byte slice with the exact same size as value
// and copies the bytes in value to the new slice.
func mustArrayToByteSlice(value reflect.Value) reflect.Value {
slice := reflect.MakeSlice(reflect.TypeOf([]byte{}), value.Len(), value.Len())
reflect.Copy(slice, value)
return slice
}
// set attempts to assign src to dst by either setting, copying or otherwise.
//
// set is a bit more lenient when it comes to assignment and doesn't enforce as
// strict a ruleset as bare `reflect` does.
func set(dst, src reflect.Value) error {
dstType, srcType := dst.Type(), src.Type()
switch {
case dstType.Kind() == reflect.Interface && dst.Elem().IsValid() && (dst.Elem().Type().Kind() == reflect.Ptr || dst.Elem().CanSet()):
return set(dst.Elem(), src)
case dstType.Kind() == reflect.Ptr && dstType.Elem() != reflect.TypeOf(big.Int{}):
return set(dst.Elem(), src)
case srcType.AssignableTo(dstType) && dst.CanSet():
dst.Set(src)
case dstType.Kind() == reflect.Slice && srcType.Kind() == reflect.Slice && dst.CanSet():
return setSlice(dst, src)
case dstType.Kind() == reflect.Array:
return setArray(dst, src)
case dstType.Kind() == reflect.Struct:
return setStruct(dst, src)
default:
return fmt.Errorf("abi: cannot unmarshal %v in to %v", src.Type(), dst.Type())
}
return nil
}
// setSlice attempts to assign src to dst when slices are not assignable by default
// e.g. src: [][]byte -> dst: [][15]byte
// setSlice returns an error if it cannot copy all of src's elements.
func setSlice(dst, src reflect.Value) error {
slice := reflect.MakeSlice(dst.Type(), src.Len(), src.Len())
for i := 0; i < src.Len(); i++ {
if err := set(slice.Index(i), src.Index(i)); err != nil {
return err
}
}
if dst.CanSet() {
dst.Set(slice)
return nil
}
return errors.New("Cannot set slice, destination not settable")
}
func setArray(dst, src reflect.Value) error {
if src.Kind() == reflect.Ptr {
return set(dst, indirect(src))
}
array := reflect.New(dst.Type()).Elem()
min := src.Len()
if src.Len() > dst.Len() {
min = dst.Len()
}
for i := 0; i < min; i++ {
if err := set(array.Index(i), src.Index(i)); err != nil {
return err
}
}
if dst.CanSet() {
dst.Set(array)
return nil
}
return errors.New("Cannot set array, destination not settable")
}
func setStruct(dst, src reflect.Value) error {
for i := 0; i < src.NumField(); i++ {
srcField := src.Field(i)
dstField := dst.Field(i)
if !dstField.IsValid() || !srcField.IsValid() {
return fmt.Errorf("Could not find src field: %v value: %v in destination", srcField.Type().Name(), srcField)
}
if err := set(dstField, srcField); err != nil {
return err
}
}
return nil
}
// mapArgNamesToStructFields maps a slice of argument names to struct fields.
//
// first round: for each exportable field that contains an `abi:""` tag whose value
// exists in the given argument name list, pair them together.
//
// second round: for each argument name that has not already been linked, find the
// struct field it is expected to map to; if it exists and has not been used, pair them.
//
// Note this function assumes the given value is a struct value.
func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) {
typ := value.Type()
abi2struct := make(map[string]string)
struct2abi := make(map[string]string)
// first round ~~~
for i := 0; i < typ.NumField(); i++ {
structFieldName := typ.Field(i).Name
// skip private struct fields.
if structFieldName[:1] != strings.ToUpper(structFieldName[:1]) {
continue
}
// skip fields that have no abi:"" tag.
tagName, ok := typ.Field(i).Tag.Lookup("abi")
if !ok {
continue
}
// check if tag is empty.
if tagName == "" {
return nil, fmt.Errorf("struct: abi tag in '%s' is empty", structFieldName)
}
// check which argument field matches with the abi tag.
found := false
for _, arg := range argNames {
if arg == tagName {
if abi2struct[arg] != "" {
return nil, fmt.Errorf("struct: abi tag in '%s' already mapped", structFieldName)
}
// pair them
abi2struct[arg] = structFieldName
struct2abi[structFieldName] = arg
found = true
}
}
// check if this tag has been mapped.
if !found {
return nil, fmt.Errorf("struct: abi tag '%s' defined but not found in abi", tagName)
}
}
// second round ~~~
for _, argName := range argNames {
structFieldName := ToCamelCase(argName)
if structFieldName == "" {
return nil, fmt.Errorf("abi: purely underscored output cannot unpack to struct")
}
// this abi has already been paired, skip it... unless there exists another, yet unassigned
// struct field with the same field name. If so, raise an error:
// abi: [ { "name": "value" } ]
// struct { Value *big.Int , Value1 *big.Int `abi:"value"`}
if abi2struct[argName] != "" {
if abi2struct[argName] != structFieldName &&
struct2abi[structFieldName] == "" &&
value.FieldByName(structFieldName).IsValid() {
return nil, fmt.Errorf("abi: multiple variables maps to the same abi field '%s'", argName)
}
continue
}
// return an error if this struct field has already been paired.
if struct2abi[structFieldName] != "" {
return nil, fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", structFieldName)
}
if value.FieldByName(structFieldName).IsValid() {
// pair them
abi2struct[argName] = structFieldName
struct2abi[structFieldName] = argName
} else {
// not paired, but annotate as used, to detect cases like
// abi : [ { "name": "value" }, { "name": "_value" } ]
// struct { Value *big.Int }
struct2abi[structFieldName] = argName
}
}
return abi2struct, nil
}
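
A hedged sketch of ConvertType in the spirit of its doc comment, converting a value of an identical anonymous struct type into a named tuple struct; the Point type is made up for illustration:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

// Point is a hypothetical user-defined tuple struct.
type Point struct {
	X *big.Int
	Y *big.Int
}

func main() {
	// A value whose anonymous struct type is structurally identical to Point,
	// e.g. as produced via reflect.StructOf during unpacking.
	in := struct {
		X *big.Int
		Y *big.Int
	}{big.NewInt(1), big.NewInt(2)}

	// ConvertType converts the value into the prototype's type when convertible.
	p := abi.ConvertType(in, Point{}).(Point)
	fmt.Println(p.X, p.Y) // 1 2
}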

View File

@@ -0,0 +1,176 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"fmt"
)
type SelectorMarshaling struct {
Name string `json:"name"`
Type string `json:"type"`
Inputs []ArgumentMarshaling `json:"inputs"`
}
func isDigit(c byte) bool {
return c >= '0' && c <= '9'
}
func isAlpha(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
func isIdentifierSymbol(c byte) bool {
return c == '$' || c == '_'
}
func parseToken(unescapedSelector string, isIdent bool) (string, string, error) {
if len(unescapedSelector) == 0 {
return "", "", fmt.Errorf("empty token")
}
firstChar := unescapedSelector[0]
position := 1
if !(isAlpha(firstChar) || (isIdent && isIdentifierSymbol(firstChar))) {
return "", "", fmt.Errorf("invalid token start: %c", firstChar)
}
for position < len(unescapedSelector) {
char := unescapedSelector[position]
if !(isAlpha(char) || isDigit(char) || (isIdent && isIdentifierSymbol(char))) {
break
}
position++
}
return unescapedSelector[:position], unescapedSelector[position:], nil
}
func parseIdentifier(unescapedSelector string) (string, string, error) {
return parseToken(unescapedSelector, true)
}
func parseElementaryType(unescapedSelector string) (string, string, error) {
parsedType, rest, err := parseToken(unescapedSelector, false)
if err != nil {
return "", "", fmt.Errorf("failed to parse elementary type: %v", err)
}
// handle arrays
for len(rest) > 0 && rest[0] == '[' {
parsedType = parsedType + string(rest[0])
rest = rest[1:]
for len(rest) > 0 && isDigit(rest[0]) {
parsedType = parsedType + string(rest[0])
rest = rest[1:]
}
if len(rest) == 0 || rest[0] != ']' {
return "", "", fmt.Errorf("failed to parse array: expected ']', got '%s'", rest)
}
parsedType = parsedType + string(rest[0])
rest = rest[1:]
}
return parsedType, rest, nil
}
func parseCompositeType(unescapedSelector string) ([]interface{}, string, error) {
if len(unescapedSelector) == 0 || unescapedSelector[0] != '(' {
// avoid indexing into an empty string when reporting the error
return nil, "", fmt.Errorf("expected '(', got '%s'", unescapedSelector)
}
parsedType, rest, err := parseType(unescapedSelector[1:])
if err != nil {
return nil, "", fmt.Errorf("failed to parse type: %v", err)
}
result := []interface{}{parsedType}
for len(rest) > 0 && rest[0] != ')' {
parsedType, rest, err = parseType(rest[1:])
if err != nil {
return nil, "", fmt.Errorf("failed to parse type: %v", err)
}
result = append(result, parsedType)
}
if len(rest) == 0 || rest[0] != ')' {
return nil, "", fmt.Errorf("expected ')', got '%s'", rest)
}
if len(rest) >= 3 && rest[1] == '[' && rest[2] == ']' {
return append(result, "[]"), rest[3:], nil
}
return result, rest[1:], nil
}
func parseType(unescapedSelector string) (interface{}, string, error) {
if len(unescapedSelector) == 0 {
return nil, "", fmt.Errorf("empty type")
}
if unescapedSelector[0] == '(' {
return parseCompositeType(unescapedSelector)
} else {
return parseElementaryType(unescapedSelector)
}
}
func assembleArgs(args []interface{}) ([]ArgumentMarshaling, error) {
arguments := make([]ArgumentMarshaling, 0)
for i, arg := range args {
// generate dummy name to avoid unmarshal issues
name := fmt.Sprintf("name%d", i)
if s, ok := arg.(string); ok {
arguments = append(arguments, ArgumentMarshaling{name, s, s, nil, false})
} else if components, ok := arg.([]interface{}); ok {
subArgs, err := assembleArgs(components)
if err != nil {
return nil, fmt.Errorf("failed to assemble components: %v", err)
}
tupleType := "tuple"
if len(subArgs) != 0 && subArgs[len(subArgs)-1].Type == "[]" {
subArgs = subArgs[:len(subArgs)-1]
tupleType = "tuple[]"
}
arguments = append(arguments, ArgumentMarshaling{name, tupleType, tupleType, subArgs, false})
} else {
return nil, fmt.Errorf("failed to assemble args: unexpected type %T", arg)
}
}
return arguments, nil
}
// ParseSelector converts a method selector into a struct that can be JSON encoded
// and consumed by other functions in this package.
// Note, although uppercase letters are not part of the ABI spec, this function
// still accepts them as the general format is valid.
func ParseSelector(unescapedSelector string) (SelectorMarshaling, error) {
name, rest, err := parseIdentifier(unescapedSelector)
if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
}
args := []interface{}{}
if len(rest) >= 2 && rest[0] == '(' && rest[1] == ')' {
rest = rest[2:]
} else {
args, rest, err = parseCompositeType(rest)
if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': %v", unescapedSelector, err)
}
}
if len(rest) > 0 {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector '%s': unexpected string '%s'", unescapedSelector, rest)
}
// Reassemble the fake ABI and construct the JSON
fakeArgs, err := assembleArgs(args)
if err != nil {
return SelectorMarshaling{}, fmt.Errorf("failed to parse selector: %v", err)
}
return SelectorMarshaling{name, "function", fakeArgs}, nil
}
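
A brief usage sketch of ParseSelector; the selector string is illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	sel, err := abi.ParseSelector("transfer(address,uint256)")
	if err != nil {
		panic(err)
	}
	// The result carries dummy argument names (name0, name1, ...) and can be
	// re-marshalled into a one-entry ABI JSON fragment.
	blob, _ := json.Marshal(sel)
	fmt.Println(string(blob))
}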

View File

@@ -0,0 +1,173 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"encoding/binary"
"errors"
"fmt"
"math/big"
"reflect"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
)
// MakeTopics converts a filter query argument list into a filter topic set.
func MakeTopics(query ...[]interface{}) ([][]common.Hash, error) {
topics := make([][]common.Hash, len(query))
for i, filter := range query {
for _, rule := range filter {
var topic common.Hash
// Try to generate the topic based on simple types
switch rule := rule.(type) {
case common.Hash:
copy(topic[:], rule[:])
case common.Address:
copy(topic[common.HashLength-common.AddressLength:], rule[:])
case *big.Int:
blob := rule.Bytes()
copy(topic[common.HashLength-len(blob):], blob)
case bool:
if rule {
topic[common.HashLength-1] = 1
}
case int8:
copy(topic[:], genIntType(int64(rule), 1))
case int16:
copy(topic[:], genIntType(int64(rule), 2))
case int32:
copy(topic[:], genIntType(int64(rule), 4))
case int64:
copy(topic[:], genIntType(rule, 8))
case uint8:
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
copy(topic[common.HashLength-len(blob):], blob)
case uint16:
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
copy(topic[common.HashLength-len(blob):], blob)
case uint32:
blob := new(big.Int).SetUint64(uint64(rule)).Bytes()
copy(topic[common.HashLength-len(blob):], blob)
case uint64:
blob := new(big.Int).SetUint64(rule).Bytes()
copy(topic[common.HashLength-len(blob):], blob)
case string:
hash := crypto.Keccak256Hash([]byte(rule))
copy(topic[:], hash[:])
case []byte:
hash := crypto.Keccak256Hash(rule)
copy(topic[:], hash[:])
default:
// todo(rjl493456442) according to the solidity documentation, indexed event
// parameters that are not value types, i.e. arrays and structs, are not
// stored directly but instead a keccak256-hash of an encoding is stored.
//
// We only convert strings and bytes to hashes; we still need to deal with
// arrays (both fixed-size and dynamic-size) and structs.
// Attempt to generate the topic from funky types
val := reflect.ValueOf(rule)
switch {
// static byte array
case val.Kind() == reflect.Array && reflect.TypeOf(rule).Elem().Kind() == reflect.Uint8:
reflect.Copy(reflect.ValueOf(topic[:val.Len()]), val)
default:
return nil, fmt.Errorf("unsupported indexed type: %T", rule)
}
}
topics[i] = append(topics[i], topic)
}
}
return topics, nil
}
func genIntType(rule int64, size uint) []byte {
var topic [common.HashLength]byte
if rule < 0 {
// if a rule is negative, we need to put it into two's complement form,
// sign-extended to common.HashLength bytes.
topic = [common.HashLength]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}
}
for i := uint(0); i < size; i++ {
topic[common.HashLength-i-1] = byte(rule >> (i * 8))
}
return topic[:]
}
// ParseTopics converts the indexed topic fields into actual log field values.
func ParseTopics(out interface{}, fields Arguments, topics []common.Hash) error {
return parseTopicWithSetter(fields, topics,
func(arg Argument, reconstr interface{}) {
field := reflect.ValueOf(out).Elem().FieldByName(ToCamelCase(arg.Name))
field.Set(reflect.ValueOf(reconstr))
})
}
// ParseTopicsIntoMap converts the indexed topic field-value pairs into map key-value pairs.
func ParseTopicsIntoMap(out map[string]interface{}, fields Arguments, topics []common.Hash) error {
return parseTopicWithSetter(fields, topics,
func(arg Argument, reconstr interface{}) {
out[arg.Name] = reconstr
})
}
// parseTopicWithSetter converts the indexed topic field-value pairs and stores them using the
// provided set function.
//
// Note, dynamic types cannot be reconstructed since they get mapped to Keccak256
// hashes as the topic value!
func parseTopicWithSetter(fields Arguments, topics []common.Hash, setter func(Argument, interface{})) error {
// Sanity check that the fields and topics match up
if len(fields) != len(topics) {
return errors.New("topic/field count mismatch")
}
// Iterate over all the fields and reconstruct them from topics
for i, arg := range fields {
if !arg.Indexed {
return errors.New("non-indexed field in topic reconstruction")
}
var reconstr interface{}
switch arg.Type.T {
case TupleTy:
return errors.New("tuple type in topic reconstruction")
case StringTy, BytesTy, SliceTy, ArrayTy:
// Array types (including strings and bytes) have their keccak256 hashes stored in the topic, not a hash
// whose bytes can be decoded to the actual value, so the best we can do is retrieve that hash
reconstr = topics[i]
case FunctionTy:
if garbage := binary.BigEndian.Uint64(topics[i][0:8]); garbage != 0 {
return fmt.Errorf("bind: got improperly encoded function type, got %v", topics[i].Bytes())
}
var tmp [24]byte
copy(tmp[:], topics[i][8:32])
reconstr = tmp
default:
var err error
reconstr, err = toGoType(0, arg.Type, topics[i].Bytes())
if err != nil {
return err
}
}
// Use the setter function to store the value
setter(arg, reconstr)
}
return nil
}
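
A short sketch of MakeTopics building a topic set for two indexed arguments; the address and value are illustrative:

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/accounts/abi"
	"github.com/ethereum/go-ethereum/common"
)

func main() {
	sender := common.HexToAddress("0x0000000000000000000000000000000000000001")
	topics, err := abi.MakeTopics(
		[]interface{}{sender},         // first indexed argument: an address
		[]interface{}{big.NewInt(42)}, // second indexed argument: a *big.Int
	)
	if err != nil {
		panic(err)
	}
	// Each rule is left-padded into a 32-byte topic hash.
	fmt.Println(topics[0][0].Hex())
	fmt.Println(topics[1][0].Hex())
}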

View File

@@ -0,0 +1,423 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"github.com/ethereum/go-ethereum/common"
)
// Type enumerator
const (
IntTy byte = iota
UintTy
BoolTy
StringTy
SliceTy
ArrayTy
TupleTy
AddressTy
FixedBytesTy
BytesTy
HashTy
FixedPointTy
FunctionTy
)
// Type is the reflection of the supported argument type.
type Type struct {
Elem *Type
Size int
T byte // Our own type checking
stringKind string // holds the unparsed string for deriving signatures
// Tuple relative fields
TupleRawName string // Raw struct name defined in source code, may be empty.
TupleElems []*Type // Type information of all tuple fields
TupleRawNames []string // Raw field name of all tuple fields
TupleType reflect.Type // Underlying struct of the tuple
}
var (
// typeRegex parses the abi sub types
typeRegex = regexp.MustCompile("([a-zA-Z]+)(([0-9]+)(x([0-9]+))?)?")
)
// NewType creates a new reflection type of abi type given in t.
func NewType(t string, internalType string, components []ArgumentMarshaling) (typ Type, err error) {
// check that array brackets are equal if they exist
if strings.Count(t, "[") != strings.Count(t, "]") {
return Type{}, fmt.Errorf("invalid arg type in abi")
}
typ.stringKind = t
// if there are brackets, get ready to go into slice/array mode and
// recursively create the type
if strings.Count(t, "[") != 0 {
// Note internalType can be empty here.
subInternal := internalType
if i := strings.LastIndex(internalType, "["); i != -1 {
subInternal = subInternal[:i]
}
// recursively embed the type
i := strings.LastIndex(t, "[")
embeddedType, err := NewType(t[:i], subInternal, components)
if err != nil {
return Type{}, err
}
// grab the last cell and create a type from there
sliced := t[i:]
// grab the slice size with regexp
re := regexp.MustCompile("[0-9]+")
intz := re.FindAllString(sliced, -1)
if len(intz) == 0 {
// is a slice
typ.T = SliceTy
typ.Elem = &embeddedType
typ.stringKind = embeddedType.stringKind + sliced
} else if len(intz) == 1 {
// is an array
typ.T = ArrayTy
typ.Elem = &embeddedType
typ.Size, err = strconv.Atoi(intz[0])
if err != nil {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
typ.stringKind = embeddedType.stringKind + sliced
} else {
return Type{}, fmt.Errorf("invalid formatting of array type")
}
return typ, err
}
// parse the type and size of the abi-type.
matches := typeRegex.FindAllStringSubmatch(t, -1)
if len(matches) == 0 {
return Type{}, fmt.Errorf("invalid type '%v'", t)
}
parsedType := matches[0]
// varSize is the size of the variable
var varSize int
if len(parsedType[3]) > 0 {
var err error
varSize, err = strconv.Atoi(parsedType[2])
if err != nil {
return Type{}, fmt.Errorf("abi: error parsing variable size: %v", err)
}
} else {
if parsedType[0] == "uint" || parsedType[0] == "int" {
// this should fail because it means that there's something wrong with
// the abi type (the compiler should always format it to the size...always)
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
}
// varType is the parsed abi type
switch varType := parsedType[1]; varType {
case "int":
typ.Size = varSize
typ.T = IntTy
case "uint":
typ.Size = varSize
typ.T = UintTy
case "bool":
typ.T = BoolTy
case "address":
typ.Size = 20
typ.T = AddressTy
case "string":
typ.T = StringTy
case "bytes":
if varSize == 0 {
typ.T = BytesTy
} else {
typ.T = FixedBytesTy
typ.Size = varSize
}
case "tuple":
var (
fields []reflect.StructField
elems []*Type
names []string
expression string // canonical parameter expression
used = make(map[string]bool)
)
expression += "("
for idx, c := range components {
cType, err := NewType(c.Type, c.InternalType, c.Components)
if err != nil {
return Type{}, err
}
name := ToCamelCase(c.Name)
if name == "" {
return Type{}, errors.New("abi: purely anonymous or underscored field is not supported")
}
fieldName := ResolveNameConflict(name, func(s string) bool { return used[s] })
if err != nil {
return Type{}, err
}
used[fieldName] = true
if !isValidFieldName(fieldName) {
return Type{}, fmt.Errorf("field %d has invalid name", idx)
}
fields = append(fields, reflect.StructField{
Name: fieldName, // reflect.StructOf will panic for any unexported field.
Type: cType.GetType(),
Tag: reflect.StructTag("json:\"" + c.Name + "\""),
})
elems = append(elems, &cType)
names = append(names, c.Name)
expression += cType.stringKind
if idx != len(components)-1 {
expression += ","
}
}
expression += ")"
typ.TupleType = reflect.StructOf(fields)
typ.TupleElems = elems
typ.TupleRawNames = names
typ.T = TupleTy
typ.stringKind = expression
const structPrefix = "struct "
// After solidity 0.5.10, a new field of abi "internalType"
// is introduced. From that we can obtain the struct name
// user defined in the source code.
if internalType != "" && strings.HasPrefix(internalType, structPrefix) {
// Foo.Bar type definition is not allowed in golang,
// convert the format to FooBar
typ.TupleRawName = strings.ReplaceAll(internalType[len(structPrefix):], ".", "")
}
case "function":
typ.T = FunctionTy
typ.Size = 24
default:
return Type{}, fmt.Errorf("unsupported arg type: %s", t)
}
return
}
// GetType returns the reflection type of the ABI type.
func (t Type) GetType() reflect.Type {
switch t.T {
case IntTy:
return reflectIntType(false, t.Size)
case UintTy:
return reflectIntType(true, t.Size)
case BoolTy:
return reflect.TypeOf(false)
case StringTy:
return reflect.TypeOf("")
case SliceTy:
return reflect.SliceOf(t.Elem.GetType())
case ArrayTy:
return reflect.ArrayOf(t.Size, t.Elem.GetType())
case TupleTy:
return t.TupleType
case AddressTy:
return reflect.TypeOf(common.Address{})
case FixedBytesTy:
return reflect.ArrayOf(t.Size, reflect.TypeOf(byte(0)))
case BytesTy:
return reflect.SliceOf(reflect.TypeOf(byte(0)))
case HashTy:
// hashtype currently not used
return reflect.ArrayOf(32, reflect.TypeOf(byte(0)))
case FixedPointTy:
// fixedpoint type currently not used
return reflect.ArrayOf(32, reflect.TypeOf(byte(0)))
case FunctionTy:
return reflect.ArrayOf(24, reflect.TypeOf(byte(0)))
default:
panic("Invalid type")
}
}
// String implements Stringer.
func (t Type) String() (out string) {
return t.stringKind
}
func (t Type) pack(v reflect.Value) ([]byte, error) {
// dereference pointer first if it's a pointer
v = indirect(v)
if err := typeCheck(t, v); err != nil {
return nil, err
}
switch t.T {
case SliceTy, ArrayTy:
var ret []byte
if t.requiresLengthPrefix() {
// append length
ret = append(ret, packNum(reflect.ValueOf(v.Len()))...)
}
// calculate offset if any
offset := 0
offsetReq := isDynamicType(*t.Elem)
if offsetReq {
offset = getTypeSize(*t.Elem) * v.Len()
}
var tail []byte
for i := 0; i < v.Len(); i++ {
val, err := t.Elem.pack(v.Index(i))
if err != nil {
return nil, err
}
if !offsetReq {
ret = append(ret, val...)
continue
}
ret = append(ret, packNum(reflect.ValueOf(offset))...)
offset += len(val)
tail = append(tail, val...)
}
return append(ret, tail...), nil
case TupleTy:
// (T1,...,Tk) for k >= 0 and any types T1, …, Tk
// enc(X) = head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(k))
// where X = (X(1), ..., X(k)) and head and tail are defined for Ti being a static
// type as
// head(X(i)) = enc(X(i)) and tail(X(i)) = "" (the empty string)
// and as
// head(X(i)) = enc(len(head(X(1)) ... head(X(k)) tail(X(1)) ... tail(X(i-1))))
// tail(X(i)) = enc(X(i))
// otherwise, i.e. if Ti is a dynamic type.
fieldmap, err := mapArgNamesToStructFields(t.TupleRawNames, v)
if err != nil {
return nil, err
}
// Calculate prefix occupied size.
offset := 0
for _, elem := range t.TupleElems {
offset += getTypeSize(*elem)
}
var ret, tail []byte
for i, elem := range t.TupleElems {
field := v.FieldByName(fieldmap[t.TupleRawNames[i]])
if !field.IsValid() {
return nil, fmt.Errorf("field %s for tuple not found in the given struct", t.TupleRawNames[i])
}
val, err := elem.pack(field)
if err != nil {
return nil, err
}
if isDynamicType(*elem) {
ret = append(ret, packNum(reflect.ValueOf(offset))...)
tail = append(tail, val...)
offset += len(val)
} else {
ret = append(ret, val...)
}
}
return append(ret, tail...), nil
default:
return packElement(t, v)
}
}
// requiresLengthPrefix returns whether the type requires any sort of length
// prefixing.
func (t Type) requiresLengthPrefix() bool {
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy
}
// isDynamicType returns true if the type is dynamic.
// The following types are called “dynamic”:
// * bytes
// * string
// * T[] for any T
// * T[k] for any dynamic T and any k >= 0
// * (T1,...,Tk) if Ti is dynamic for some 1 <= i <= k
func isDynamicType(t Type) bool {
if t.T == TupleTy {
for _, elem := range t.TupleElems {
if isDynamicType(*elem) {
return true
}
}
return false
}
return t.T == StringTy || t.T == BytesTy || t.T == SliceTy || (t.T == ArrayTy && isDynamicType(*t.Elem))
}
// getTypeSize returns the size that this type needs to occupy.
// We distinguish static and dynamic types. Static types are encoded in-place
// and dynamic types are encoded at a separately allocated location after the
// current block.
// So for a static variable, the size returned represents the size that the
// variable actually occupies.
// For a dynamic variable, the returned size is fixed 32 bytes, which is used
// to store the location reference for actual value storage.
func getTypeSize(t Type) int {
if t.T == ArrayTy && !isDynamicType(*t.Elem) {
// Recursively calculate type size if it is a nested array
if t.Elem.T == ArrayTy || t.Elem.T == TupleTy {
return t.Size * getTypeSize(*t.Elem)
}
return t.Size * 32
} else if t.T == TupleTy && !isDynamicType(t) {
total := 0
for _, elem := range t.TupleElems {
total += getTypeSize(*elem)
}
return total
}
return 32
}
// isLetter reports whether a given 'rune' is classified as a Letter.
// This method is copied from reflect/type.go
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
}
// isValidFieldName checks if a string is a valid (struct) field name or not.
//
// According to the language spec, a field name should be an identifier.
//
// identifier = letter { letter | unicode_digit } .
// letter = unicode_letter | "_" .
// This method is copied from reflect/type.go
func isValidFieldName(fieldName string) bool {
for i, c := range fieldName {
if i == 0 && !isLetter(c) {
return false
}
if !(isLetter(c) || unicode.IsDigit(c)) {
return false
}
}
return len(fieldName) > 0
}
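
A minimal sketch of NewType and GetType for an array type; the uint256[3] type string is illustrative:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	t, err := abi.NewType("uint256[3]", "", nil)
	if err != nil {
		panic(err)
	}
	// The array wraps an embedded uint256 element type.
	fmt.Println(t.T == abi.ArrayTy, t.Size, t.Elem.T == abi.UintTy) // true 3 true
	fmt.Println(t.String())                                         // uint256[3]
	fmt.Println(t.GetType())                                        // [3]*big.Int
}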

View File

@@ -0,0 +1,297 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import (
"encoding/binary"
"fmt"
"math/big"
"reflect"
"github.com/ethereum/go-ethereum/common"
)
var (
// MaxUint256 is the maximum value that can be represented by a uint256.
MaxUint256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 256), common.Big1)
// MaxInt256 is the maximum value that can be represented by a int256.
MaxInt256 = new(big.Int).Sub(new(big.Int).Lsh(common.Big1, 255), common.Big1)
)
// ReadInteger reads the integer based on its kind and returns the appropriate value.
func ReadInteger(typ Type, b []byte) interface{} {
if typ.T == UintTy {
switch typ.Size {
case 8:
return b[len(b)-1]
case 16:
return binary.BigEndian.Uint16(b[len(b)-2:])
case 32:
return binary.BigEndian.Uint32(b[len(b)-4:])
case 64:
return binary.BigEndian.Uint64(b[len(b)-8:])
default:
// the only case left for unsigned integer is uint256.
return new(big.Int).SetBytes(b)
}
}
switch typ.Size {
case 8:
return int8(b[len(b)-1])
case 16:
return int16(binary.BigEndian.Uint16(b[len(b)-2:]))
case 32:
return int32(binary.BigEndian.Uint32(b[len(b)-4:]))
case 64:
return int64(binary.BigEndian.Uint64(b[len(b)-8:]))
default:
// the only case left for integer is int256
// big.SetBytes can't tell if a number is negative or positive in itself.
// On EVM, if the returned number > max int256, it is negative.
// A number is > max int256 if the bit at position 255 is set.
ret := new(big.Int).SetBytes(b)
if ret.Bit(255) == 1 {
ret.Add(MaxUint256, new(big.Int).Neg(ret))
ret.Add(ret, common.Big1)
ret.Neg(ret)
}
return ret
}
}
// readBool reads a bool.
func readBool(word []byte) (bool, error) {
for _, b := range word[:31] {
if b != 0 {
return false, errBadBool
}
}
switch word[31] {
case 0:
return false, nil
case 1:
return true, nil
default:
return false, errBadBool
}
}
// A function type is simply the address with the function selector at the end.
//
// readFunctionType enforces that standard by always presenting it as a 24-byte array (20-byte address + 4-byte selector = 24 bytes)
func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
if t.T != FunctionTy {
return [24]byte{}, fmt.Errorf("abi: invalid type in call to make function type byte array")
}
if garbage := binary.BigEndian.Uint64(word[24:32]); garbage != 0 {
err = fmt.Errorf("abi: got improperly encoded function type, got %v", word)
} else {
copy(funcTy[:], word[0:24])
}
return
}
// ReadFixedBytes uses reflection to create a fixed array to be read from.
func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
if t.T != FixedBytesTy {
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
}
// convert
array := reflect.New(t.GetType()).Elem()
reflect.Copy(array, reflect.ValueOf(word[0:t.Size]))
return array.Interface(), nil
}
// forEachUnpack iteratively unpack elements.
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
if size < 0 {
return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size)
}
if start+32*size > len(output) {
return nil, fmt.Errorf("abi: cannot marshal into go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
}
// this value will become our slice or our array, depending on the type
var refSlice reflect.Value
if t.T == SliceTy {
// declare our slice
refSlice = reflect.MakeSlice(t.GetType(), size, size)
} else if t.T == ArrayTy {
// declare our array
refSlice = reflect.New(t.GetType()).Elem()
} else {
return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
}
// Arrays have packed elements, resulting in longer unpack steps.
// Slices have just 32 bytes per element (pointing to the contents).
elemSize := getTypeSize(*t.Elem)
for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {
inter, err := toGoType(i, *t.Elem, output)
if err != nil {
return nil, err
}
// append the item to our reflect slice
refSlice.Index(j).Set(reflect.ValueOf(inter))
}
// return the interface
return refSlice.Interface(), nil
}
func forTupleUnpack(t Type, output []byte) (interface{}, error) {
retval := reflect.New(t.GetType()).Elem()
virtualArgs := 0
for index, elem := range t.TupleElems {
marshalledValue, err := toGoType((index+virtualArgs)*32, *elem, output)
if err != nil {
return nil, err
}
if elem.T == ArrayTy && !isDynamicType(*elem) {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
// we count the index from now on.
//
// Array values nested multiple levels deep are also encoded inline:
// [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
virtualArgs += getTypeSize(*elem)/32 - 1
} else if elem.T == TupleTy && !isDynamicType(*elem) {
// If we have a static tuple, like (uint256, bool, uint256), these are
// coded as just like uint256,bool,uint256
virtualArgs += getTypeSize(*elem)/32 - 1
}
retval.Field(index).Set(reflect.ValueOf(marshalledValue))
}
return retval.Interface(), nil
}
// toGoType parses the output bytes and recursively assigns the value of these bytes
// into a go type in accordance with the ABI spec.
func toGoType(index int, t Type, output []byte) (interface{}, error) {
if index+32 > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), index+32)
}
var (
returnOutput []byte
begin, length int
err error
)
// if we require a length prefix, find the beginning word and size returned.
if t.requiresLengthPrefix() {
begin, length, err = lengthPrefixPointsTo(index, output)
if err != nil {
return nil, err
}
} else {
returnOutput = output[index : index+32]
}
switch t.T {
case TupleTy:
if isDynamicType(t) {
begin, err := tuplePointsTo(index, output)
if err != nil {
return nil, err
}
return forTupleUnpack(t, output[begin:])
}
return forTupleUnpack(t, output[index:])
case SliceTy:
return forEachUnpack(t, output[begin:], 0, length)
case ArrayTy:
if isDynamicType(*t.Elem) {
offset := binary.BigEndian.Uint64(returnOutput[len(returnOutput)-8:])
if offset > uint64(len(output)) {
return nil, fmt.Errorf("abi: toGoType offset greater than output length: offset: %d, len(output): %d", offset, len(output))
}
return forEachUnpack(t, output[offset:], 0, t.Size)
}
return forEachUnpack(t, output[index:], 0, t.Size)
case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+length]), nil
case IntTy, UintTy:
return ReadInteger(t, returnOutput), nil
case BoolTy:
return readBool(returnOutput)
case AddressTy:
return common.BytesToAddress(returnOutput), nil
case HashTy:
return common.BytesToHash(returnOutput), nil
case BytesTy:
return output[begin : begin+length], nil
case FixedBytesTy:
return ReadFixedBytes(t, returnOutput)
case FunctionTy:
return readFunctionType(t, returnOutput)
default:
return nil, fmt.Errorf("abi: unknown type %v", t.T)
}
}
// lengthPrefixPointsTo interprets a 32 byte slice as an offset and then determines which indices to look to decode the type.
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
bigOffsetEnd := new(big.Int).SetBytes(output[index : index+32])
bigOffsetEnd.Add(bigOffsetEnd, common.Big32)
outputLength := big.NewInt(int64(len(output)))
if bigOffsetEnd.Cmp(outputLength) > 0 {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", bigOffsetEnd, outputLength)
}
if bigOffsetEnd.BitLen() > 63 {
return 0, 0, fmt.Errorf("abi offset larger than int64: %v", bigOffsetEnd)
}
offsetEnd := int(bigOffsetEnd.Uint64())
lengthBig := new(big.Int).SetBytes(output[offsetEnd-32 : offsetEnd])
totalSize := new(big.Int).Add(bigOffsetEnd, lengthBig)
if totalSize.BitLen() > 63 {
return 0, 0, fmt.Errorf("abi: length larger than int64: %v", totalSize)
}
if totalSize.Cmp(outputLength) > 0 {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %v require %v", outputLength, totalSize)
}
start = int(bigOffsetEnd.Uint64())
length = int(lengthBig.Uint64())
return
}
// tuplePointsTo resolves the location reference for a dynamic tuple.
func tuplePointsTo(index int, output []byte) (start int, err error) {
offset := new(big.Int).SetBytes(output[index : index+32])
outputLen := big.NewInt(int64(len(output)))
if offset.Cmp(outputLen) > 0 {
return 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", offset, outputLen)
}
if offset.BitLen() > 63 {
return 0, fmt.Errorf("abi offset larger than int64: %v", offset)
}
return int(offset.Uint64()), nil
}
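
An illustrative sketch of ReadInteger decoding a single 32-byte ABI word; the word contents are made up:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	u64T, err := abi.NewType("uint64", "", nil)
	if err != nil {
		panic(err)
	}
	// A 32-byte word carrying the value 1234 in its last 8 bytes, as the ABI encodes uint64.
	word := make([]byte, 32)
	binary.BigEndian.PutUint64(word[24:], 1234)

	v := abi.ReadInteger(u64T, word)
	fmt.Printf("%T %v\n", v, v) // uint64 1234
}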

View File

@@ -0,0 +1,40 @@
// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package abi
import "fmt"
// ResolveNameConflict returns the next available name for a given thing.
// This helper can be used for lots of purposes:
//
// - Solidity supports function overloading; this function can fix
// the name conflicts of overloaded functions.
// - In golang binding generation, the parameter name (in function, event, error,
// and struct definitions) will be converted to camelcase style, which
// may eventually lead to name conflicts.
//
// Name conflicts are mostly resolved by adding a number suffix. e.g. if the abi contains
// methods "send" and "send0", ResolveNameConflict would return "send1" for input "send".
func ResolveNameConflict(rawName string, used func(string) bool) string {
name := rawName
ok := used(name)
for idx := 0; ok; idx++ {
name = fmt.Sprintf("%s%d", rawName, idx)
ok = used(name)
}
return name
}
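
A quick sketch of ResolveNameConflict against a hypothetical set of already-used names:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts/abi"
)

func main() {
	used := map[string]bool{"send": true, "send0": true}
	// Numeric suffixes are tried until an unused name is found.
	name := abi.ResolveNameConflict("send", func(s string) bool { return used[s] })
	fmt.Println(name) // send1
}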

View File

@@ -0,0 +1,226 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package accounts implements high level Ethereum account management.
package accounts
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"golang.org/x/crypto/sha3"
)
// Account represents an Ethereum account located at a specific location defined
// by the optional URL field.
type Account struct {
Address common.Address `json:"address"` // Ethereum account address derived from the key
URL URL `json:"url"` // Optional resource locator within a backend
}
const (
MimetypeDataWithValidator = "data/validator"
MimetypeTypedData = "data/typed"
MimetypeClique = "application/x-clique-header"
MimetypeTextPlain = "text/plain"
)
// Wallet represents a software or hardware wallet that might contain one or more
// accounts (derived from the same seed).
type Wallet interface {
// URL retrieves the canonical path under which this wallet is reachable. It is
// used by upper layers to define a sorting order over all wallets from multiple
// backends.
URL() URL
// Status returns a textual status to aid the user in the current state of the
// wallet. It also returns an error indicating any failure the wallet might have
// encountered.
Status() (string, error)
// Open initializes access to a wallet instance. It is not meant to unlock or
// decrypt account keys, rather simply to establish a connection to hardware
// wallets and/or to access derivation seeds.
//
// The passphrase parameter may or may not be used by the implementation of a
// particular wallet instance. The reason there is no passwordless open method
// is to strive towards a uniform wallet handling, oblivious to the different
// backend providers.
//
// Please note, if you open a wallet, you must close it to release any allocated
// resources (especially important when working with hardware wallets).
Open(passphrase string) error
// Close releases any resources held by an open wallet instance.
Close() error
// Accounts retrieves the list of signing accounts the wallet is currently aware
// of. For hierarchical deterministic wallets, the list will not be exhaustive,
// rather only contain the accounts explicitly pinned during account derivation.
Accounts() []Account
// Contains returns whether an account is part of this particular wallet or not.
Contains(account Account) bool
// Derive attempts to explicitly derive a hierarchical deterministic account at
// the specified derivation path. If requested, the derived account will be added
// to the wallet's tracked account list.
Derive(path DerivationPath, pin bool) (Account, error)
// SelfDerive sets a base account derivation path from which the wallet attempts
// to discover non-zero accounts and automatically add them to the list of tracked
// accounts.
//
// Note, self derivation will increment the last component of the specified path
// as opposed to descending into a child path, to allow discovering accounts starting
// from non zero components.
//
// Some hardware wallets switched derivation paths through their evolution, so
// this method supports providing multiple bases to discover old user accounts
// too. Only the last base will be used to derive the next empty account.
//
// You can disable automatic account discovery by calling SelfDerive with a nil
// chain state reader.
SelfDerive(bases []DerivationPath, chain ethereum.ChainStateReader)
// SignData requests the wallet to sign the hash of the given data.
// It looks up the account specified either solely via its address contained within,
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
// a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
// the account in a keystore).
SignData(account Account, mimeType string, data []byte) ([]byte, error)
// SignDataWithPassphrase is identical to SignData, but also takes a password.
// NOTE: there's a chance that an erroneous call might mistake the two strings, and
// supply the password in the mimetype field, or vice versa. Thus, an implementation
// should never echo the mimetype or return the mimetype in the error response.
SignDataWithPassphrase(account Account, passphrase, mimeType string, data []byte) ([]byte, error)
// SignText requests the wallet to sign the hash of a given piece of data, prefixed
// by the Ethereum prefix scheme.
// It looks up the account specified either solely via its address contained within,
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
// a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
// the account in a keystore).
//
// This method should return the signature in 'canonical' format, with v 0 or 1.
SignText(account Account, text []byte) ([]byte, error)
// SignTextWithPassphrase is identical to SignText, but also takes a password.
SignTextWithPassphrase(account Account, passphrase string, hash []byte) ([]byte, error)
// SignTx requests the wallet to sign the given transaction.
//
// It looks up the account specified either solely via its address contained within,
// or optionally with the aid of any location metadata from the embedded URL field.
//
// If the wallet requires additional authentication to sign the request (e.g.
// a password to decrypt the account, or a PIN code to verify the transaction),
// an AuthNeededError instance will be returned, containing infos for the user
// about which fields or actions are needed. The user may retry by providing
// the needed details via SignTxWithPassphrase, or by other means (e.g. unlock
// the account in a keystore).
SignTx(account Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error)
// SignTxWithPassphrase is identical to SignTx, but also takes a password.
SignTxWithPassphrase(account Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error)
}
// Backend is a "wallet provider" that may contain a batch of accounts they can
// sign transactions with and upon request, do so.
type Backend interface {
// Wallets retrieves the list of wallets the backend is currently aware of.
//
// The returned wallets are not opened by default. For software HD wallets this
// means that no base seeds are decrypted, and for hardware wallets that no actual
// connection is established.
//
// The resulting wallet list will be sorted alphabetically based on its internal
// URL assigned by the backend. Since wallets (especially hardware) may come and
// go, the same wallet might appear at different positions in the list during
// subsequent retrievals.
Wallets() []Wallet
// Subscribe creates an async subscription to receive notifications when the
// backend detects the arrival or departure of a wallet.
Subscribe(sink chan<- WalletEvent) event.Subscription
}
// TextHash is a helper function that calculates a hash for the given message that can be
// safely used to calculate a signature from.
//
// The hash is calculated as
//
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
func TextHash(data []byte) []byte {
hash, _ := TextAndHash(data)
return hash
}
// TextAndHash is a helper function that calculates a hash for the given message that can be
// safely used to calculate a signature from.
//
// The hash is calculated as
//
// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
//
// This gives context to the signed message and prevents signing of transactions.
func TextAndHash(data []byte) ([]byte, string) {
msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), string(data))
hasher := sha3.NewLegacyKeccak256()
hasher.Write([]byte(msg))
return hasher.Sum(nil), msg
}
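// Editor's note: the example below is an illustrative sketch, not part of the
// upstream file. It shows how TextHash is typically paired with a raw ECDSA
// signature, assuming additional imports of "crypto/ecdsa" and
// "github.com/ethereum/go-ethereum/crypto"; the private key is hypothetical.
func exampleSignText(priv *ecdsa.PrivateKey) (common.Address, error) {
	msg := []byte("hello world")
	hash := TextHash(msg) // keccak256("\x19Ethereum Signed Message:\n11hello world")
	sig, err := crypto.Sign(hash, priv) // 65-byte [R || S || V], V in {0, 1}
	if err != nil {
		return common.Address{}, err
	}
	// Recover the public key to verify the round trip.
	pub, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return common.Address{}, err
	}
	return crypto.PubkeyToAddress(*pub), nil
}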
// WalletEventType represents the different event types that can be fired by
// the wallet subscription subsystem.
type WalletEventType int
const (
// WalletArrived is fired when a new wallet is detected either via USB or via
// a filesystem event in the keystore.
WalletArrived WalletEventType = iota
// WalletOpened is fired when a wallet is successfully opened with the purpose
// of starting any background processes such as automatic key derivation.
WalletOpened
// WalletDropped is fired when a wallet is removed or disconnected, either via USB
// or due to a filesystem event in the keystore. This event indicates that the
// wallet can no longer be used for any operations.
WalletDropped
)
// WalletEvent is an event fired by an account backend when a wallet arrival or
// departure is detected.
type WalletEvent struct {
Wallet Wallet // Wallet instance arrived or departed
Kind WalletEventType // Event type that happened in the system
}
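// Editor's note: minimal usage sketch, not part of the upstream file. It shows
// how a caller might consume wallet events from a Backend; the backend value
// and the reactions to each event are hypothetical.
func exampleWatchWallets(backend Backend) {
	sink := make(chan WalletEvent, 16)
	sub := backend.Subscribe(sink)
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-sink:
			switch ev.Kind {
			case WalletArrived:
				_ = ev.Wallet.Open("") // may fail with an AuthNeededError
			case WalletDropped:
				ev.Wallet.Close()
			}
		case <-sub.Err():
			return
		}
	}
}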

View File

@@ -0,0 +1,67 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"errors"
"fmt"
)
// ErrUnknownAccount is returned for any requested operation for which no backend
// provides the specified account.
var ErrUnknownAccount = errors.New("unknown account")
// ErrUnknownWallet is returned for any requested operation for which no backend
// provides the specified wallet.
var ErrUnknownWallet = errors.New("unknown wallet")
// ErrNotSupported is returned when an operation is requested from an account
// backend that it does not support.
var ErrNotSupported = errors.New("not supported")
// ErrInvalidPassphrase is returned when a decryption operation receives a bad
// passphrase.
var ErrInvalidPassphrase = errors.New("invalid password")
// ErrWalletAlreadyOpen is returned if a wallet is attempted to be opened the
// second time.
var ErrWalletAlreadyOpen = errors.New("wallet already open")
// ErrWalletClosed is returned if a wallet is offline.
var ErrWalletClosed = errors.New("wallet closed")
// AuthNeededError is returned by backends for signing requests where the user
// is required to provide further authentication before signing can succeed.
//
// This usually means either that a password needs to be supplied, or perhaps a
// one time PIN code displayed by some hardware device.
type AuthNeededError struct {
Needed string // Extra authentication the user needs to provide
}
// NewAuthNeededError creates a new authentication error with the extra details
// about the needed fields set.
func NewAuthNeededError(needed string) error {
return &AuthNeededError{
Needed: needed,
}
}
// Error implements the standard error interface.
func (err *AuthNeededError) Error() string {
return fmt.Sprintf("authentication needed: %s", err.Needed)
}
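// Editor's note: illustrative sketch, not part of the upstream file. It shows
// how a signing caller might react to an AuthNeededError, assuming extra
// imports of "math/big" and "github.com/ethereum/go-ethereum/core/types";
// the wallet, transaction and passphrase prompt are hypothetical.
func exampleSignWithRetry(wallet Wallet, account Account, tx *types.Transaction, chainID *big.Int, prompt func(hint string) string) (*types.Transaction, error) {
	signed, err := wallet.SignTx(account, tx, chainID)
	var authErr *AuthNeededError
	if errors.As(err, &authErr) {
		// The backend told us which extra authentication is required.
		return wallet.SignTxWithPassphrase(account, prompt(authErr.Needed), tx, chainID)
	}
	return signed, err
}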

View File

@@ -0,0 +1,269 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package external
import (
"fmt"
"math/big"
"sync"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/signer/core/apitypes"
)
type ExternalBackend struct {
signers []accounts.Wallet
}
func (eb *ExternalBackend) Wallets() []accounts.Wallet {
return eb.signers
}
func NewExternalBackend(endpoint string) (*ExternalBackend, error) {
signer, err := NewExternalSigner(endpoint)
if err != nil {
return nil, err
}
return &ExternalBackend{
signers: []accounts.Wallet{signer},
}, nil
}
func (eb *ExternalBackend) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit
return nil
})
}
// ExternalSigner provides an API to interact with an external signer (clef)
// It proxies requests to the external signer while forwarding relevant
// request headers
type ExternalSigner struct {
client *rpc.Client
endpoint string
status string
cacheMu sync.RWMutex
cache []accounts.Account
}
func NewExternalSigner(endpoint string) (*ExternalSigner, error) {
client, err := rpc.Dial(endpoint)
if err != nil {
return nil, err
}
extsigner := &ExternalSigner{
client: client,
endpoint: endpoint,
}
// Check if reachable
version, err := extsigner.pingVersion()
if err != nil {
return nil, err
}
extsigner.status = fmt.Sprintf("ok [version=%v]", version)
return extsigner, nil
}
func (api *ExternalSigner) URL() accounts.URL {
return accounts.URL{
Scheme: "extapi",
Path: api.endpoint,
}
}
func (api *ExternalSigner) Status() (string, error) {
return api.status, nil
}
func (api *ExternalSigner) Open(passphrase string) error {
return fmt.Errorf("operation not supported on external signers")
}
func (api *ExternalSigner) Close() error {
return fmt.Errorf("operation not supported on external signers")
}
func (api *ExternalSigner) Accounts() []accounts.Account {
var accnts []accounts.Account
res, err := api.listAccounts()
if err != nil {
log.Error("account listing failed", "error", err)
return accnts
}
for _, addr := range res {
accnts = append(accnts, accounts.Account{
URL: accounts.URL{
Scheme: "extapi",
Path: api.endpoint,
},
Address: addr,
})
}
api.cacheMu.Lock()
api.cache = accnts
api.cacheMu.Unlock()
return accnts
}
func (api *ExternalSigner) Contains(account accounts.Account) bool {
api.cacheMu.RLock()
defer api.cacheMu.RUnlock()
if api.cache == nil {
// If we haven't already fetched the accounts, it's time to do so now
api.cacheMu.RUnlock()
api.Accounts()
api.cacheMu.RLock()
}
for _, a := range api.cache {
if a.Address == account.Address && (account.URL == (accounts.URL{}) || account.URL == api.URL()) {
return true
}
}
return false
}
func (api *ExternalSigner) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
return accounts.Account{}, fmt.Errorf("operation not supported on external signers")
}
func (api *ExternalSigner) SelfDerive(bases []accounts.DerivationPath, chain ethereum.ChainStateReader) {
log.Error("operation SelfDerive not supported on external signers")
}
// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed
func (api *ExternalSigner) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
var res hexutil.Bytes
var signAddress = common.NewMixedcaseAddress(account.Address)
if err := api.client.Call(&res, "account_signData",
mimeType,
&signAddress, // Need to use the pointer here, because of how MarshalJSON is defined
hexutil.Encode(data)); err != nil {
return nil, err
}
// If V is on 27/28-form, convert to 0/1 for Clique
if mimeType == accounts.MimetypeClique && (res[64] == 27 || res[64] == 28) {
res[64] -= 27 // Transform V from 27/28 to 0/1 for Clique use
}
return res, nil
}
func (api *ExternalSigner) SignText(account accounts.Account, text []byte) ([]byte, error) {
var signature hexutil.Bytes
var signAddress = common.NewMixedcaseAddress(account.Address)
if err := api.client.Call(&signature, "account_signData",
accounts.MimetypeTextPlain,
&signAddress, // Need to use the pointer here, because of how MarshalJSON is defined
hexutil.Encode(text)); err != nil {
return nil, err
}
if signature[64] == 27 || signature[64] == 28 {
// If clef is used as a backend, it may already have transformed
// the signature to ethereum-type signature.
signature[64] -= 27 // Transform V from Ethereum-legacy to 0/1
}
return signature, nil
}
// signTransactionResult represents the signing result returned by clef.
type signTransactionResult struct {
Raw hexutil.Bytes `json:"raw"`
Tx *types.Transaction `json:"tx"`
}
// SignTx sends the transaction to the external signer.
// If chainID is nil, or tx.ChainID is zero, the chain ID will be assigned
// by the external signer. For non-legacy transactions, the chain ID of the
// transaction overrides the chainID parameter.
func (api *ExternalSigner) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
data := hexutil.Bytes(tx.Data())
var to *common.MixedcaseAddress
if tx.To() != nil {
t := common.NewMixedcaseAddress(*tx.To())
to = &t
}
args := &apitypes.SendTxArgs{
Data: &data,
Nonce: hexutil.Uint64(tx.Nonce()),
Value: hexutil.Big(*tx.Value()),
Gas: hexutil.Uint64(tx.Gas()),
To: to,
From: common.NewMixedcaseAddress(account.Address),
}
switch tx.Type() {
case types.LegacyTxType, types.AccessListTxType:
args.GasPrice = (*hexutil.Big)(tx.GasPrice())
case types.DynamicFeeTxType:
args.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap())
args.MaxPriorityFeePerGas = (*hexutil.Big)(tx.GasTipCap())
default:
return nil, fmt.Errorf("unsupported tx type %d", tx.Type())
}
// We should request the default chain id that we're operating with
// (the chain we're executing on)
if chainID != nil && chainID.Sign() != 0 {
args.ChainID = (*hexutil.Big)(chainID)
}
if tx.Type() != types.LegacyTxType {
// However, if the user asked for a particular chain id, then we should
// use that instead.
if tx.ChainId().Sign() != 0 {
args.ChainID = (*hexutil.Big)(tx.ChainId())
}
accessList := tx.AccessList()
args.AccessList = &accessList
}
var res signTransactionResult
if err := api.client.Call(&res, "account_signTransaction", args); err != nil {
return nil, err
}
return res.Tx, nil
}
func (api *ExternalSigner) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) {
return []byte{}, fmt.Errorf("password-operations not supported on external signers")
}
func (api *ExternalSigner) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
return nil, fmt.Errorf("password-operations not supported on external signers")
}
func (api *ExternalSigner) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) {
return nil, fmt.Errorf("password-operations not supported on external signers")
}
func (api *ExternalSigner) listAccounts() ([]common.Address, error) {
var res []common.Address
if err := api.client.Call(&res, "account_list"); err != nil {
return nil, err
}
return res, nil
}
func (api *ExternalSigner) pingVersion() (string, error) {
var v string
if err := api.client.Call(&v, "account_version"); err != nil {
return "", err
}
return v, nil
}
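// Editor's note: minimal usage sketch, not part of the upstream file. It lists
// the accounts exposed by a clef instance, assuming one is reachable at the
// hypothetical endpoint below.
func exampleUseClef() error {
	backend, err := NewExternalBackend("http://localhost:8550")
	if err != nil {
		return err
	}
	// An ExternalBackend wraps exactly one ExternalSigner wallet.
	wallet := backend.Wallets()[0]
	for _, account := range wallet.Accounts() {
		fmt.Println("clef account:", account.Address.Hex())
	}
	return nil
}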

180
vendor/github.com/ethereum/go-ethereum/accounts/hd.go generated vendored Normal file
View File

@@ -0,0 +1,180 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"encoding/json"
"errors"
"fmt"
"math"
"math/big"
"strings"
)
// DefaultRootDerivationPath is the root path to which custom derivation endpoints
// are appended. As such, the first account will be at m/44'/60'/0'/0, the second
// at m/44'/60'/0'/1, etc.
var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
// DefaultBaseDerivationPath is the base path from which custom derivation endpoints
// are incremented. As such, the first account will be at m/44'/60'/0'/0/0, the second
// at m/44'/60'/0'/0/1, etc.
var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
// LegacyLedgerBaseDerivationPath is the legacy base path from which custom derivation
// endpoints are incremented. As such, the first account will be at m/44'/60'/0'/0, the
// second at m/44'/60'/0'/1, etc.
var LegacyLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
// DerivationPath represents the computer friendly version of a hierarchical
// deterministic wallet account derivation path.
//
// The BIP-32 spec https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
// defines derivation paths to be of the form:
//
// m / purpose' / coin_type' / account' / change / address_index
//
// The BIP-44 spec https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
// defines that the `purpose` be 44' (or 0x8000002C) for crypto currencies, and
// SLIP-44 https://github.com/satoshilabs/slips/blob/master/slip-0044.md assigns
// the `coin_type` 60' (or 0x8000003C) to Ethereum.
//
// The root path for Ethereum is m/44'/60'/0'/0 according to the specification
// from https://github.com/ethereum/EIPs/issues/84, albeit it's not set in stone
// yet whether accounts should increment the last component or the children of
// that. We will go with the simpler approach of incrementing the last component.
type DerivationPath []uint32
// ParseDerivationPath converts a user specified derivation path string to the
// internal binary representation.
//
// Full derivation paths need to start with the `m/` prefix, relative derivation
// paths (which will get appended to the default root path) must not have prefixes
// in front of the first element. Whitespace is ignored.
func ParseDerivationPath(path string) (DerivationPath, error) {
var result DerivationPath
// Handle absolute or relative paths
components := strings.Split(path, "/")
switch {
case len(components) == 0:
return nil, errors.New("empty derivation path")
case strings.TrimSpace(components[0]) == "":
return nil, errors.New("ambiguous path: use 'm/' prefix for absolute paths, or no leading '/' for relative ones")
case strings.TrimSpace(components[0]) == "m":
components = components[1:]
default:
result = append(result, DefaultRootDerivationPath...)
}
// All remaining components are relative, append one by one
if len(components) == 0 {
return nil, errors.New("empty derivation path") // Empty relative paths
}
for _, component := range components {
// Ignore any user added whitespace
component = strings.TrimSpace(component)
var value uint32
// Handle hardened paths
if strings.HasSuffix(component, "'") {
value = 0x80000000
component = strings.TrimSpace(strings.TrimSuffix(component, "'"))
}
// Handle the non hardened component
bigval, ok := new(big.Int).SetString(component, 0)
if !ok {
return nil, fmt.Errorf("invalid component: %s", component)
}
max := math.MaxUint32 - value
if bigval.Sign() < 0 || bigval.Cmp(big.NewInt(int64(max))) > 0 {
if value == 0 {
return nil, fmt.Errorf("component %v out of allowed range [0, %d]", bigval, max)
}
return nil, fmt.Errorf("component %v out of allowed hardened range [0, %d]", bigval, max)
}
value += uint32(bigval.Uint64())
// Append and repeat
result = append(result, value)
}
return result, nil
}
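// Editor's note: illustrative sketch, not part of the upstream file, showing
// absolute vs. relative parsing; error handling is elided for brevity.
func exampleParseDerivationPath() {
	abs, _ := ParseDerivationPath("m/44'/60'/0'/0/0")
	fmt.Println(abs) // m/44'/60'/0'/0/0
	// Relative paths are appended to DefaultRootDerivationPath.
	rel, _ := ParseDerivationPath("1")
	fmt.Println(rel) // m/44'/60'/0'/0/1
}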
// String implements the stringer interface, converting a binary derivation path
// to its canonical representation.
func (path DerivationPath) String() string {
result := "m"
for _, component := range path {
var hardened bool
if component >= 0x80000000 {
component -= 0x80000000
hardened = true
}
result = fmt.Sprintf("%s/%d", result, component)
if hardened {
result += "'"
}
}
return result
}
// MarshalJSON turns a derivation path into its json-serialized string
func (path DerivationPath) MarshalJSON() ([]byte, error) {
return json.Marshal(path.String())
}
// UnmarshalJSON a json-serialized string back into a derivation path
func (path *DerivationPath) UnmarshalJSON(b []byte) error {
var dp string
var err error
if err = json.Unmarshal(b, &dp); err != nil {
return err
}
*path, err = ParseDerivationPath(dp)
return err
}
// DefaultIterator creates a BIP-32 path iterator, which progresses by increasing the last component:
// i.e. m/44'/60'/0'/0/0, m/44'/60'/0'/0/1, m/44'/60'/0'/0/2, ... m/44'/60'/0'/0/N.
func DefaultIterator(base DerivationPath) func() DerivationPath {
path := make(DerivationPath, len(base))
copy(path[:], base[:])
// Set it back by one, so the first call gives the first result
path[len(path)-1]--
return func() DerivationPath {
path[len(path)-1]++
return path
}
}
// LedgerLiveIterator creates a bip44 path iterator for Ledger Live.
// Ledger Live increments the third component rather than the fifth component
// i.e. m/44'/60'/0'/0/0, m/44'/60'/1'/0/0, m/44'/60'/2'/0/0, ... m/44'/60'/N'/0/0.
func LedgerLiveIterator(base DerivationPath) func() DerivationPath {
path := make(DerivationPath, len(base))
copy(path[:], base[:])
// Set it back by one, so the first call gives the first result
path[2]--
return func() DerivationPath {
// ledgerLivePathIterator iterates on the third component
path[2]++
return path
}
}
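// Editor's note: illustrative sketch, not part of the upstream file, contrasting
// the two iteration strategies over DefaultBaseDerivationPath. Both iterators
// mutate and return the same underlying slice, so copy the result if it needs
// to be retained across calls.
func exampleIterators() {
	next := DefaultIterator(DefaultBaseDerivationPath)
	fmt.Println(next()) // m/44'/60'/0'/0/0
	fmt.Println(next()) // m/44'/60'/0'/0/1

	live := LedgerLiveIterator(DefaultBaseDerivationPath)
	fmt.Println(live()) // m/44'/60'/0'/0/0
	fmt.Println(live()) // m/44'/60'/1'/0/0
}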

View File

@@ -0,0 +1,309 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"bufio"
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/log"
)
// Minimum amount of time between cache reloads. This limit applies if the platform does
// not support change notifications. It also applies if the keystore directory does not
// exist yet; in that case the code will attempt to create a watcher at most this often.
const minReloadInterval = 2 * time.Second
type accountsByURL []accounts.Account
func (s accountsByURL) Len() int { return len(s) }
func (s accountsByURL) Less(i, j int) bool { return s[i].URL.Cmp(s[j].URL) < 0 }
func (s accountsByURL) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// AmbiguousAddrError is returned when attempting to unlock
// an address for which more than one file exists.
type AmbiguousAddrError struct {
Addr common.Address
Matches []accounts.Account
}
func (err *AmbiguousAddrError) Error() string {
files := ""
for i, a := range err.Matches {
files += a.URL.Path
if i < len(err.Matches)-1 {
files += ", "
}
}
return fmt.Sprintf("multiple keys match address (%s)", files)
}
// accountCache is a live index of all accounts in the keystore.
type accountCache struct {
keydir string
watcher *watcher
mu sync.Mutex
all accountsByURL
byAddr map[common.Address][]accounts.Account
throttle *time.Timer
notify chan struct{}
fileC fileCache
}
func newAccountCache(keydir string) (*accountCache, chan struct{}) {
ac := &accountCache{
keydir: keydir,
byAddr: make(map[common.Address][]accounts.Account),
notify: make(chan struct{}, 1),
fileC: fileCache{all: mapset.NewThreadUnsafeSet()},
}
ac.watcher = newWatcher(ac)
return ac, ac.notify
}
func (ac *accountCache) accounts() []accounts.Account {
ac.maybeReload()
ac.mu.Lock()
defer ac.mu.Unlock()
cpy := make([]accounts.Account, len(ac.all))
copy(cpy, ac.all)
return cpy
}
func (ac *accountCache) hasAddress(addr common.Address) bool {
ac.maybeReload()
ac.mu.Lock()
defer ac.mu.Unlock()
return len(ac.byAddr[addr]) > 0
}
func (ac *accountCache) add(newAccount accounts.Account) {
ac.mu.Lock()
defer ac.mu.Unlock()
i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Cmp(newAccount.URL) >= 0 })
if i < len(ac.all) && ac.all[i] == newAccount {
return
}
// newAccount is not in the cache.
ac.all = append(ac.all, accounts.Account{})
copy(ac.all[i+1:], ac.all[i:])
ac.all[i] = newAccount
ac.byAddr[newAccount.Address] = append(ac.byAddr[newAccount.Address], newAccount)
}
// note: removed needs to be unique here (i.e. both File and Address must be set).
func (ac *accountCache) delete(removed accounts.Account) {
ac.mu.Lock()
defer ac.mu.Unlock()
ac.all = removeAccount(ac.all, removed)
if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
delete(ac.byAddr, removed.Address)
} else {
ac.byAddr[removed.Address] = ba
}
}
// deleteByFile removes an account referenced by the given path.
func (ac *accountCache) deleteByFile(path string) {
ac.mu.Lock()
defer ac.mu.Unlock()
i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Path >= path })
if i < len(ac.all) && ac.all[i].URL.Path == path {
removed := ac.all[i]
ac.all = append(ac.all[:i], ac.all[i+1:]...)
if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
delete(ac.byAddr, removed.Address)
} else {
ac.byAddr[removed.Address] = ba
}
}
}
// watcherStarted returns true if the watcher loop started running (even if it
// has since also ended).
func (ac *accountCache) watcherStarted() bool {
ac.mu.Lock()
defer ac.mu.Unlock()
return ac.watcher.running || ac.watcher.runEnded
}
func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
for i := range slice {
if slice[i] == elem {
return append(slice[:i], slice[i+1:]...)
}
}
return slice
}
// find returns the cached account for address if there is a unique match.
// The exact matching rules are explained by the documentation of accounts.Account.
// Callers must hold ac.mu.
func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) {
// Limit search to address candidates if possible.
matches := ac.all
if (a.Address != common.Address{}) {
matches = ac.byAddr[a.Address]
}
if a.URL.Path != "" {
// If only the basename is specified, complete the path.
if !strings.ContainsRune(a.URL.Path, filepath.Separator) {
a.URL.Path = filepath.Join(ac.keydir, a.URL.Path)
}
for i := range matches {
if matches[i].URL == a.URL {
return matches[i], nil
}
}
if (a.Address == common.Address{}) {
return accounts.Account{}, ErrNoMatch
}
}
switch len(matches) {
case 1:
return matches[0], nil
case 0:
return accounts.Account{}, ErrNoMatch
default:
err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))}
copy(err.Matches, matches)
sort.Sort(accountsByURL(err.Matches))
return accounts.Account{}, err
}
}
func (ac *accountCache) maybeReload() {
ac.mu.Lock()
if ac.watcher.running {
ac.mu.Unlock()
return // A watcher is running and will keep the cache up-to-date.
}
if ac.throttle == nil {
ac.throttle = time.NewTimer(0)
} else {
select {
case <-ac.throttle.C:
default:
ac.mu.Unlock()
return // The cache was reloaded recently.
}
}
// No watcher running, start it.
ac.watcher.start()
ac.throttle.Reset(minReloadInterval)
ac.mu.Unlock()
ac.scanAccounts()
}
func (ac *accountCache) close() {
ac.mu.Lock()
ac.watcher.close()
if ac.throttle != nil {
ac.throttle.Stop()
}
if ac.notify != nil {
close(ac.notify)
ac.notify = nil
}
ac.mu.Unlock()
}
// scanAccounts checks if any changes have occurred on the filesystem, and
// updates the account cache accordingly
func (ac *accountCache) scanAccounts() error {
// Scan the entire folder metadata for file changes
creates, deletes, updates, err := ac.fileC.scan(ac.keydir)
if err != nil {
log.Debug("Failed to reload keystore contents", "err", err)
return err
}
if creates.Cardinality() == 0 && deletes.Cardinality() == 0 && updates.Cardinality() == 0 {
return nil
}
// Create a helper method to scan the contents of the key files
var (
buf = new(bufio.Reader)
key struct {
Address string `json:"address"`
}
)
readAccount := func(path string) *accounts.Account {
fd, err := os.Open(path)
if err != nil {
log.Trace("Failed to open keystore file", "path", path, "err", err)
return nil
}
defer fd.Close()
buf.Reset(fd)
// Parse the address.
key.Address = ""
err = json.NewDecoder(buf).Decode(&key)
addr := common.HexToAddress(key.Address)
switch {
case err != nil:
log.Debug("Failed to decode keystore key", "path", path, "err", err)
case addr == common.Address{}:
log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
default:
return &accounts.Account{
Address: addr,
URL: accounts.URL{Scheme: KeyStoreScheme, Path: path},
}
}
return nil
}
// Process all the file diffs
start := time.Now()
for _, p := range creates.ToSlice() {
if a := readAccount(p.(string)); a != nil {
ac.add(*a)
}
}
for _, p := range deletes.ToSlice() {
ac.deleteByFile(p.(string))
}
for _, p := range updates.ToSlice() {
path := p.(string)
ac.deleteByFile(path)
if a := readAccount(path); a != nil {
ac.add(*a)
}
}
end := time.Now()
select {
case ac.notify <- struct{}{}:
default:
}
log.Trace("Handled keystore changes", "time", end.Sub(start))
return nil
}

View File

@@ -0,0 +1,105 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"os"
"path/filepath"
"strings"
"sync"
"time"
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/log"
)
// fileCache is a cache of files seen during scan of keystore.
type fileCache struct {
all mapset.Set // Set of all files from the keystore folder
lastMod time.Time // Last time instance when a file was modified
mu sync.Mutex
}
// scan performs a new scan on the given directory, compares against the already
// cached filenames, and returns file sets: creates, deletes, updates.
func (fc *fileCache) scan(keyDir string) (mapset.Set, mapset.Set, mapset.Set, error) {
t0 := time.Now()
// List all the files from the keystore folder
files, err := os.ReadDir(keyDir)
if err != nil {
return nil, nil, nil, err
}
t1 := time.Now()
fc.mu.Lock()
defer fc.mu.Unlock()
// Iterate all the files and gather their metadata
all := mapset.NewThreadUnsafeSet()
mods := mapset.NewThreadUnsafeSet()
var newLastMod time.Time
for _, fi := range files {
path := filepath.Join(keyDir, fi.Name())
// Skip any non-key files from the folder
if nonKeyFile(fi) {
log.Trace("Ignoring file on account scan", "path", path)
continue
}
// Gather the set of all and freshly modified files
all.Add(path)
info, err := fi.Info()
if err != nil {
return nil, nil, nil, err
}
modified := info.ModTime()
if modified.After(fc.lastMod) {
mods.Add(path)
}
if modified.After(newLastMod) {
newLastMod = modified
}
}
t2 := time.Now()
// Update the tracked files and return the three sets
deletes := fc.all.Difference(all) // Deletes = previous - current
creates := all.Difference(fc.all) // Creates = current - previous
updates := mods.Difference(creates) // Updates = modified - creates
fc.all, fc.lastMod = all, newLastMod
t3 := time.Now()
// Report on the scanning stats and return
log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
return creates, deletes, updates, nil
}
// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
func nonKeyFile(fi os.DirEntry) bool {
// Skip editor backups and UNIX-style hidden files.
if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
return true
}
// Skip misc special files, directories (yes, symlinks too).
if fi.IsDir() || !fi.Type().IsRegular() {
return true
}
return false
}

View File

@@ -0,0 +1,284 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"bytes"
"crypto/ecdsa"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/google/uuid"
"github.com/status-im/status-go/extkeys"
)
const (
version = 3
)
type Key struct {
Id uuid.UUID // Version 4 "random" for unique id not derived from key data
// to simplify lookups we also store the address
Address common.Address
// we only store privkey as pubkey/address can be derived from it
// privkey in this struct is always in plaintext
PrivateKey *ecdsa.PrivateKey
// ExtendedKey is the extended key of the PrivateKey itself, and it's used
// to derive child keys.
ExtendedKey *extkeys.ExtendedKey
// SubAccountIndex is DEPRECATED
// It was used in Status to keep track of the number of sub-accounts created
// before having multi-account support.
SubAccountIndex uint32
}
type keyStore interface {
// Loads and decrypts the key from disk.
GetKey(addr common.Address, filename string, auth string) (*Key, error)
// Writes and encrypts the key.
StoreKey(filename string, k *Key, auth string) error
// Joins filename with the key directory unless it is already absolute.
JoinPath(filename string) string
}
type plainKeyJSON struct {
Address string `json:"address"`
PrivateKey string `json:"privatekey"`
Id string `json:"id"`
Version int `json:"version"`
}
type encryptedKeyJSONV3 struct {
Address string `json:"address"`
Crypto CryptoJSON `json:"crypto"`
Id string `json:"id"`
Version int `json:"version"`
ExtendedKey CryptoJSON `json:"extendedkey"`
SubAccountIndex uint32 `json:"subaccountindex"`
}
type encryptedKeyJSONV1 struct {
Address string `json:"address"`
Crypto CryptoJSON `json:"crypto"`
Id string `json:"id"`
Version string `json:"version"`
}
type CryptoJSON struct {
Cipher string `json:"cipher"`
CipherText string `json:"ciphertext"`
CipherParams cipherparamsJSON `json:"cipherparams"`
KDF string `json:"kdf"`
KDFParams map[string]interface{} `json:"kdfparams"`
MAC string `json:"mac"`
}
type cipherparamsJSON struct {
IV string `json:"iv"`
}
func (k *Key) MarshalJSON() (j []byte, err error) {
jStruct := plainKeyJSON{
hex.EncodeToString(k.Address[:]),
hex.EncodeToString(crypto.FromECDSA(k.PrivateKey)),
k.Id.String(),
version,
}
j, err = json.Marshal(jStruct)
return j, err
}
func (k *Key) UnmarshalJSON(j []byte) (err error) {
keyJSON := new(plainKeyJSON)
err = json.Unmarshal(j, &keyJSON)
if err != nil {
return err
}
u := new(uuid.UUID)
*u, err = uuid.Parse(keyJSON.Id)
if err != nil {
return err
}
k.Id = *u
addr, err := hex.DecodeString(keyJSON.Address)
if err != nil {
return err
}
privkey, err := crypto.HexToECDSA(keyJSON.PrivateKey)
if err != nil {
return err
}
k.Address = common.BytesToAddress(addr)
k.PrivateKey = privkey
return nil
}
func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key {
id, err := uuid.NewRandom()
if err != nil {
panic(fmt.Sprintf("Could not create random uuid: %v", err))
}
key := &Key{
Id: id,
Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey),
PrivateKey: privateKeyECDSA,
}
return key
}
func newKeyForPurposeFromExtendedKey(keyPurpose extkeys.KeyPurpose, extKey *extkeys.ExtendedKey) (*Key, error) {
var (
extChild1, extChild2 *extkeys.ExtendedKey
err error
)
if extKey.Depth == 0 { // we are dealing with master key
// CKD#1 - main account
extChild1, err = extKey.ChildForPurpose(keyPurpose, 0)
if err != nil {
return &Key{}, err
}
// CKD#2 - sub-accounts root
extChild2, err = extKey.ChildForPurpose(keyPurpose, 1)
if err != nil {
return &Key{}, err
}
} else { // we are dealing with non-master key, so it is safe to persist and extend from it
extChild1 = extKey
extChild2 = extKey
}
privateKeyECDSA := extChild1.ToECDSA()
id, err := uuid.NewRandom()
if err != nil {
return nil, err
}
key := &Key{
Id: id,
Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey),
PrivateKey: privateKeyECDSA,
ExtendedKey: extChild2,
}
return key, nil
}
// NewKeyForDirectICAP generates a key whose address fits into < 155 bits so it can fit
// into the Direct ICAP spec. For simplicity and easier compatibility with other libs, we
// retry until the first byte is 0.
func NewKeyForDirectICAP(rand io.Reader) *Key {
randBytes := make([]byte, 64)
_, err := rand.Read(randBytes)
if err != nil {
panic("key generation: could not read from random source: " + err.Error())
}
reader := bytes.NewReader(randBytes)
privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), reader)
if err != nil {
panic("key generation: ecdsa.GenerateKey failed: " + err.Error())
}
key := newKeyFromECDSA(privateKeyECDSA)
if !strings.HasPrefix(key.Address.Hex(), "0x00") {
return NewKeyForDirectICAP(rand)
}
return key
}
func newKey(rand io.Reader) (*Key, error) {
privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand)
if err != nil {
return nil, err
}
return newKeyFromECDSA(privateKeyECDSA), nil
}
func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, accounts.Account, error) {
key, err := newKey(rand)
if err != nil {
return nil, accounts.Account{}, err
}
a := accounts.Account{
Address: key.Address,
URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))},
}
if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
zeroKey(key.PrivateKey)
return nil, a, err
}
return key, a, err
}
func writeTemporaryKeyFile(file string, content []byte) (string, error) {
// Create the keystore directory with appropriate permissions
// in case it is not present yet.
const dirPerm = 0700
if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil {
return "", err
}
// Atomic write: create a temporary hidden file first
// then move it into place. TempFile assigns mode 0600.
f, err := os.CreateTemp(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
if err != nil {
return "", err
}
if _, err := f.Write(content); err != nil {
f.Close()
os.Remove(f.Name())
return "", err
}
f.Close()
return f.Name(), nil
}
func writeKeyFile(file string, content []byte) error {
name, err := writeTemporaryKeyFile(file, content)
if err != nil {
return err
}
return os.Rename(name, file)
}
// keyFileName implements the naming convention for keyfiles:
// UTC--<created_at UTC ISO8601>--<address hex>
func keyFileName(keyAddr common.Address) string {
ts := time.Now().UTC()
return fmt.Sprintf("UTC--%s--%s", toISO8601(ts), hex.EncodeToString(keyAddr[:]))
}
func toISO8601(t time.Time) string {
var tz string
name, offset := t.Zone()
if name == "UTC" {
tz = "Z"
} else {
tz = fmt.Sprintf("%03d00", offset/3600)
}
return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s",
t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
}
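// Editor's note: illustrative sketch, not part of the upstream file; the address
// below is arbitrary and the timestamp depends on the wall clock.
func exampleKeyFileName() {
	addr := common.HexToAddress("0xf17f52151EbEF6C7334FAD080c5704D77216b732")
	fmt.Println(keyFileName(addr))
	// e.g. UTC--2023-11-12T13-29-38.000000000Z--f17f52151ebef6c7334fad080c5704d77216b732
}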

View File

@@ -0,0 +1,588 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package keystore implements encrypted storage of secp256k1 private keys.
//
// Keys are stored as encrypted JSON files according to the Web3 Secret Storage specification.
// See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition for more information.
package keystore
import (
"crypto/ecdsa"
crand "crypto/rand"
"errors"
"math/big"
"os"
"path/filepath"
"reflect"
"runtime"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/event"
"github.com/google/uuid"
"github.com/status-im/status-go/extkeys"
)
var (
ErrLocked = accounts.NewAuthNeededError("password or unlock")
ErrNoMatch = errors.New("no key for given address or file")
ErrDecrypt = errors.New("could not decrypt key with given password")
// ErrAccountAlreadyExists is returned if an account attempted to import is
// already present in the keystore.
ErrAccountAlreadyExists = errors.New("account already exists")
)
// KeyStoreType is the reflect type of a keystore backend.
var KeyStoreType = reflect.TypeOf(&KeyStore{})
// KeyStoreScheme is the protocol scheme prefixing account and wallet URLs.
const KeyStoreScheme = "keystore"
// Maximum time between wallet refreshes (if filesystem notifications don't work).
const walletRefreshCycle = 3 * time.Second
// KeyStore manages a key storage directory on disk.
type KeyStore struct {
storage keyStore // Storage backend, might be cleartext or encrypted
cache *accountCache // In-memory account cache over the filesystem storage
changes chan struct{} // Channel receiving change notifications from the cache
unlocked map[common.Address]*unlocked // Currently unlocked account (decrypted private keys)
wallets []accounts.Wallet // Wallet wrappers around the individual key files
updateFeed event.Feed // Event feed to notify wallet additions/removals
updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
updating bool // Whether the event notification loop is running
mu sync.RWMutex
importMu sync.Mutex // Import Mutex locks the import to prevent two insertions from racing
}
type unlocked struct {
*Key
abort chan struct{}
}
// NewKeyStore creates a keystore for the given directory.
func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore {
keydir, _ = filepath.Abs(keydir)
ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP, false}}
ks.init(keydir)
return ks
}
// NewPlaintextKeyStore creates a keystore for the given directory.
// Deprecated: Use NewKeyStore.
func NewPlaintextKeyStore(keydir string) *KeyStore {
keydir, _ = filepath.Abs(keydir)
ks := &KeyStore{storage: &keyStorePlain{keydir}}
ks.init(keydir)
return ks
}
func (ks *KeyStore) init(keydir string) {
// Lock the mutex since the account cache might call back with events
ks.mu.Lock()
defer ks.mu.Unlock()
// Initialize the set of unlocked keys and the account cache
ks.unlocked = make(map[common.Address]*unlocked)
ks.cache, ks.changes = newAccountCache(keydir)
// TODO: In order for this finalizer to work, there must be no references
// to ks. addressCache doesn't keep a reference but unlocked keys do,
// so the finalizer will not trigger until all timed unlocks have expired.
runtime.SetFinalizer(ks, func(m *KeyStore) {
m.cache.close()
})
// Create the initial list of wallets from the cache
accs := ks.cache.accounts()
ks.wallets = make([]accounts.Wallet, len(accs))
for i := 0; i < len(accs); i++ {
ks.wallets[i] = &keystoreWallet{account: accs[i], keystore: ks}
}
}
// Wallets implements accounts.Backend, returning all single-key wallets from the
// keystore directory.
func (ks *KeyStore) Wallets() []accounts.Wallet {
// Make sure the list of wallets is in sync with the account cache
ks.refreshWallets()
ks.mu.RLock()
defer ks.mu.RUnlock()
cpy := make([]accounts.Wallet, len(ks.wallets))
copy(cpy, ks.wallets)
return cpy
}
// refreshWallets retrieves the current account list and based on that does any
// necessary wallet refreshes.
func (ks *KeyStore) refreshWallets() {
// Retrieve the current list of accounts
ks.mu.Lock()
accs := ks.cache.accounts()
// Transform the current list of wallets into the new one
var (
wallets = make([]accounts.Wallet, 0, len(accs))
events []accounts.WalletEvent
)
for _, account := range accs {
// Drop wallets while they were in front of the next account
for len(ks.wallets) > 0 && ks.wallets[0].URL().Cmp(account.URL) < 0 {
events = append(events, accounts.WalletEvent{Wallet: ks.wallets[0], Kind: accounts.WalletDropped})
ks.wallets = ks.wallets[1:]
}
// If there are no more wallets or the account is before the next, wrap new wallet
if len(ks.wallets) == 0 || ks.wallets[0].URL().Cmp(account.URL) > 0 {
wallet := &keystoreWallet{account: account, keystore: ks}
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
wallets = append(wallets, wallet)
continue
}
// If the account is the same as the first wallet, keep it
if ks.wallets[0].Accounts()[0] == account {
wallets = append(wallets, ks.wallets[0])
ks.wallets = ks.wallets[1:]
continue
}
}
// Drop any leftover wallets and set the new batch
for _, wallet := range ks.wallets {
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
}
ks.wallets = wallets
ks.mu.Unlock()
// Fire all wallet events and return
for _, event := range events {
ks.updateFeed.Send(event)
}
}
// Subscribe implements accounts.Backend, creating an async subscription to
// receive notifications on the addition or removal of keystore wallets.
func (ks *KeyStore) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
// We need the mutex to reliably start/stop the update loop
ks.mu.Lock()
defer ks.mu.Unlock()
// Subscribe the caller and track the subscriber count
sub := ks.updateScope.Track(ks.updateFeed.Subscribe(sink))
// Subscribers require an active notification loop, start it
if !ks.updating {
ks.updating = true
go ks.updater()
}
return sub
}
// updater is responsible for maintaining an up-to-date list of wallets stored in
// the keystore, and for firing wallet addition/removal events. It listens for
// account change events from the underlying account cache, and also periodically
// forces a manual refresh (only triggers for systems where the filesystem notifier
// is not running).
func (ks *KeyStore) updater() {
for {
// Wait for an account update or a refresh timeout
select {
case <-ks.changes:
case <-time.After(walletRefreshCycle):
}
// Run the wallet refresher
ks.refreshWallets()
// If all our subscribers left, stop the updater
ks.mu.Lock()
if ks.updateScope.Count() == 0 {
ks.updating = false
ks.mu.Unlock()
return
}
ks.mu.Unlock()
}
}
// HasAddress reports whether a key with the given address is present.
func (ks *KeyStore) HasAddress(addr common.Address) bool {
return ks.cache.hasAddress(addr)
}
// Accounts returns all key files present in the directory.
func (ks *KeyStore) Accounts() []accounts.Account {
return ks.cache.accounts()
}
// AccountDecryptedKey returns the decrypted key for the account (provided that the password is correct).
func (ks *KeyStore) AccountDecryptedKey(a accounts.Account, auth string) (accounts.Account, *Key, error) {
return ks.getDecryptedKey(a, auth)
}
// Delete deletes the key matched by account.
// If the account contains no filename, the address must match a unique key.
func (ks *KeyStore) Delete(a accounts.Account) error {
// The order is crucial here. The key is dropped from the
// cache after the file is gone so that a reload happening in
// between won't insert it into the cache again.
a, err := ks.Find(a)
if err != nil {
return err
}
err = os.Remove(a.URL.Path)
if err == nil {
ks.cache.delete(a)
ks.refreshWallets()
}
return err
}
// SignHash calculates a ECDSA signature for the given hash. The produced
// signature is in the [R || S || V] format where V is 0 or 1.
func (ks *KeyStore) SignHash(a accounts.Account, hash []byte) ([]byte, error) {
// Look up the key to sign with and abort if it cannot be found
ks.mu.RLock()
defer ks.mu.RUnlock()
unlockedKey, found := ks.unlocked[a.Address]
if !found {
return nil, ErrLocked
}
// Sign the hash using plain ECDSA operations
return crypto.Sign(hash, unlockedKey.PrivateKey)
}
// SignTx signs the given transaction with the requested account.
func (ks *KeyStore) SignTx(a accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Look up the key to sign with and abort if it cannot be found
ks.mu.RLock()
defer ks.mu.RUnlock()
unlockedKey, found := ks.unlocked[a.Address]
if !found {
return nil, ErrLocked
}
// Depending on the presence of the chain ID, sign with 2718 or homestead
signer := types.LatestSignerForChainID(chainID)
return types.SignTx(tx, signer, unlockedKey.PrivateKey)
}
// SignHashWithPassphrase signs hash if the private key matching the given address
// can be decrypted with the given passphrase. The produced signature is in the
// [R || S || V] format where V is 0 or 1.
func (ks *KeyStore) SignHashWithPassphrase(a accounts.Account, passphrase string, hash []byte) (signature []byte, err error) {
_, key, err := ks.getDecryptedKey(a, passphrase)
if err != nil {
return nil, err
}
defer zeroKey(key.PrivateKey)
return crypto.Sign(hash, key.PrivateKey)
}
// SignTxWithPassphrase signs the transaction if the private key matching the
// given address can be decrypted with the given passphrase.
func (ks *KeyStore) SignTxWithPassphrase(a accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
_, key, err := ks.getDecryptedKey(a, passphrase)
if err != nil {
return nil, err
}
defer zeroKey(key.PrivateKey)
// Depending on the presence of the chain ID, sign with or without replay protection.
signer := types.LatestSignerForChainID(chainID)
return types.SignTx(tx, signer, key.PrivateKey)
}
// Unlock unlocks the given account indefinitely.
func (ks *KeyStore) Unlock(a accounts.Account, passphrase string) error {
return ks.TimedUnlock(a, passphrase, 0)
}
// Lock removes the private key with the given address from memory.
func (ks *KeyStore) Lock(addr common.Address) error {
ks.mu.Lock()
if unl, found := ks.unlocked[addr]; found {
ks.mu.Unlock()
ks.expire(addr, unl, time.Duration(0)*time.Nanosecond)
} else {
ks.mu.Unlock()
}
return nil
}
// TimedUnlock unlocks the given account with the passphrase. The account
// stays unlocked for the duration of timeout. A timeout of 0 unlocks the account
// until the program exits. The account must match a unique key file.
//
// If the account address is already unlocked for a duration, TimedUnlock extends or
// shortens the active unlock timeout. If the address was previously unlocked
// indefinitely the timeout is not altered.
func (ks *KeyStore) TimedUnlock(a accounts.Account, passphrase string, timeout time.Duration) error {
a, key, err := ks.getDecryptedKey(a, passphrase)
if err != nil {
return err
}
ks.mu.Lock()
defer ks.mu.Unlock()
u, found := ks.unlocked[a.Address]
if found {
if u.abort == nil {
// The address was unlocked indefinitely, so unlocking
// it with a timeout would be confusing.
zeroKey(key.PrivateKey)
return nil
}
// Terminate the expire goroutine and replace it below.
close(u.abort)
}
if timeout > 0 {
u = &unlocked{Key: key, abort: make(chan struct{})}
go ks.expire(a.Address, u, timeout)
} else {
u = &unlocked{Key: key}
}
ks.unlocked[a.Address] = u
return nil
}
// Find resolves the given account into a unique entry in the keystore.
func (ks *KeyStore) Find(a accounts.Account) (accounts.Account, error) {
ks.cache.maybeReload()
ks.cache.mu.Lock()
a, err := ks.cache.find(a)
ks.cache.mu.Unlock()
return a, err
}
func (ks *KeyStore) getDecryptedKey(a accounts.Account, auth string) (accounts.Account, *Key, error) {
a, err := ks.Find(a)
if err != nil {
return a, nil, err
}
key, err := ks.storage.GetKey(a.Address, a.URL.Path, auth)
return a, key, err
}
func (ks *KeyStore) expire(addr common.Address, u *unlocked, timeout time.Duration) {
t := time.NewTimer(timeout)
defer t.Stop()
select {
case <-u.abort:
// just quit
case <-t.C:
ks.mu.Lock()
// only drop if it's still the same key instance that this expire
// goroutine was launched with. We can check that using pointer equality
// because the map stores a new pointer every time the key is
// unlocked.
if ks.unlocked[addr] == u {
zeroKey(u.PrivateKey)
delete(ks.unlocked, addr)
}
ks.mu.Unlock()
}
}
// NewAccount generates a new key and stores it into the key directory,
// encrypting it with the passphrase.
func (ks *KeyStore) NewAccount(passphrase string) (accounts.Account, error) {
_, account, err := storeNewKey(ks.storage, crand.Reader, passphrase)
if err != nil {
return accounts.Account{}, err
}
// Add the account to the cache immediately rather
// than waiting for file system notifications to pick it up.
ks.cache.add(account)
ks.refreshWallets()
return account, nil
}
// Export exports as a JSON key, encrypted with newPassphrase.
func (ks *KeyStore) Export(a accounts.Account, passphrase, newPassphrase string) (keyJSON []byte, err error) {
_, key, err := ks.getDecryptedKey(a, passphrase)
if err != nil {
return nil, err
}
var N, P int
if store, ok := ks.storage.(*keyStorePassphrase); ok {
N, P = store.scryptN, store.scryptP
} else {
N, P = StandardScryptN, StandardScryptP
}
return EncryptKey(key, newPassphrase, N, P)
}
// Import stores the given encrypted JSON key into the key directory.
func (ks *KeyStore) Import(keyJSON []byte, passphrase, newPassphrase string) (accounts.Account, error) {
key, err := DecryptKey(keyJSON, passphrase)
if key != nil && key.PrivateKey != nil {
defer zeroKey(key.PrivateKey)
}
if err != nil {
return accounts.Account{}, err
}
ks.importMu.Lock()
defer ks.importMu.Unlock()
if ks.cache.hasAddress(key.Address) {
return accounts.Account{
Address: key.Address,
}, ErrAccountAlreadyExists
}
return ks.importKey(key, newPassphrase)
}
// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase.
func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (accounts.Account, error) {
ks.importMu.Lock()
defer ks.importMu.Unlock()
key := newKeyFromECDSA(priv)
if ks.cache.hasAddress(key.Address) {
return accounts.Account{}, ErrAccountAlreadyExists
}
return ks.importKey(key, passphrase)
}
// ImportSingleExtendedKey imports an extended key setting it in both the PrivateKey and ExtendedKey fields
// of the Key struct.
// ImportExtendedKey was used in older versions of Status, where PrivateKey is set to the BIP44 key at index 0,
// and ExtendedKey is the extended key of the BIP44 key at index 1.
func (ks *KeyStore) ImportSingleExtendedKey(extKey *extkeys.ExtendedKey, passphrase string) (accounts.Account, error) {
privateKeyECDSA := extKey.ToECDSA()
id, err := uuid.NewRandom()
if err != nil {
return accounts.Account{}, err
}
key := &Key{
Id: id,
Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey),
PrivateKey: privateKeyECDSA,
ExtendedKey: extKey,
}
if ks.cache.hasAddress(key.Address) {
return accounts.Account{}, ErrAccountAlreadyExists
}
return ks.importKey(key, passphrase)
}
// ImportExtendedKey stores an ECDSA key (obtained from an extended key) along with CKD#2 (the root for sub-accounts).
// If key file is not found, it is created. Key is encrypted with the given passphrase.
// Deprecated: status-go is now using ImportSingleExtendedKey
func (ks *KeyStore) ImportExtendedKey(extKey *extkeys.ExtendedKey, passphrase string) (accounts.Account, error) {
return ks.ImportExtendedKeyForPurpose(extkeys.KeyPurposeWallet, extKey, passphrase)
}
// ImportExtendedKeyForPurpose stores an ECDSA key (obtained from an extended key) along with CKD#2 (the root for sub-accounts).
// If key file is not found, it is created. Key is encrypted with the given passphrase.
// Deprecated: status-go is now using ImportSingleExtendedKey
func (ks *KeyStore) ImportExtendedKeyForPurpose(keyPurpose extkeys.KeyPurpose, extKey *extkeys.ExtendedKey, passphrase string) (accounts.Account, error) {
key, err := newKeyForPurposeFromExtendedKey(keyPurpose, extKey)
if err != nil {
zeroKey(key.PrivateKey)
return accounts.Account{}, err
}
// if account is already imported, return cached version
if ks.cache.hasAddress(key.Address) {
a := accounts.Account{
Address: key.Address,
}
ks.cache.maybeReload()
ks.cache.mu.Lock()
a, err := ks.cache.find(a)
ks.cache.mu.Unlock()
if err != nil {
zeroKey(key.PrivateKey)
return a, err
}
return a, nil
}
return ks.importKey(key, passphrase)
}
func (ks *KeyStore) importKey(key *Key, passphrase string) (accounts.Account, error) {
a := accounts.Account{Address: key.Address, URL: accounts.URL{Scheme: KeyStoreScheme, Path: ks.storage.JoinPath(keyFileName(key.Address))}}
if err := ks.storage.StoreKey(a.URL.Path, key, passphrase); err != nil {
return accounts.Account{}, err
}
ks.cache.add(a)
ks.refreshWallets()
return a, nil
}
func (ks *KeyStore) IncSubAccountIndex(a accounts.Account, passphrase string) error {
a, key, err := ks.getDecryptedKey(a, passphrase)
if err != nil {
return err
}
key.SubAccountIndex++
return ks.storage.StoreKey(a.URL.Path, key, passphrase)
}
// Update changes the passphrase of an existing account.
func (ks *KeyStore) Update(a accounts.Account, passphrase, newPassphrase string) error {
a, key, err := ks.getDecryptedKey(a, passphrase)
if err != nil {
return err
}
return ks.storage.StoreKey(a.URL.Path, key, newPassphrase)
}
// ImportPreSaleKey decrypts the given Ethereum presale wallet and stores
// a key file in the key directory. The key file is encrypted with the same passphrase.
func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (accounts.Account, error) {
a, _, err := importPreSaleKey(ks.storage, keyJSON, passphrase)
if err != nil {
return a, err
}
ks.cache.add(a)
ks.refreshWallets()
return a, nil
}
// isUpdating returns whether the event notification loop is running.
// This method is mainly meant for tests.
func (ks *KeyStore) isUpdating() bool {
ks.mu.RLock()
defer ks.mu.RUnlock()
return ks.updating
}
// zeroKey zeroes a private key in memory.
func zeroKey(k *ecdsa.PrivateKey) {
if k == nil {
return
}
b := k.D.Bits()
for i := range b {
b[i] = 0
}
}
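// Illustrative usage sketch (not part of the original file; the key directory,
// passphrase and the NewKeyStore constructor parameters below are assumptions):
//
//	ks := NewKeyStore("/tmp/example-keys", LightScryptN, LightScryptP)
//	priv, _ := crypto.GenerateKey()
//	acc, err := ks.ImportECDSA(priv, "example-passphrase")
//	if err == nil {
//		fmt.Println("imported", acc.Address.Hex())
//	}
//	zeroKey(priv) // wipe the plaintext key from memory once it is stored on disk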


@@ -0,0 +1,488 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
/*
This key store behaves as KeyStorePlain with the difference that
the private key is encrypted and on disk uses another JSON encoding.
The crypto is documented at https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition
*/
package keystore
import (
"bytes"
"crypto/aes"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/crypto"
"github.com/google/uuid"
"github.com/status-im/status-go/extkeys"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/scrypt"
)
const (
keyHeaderKDF = "scrypt"
// StandardScryptN is the N parameter of Scrypt encryption algorithm, using 256MB
// memory and taking approximately 1s CPU time on a modern processor.
StandardScryptN = 1 << 18
// StandardScryptP is the P parameter of Scrypt encryption algorithm, using 256MB
// memory and taking approximately 1s CPU time on a modern processor.
StandardScryptP = 1
// LightScryptN is the N parameter of Scrypt encryption algorithm, using 4MB
// memory and taking approximately 100ms CPU time on a modern processor.
LightScryptN = 1 << 12
// LightScryptP is the P parameter of Scrypt encryption algorithm, using 4MB
// memory and taking approximately 100ms CPU time on a modern processor.
LightScryptP = 6
scryptR = 8
scryptDKLen = 32
)
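// Illustrative sketch (not part of the original file; 'key' and 'passphrase' are
// placeholders): the constants above are passed straight through to the scrypt KDF,
// trading decryption time and memory for brute-force resistance, e.g.
//
//	blob, err := EncryptKey(key, passphrase, StandardScryptN, StandardScryptP) // ~256MB, ~1s
//	blob, err = EncryptKey(key, passphrase, LightScryptN, LightScryptP)        // ~4MB, ~100ms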
type keyStorePassphrase struct {
keysDirPath string
scryptN int
scryptP int
// skipKeyFileVerification disables the security feature which reads back and
// decrypts any newly created keyfiles. This should be 'false' in all
// cases except tests -- setting this to 'true' is not recommended.
skipKeyFileVerification bool
}
func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
// Load the key from the keystore and decrypt its contents
keyjson, err := os.ReadFile(filename)
if err != nil {
return nil, err
}
key, err := DecryptKey(keyjson, auth)
if err != nil {
return nil, err
}
// Make sure we're really operating on the requested key (no swap attacks)
if key.Address != addr {
return nil, fmt.Errorf("key content mismatch: have account %x, want %x", key.Address, addr)
}
return key, nil
}
// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
func StoreKey(dir, auth string, scryptN, scryptP int) (accounts.Account, error) {
_, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP, false}, rand.Reader, auth)
return a, err
}
func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error {
keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP)
if err != nil {
return err
}
// Write into temporary file
tmpName, err := writeTemporaryKeyFile(filename, keyjson)
if err != nil {
return err
}
if !ks.skipKeyFileVerification {
// Verify that we can decrypt the file with the given password.
_, err = ks.GetKey(key.Address, tmpName, auth)
if err != nil {
msg := "An error was encountered when saving and verifying the keystore file. \n" +
"This indicates that the keystore is corrupted. \n" +
"The corrupted file is stored at \n%v\n" +
"Please file a ticket at:\n\n" +
"https://github.com/ethereum/go-ethereum/issues." +
"The error was : %s"
//lint:ignore ST1005 This is a message for the user
return fmt.Errorf(msg, tmpName, err)
}
}
return os.Rename(tmpName, filename)
}
func (ks keyStorePassphrase) JoinPath(filename string) string {
if filepath.IsAbs(filename) {
return filename
}
return filepath.Join(ks.keysDirPath, filename)
}
// EncryptDataV3 encrypts the data given as 'data' with the password 'auth'.
func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
salt := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
derivedKey, err := scrypt.Key(auth, salt, scryptN, scryptR, scryptP, scryptDKLen)
if err != nil {
return CryptoJSON{}, err
}
encryptKey := derivedKey[:16]
iv := make([]byte, aes.BlockSize) // 16
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
cipherText, err := aesCTRXOR(encryptKey, data, iv)
if err != nil {
return CryptoJSON{}, err
}
mac := crypto.Keccak256(derivedKey[16:32], cipherText)
scryptParamsJSON := make(map[string]interface{}, 5)
scryptParamsJSON["n"] = scryptN
scryptParamsJSON["r"] = scryptR
scryptParamsJSON["p"] = scryptP
scryptParamsJSON["dklen"] = scryptDKLen
scryptParamsJSON["salt"] = hex.EncodeToString(salt)
cipherParamsJSON := cipherparamsJSON{
IV: hex.EncodeToString(iv),
}
cryptoStruct := CryptoJSON{
Cipher: "aes-128-ctr",
CipherText: hex.EncodeToString(cipherText),
CipherParams: cipherParamsJSON,
KDF: keyHeaderKDF,
KDFParams: scryptParamsJSON,
MAC: hex.EncodeToString(mac),
}
return cryptoStruct, nil
}
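// Illustrative round trip (not part of the original file; the plaintext and
// passphrase are placeholders):
//
//	cj, err := EncryptDataV3([]byte("secret"), []byte("passphrase"), LightScryptN, LightScryptP)
//	plain, err := DecryptDataV3(cj, "passphrase")
//	// plain == []byte("secret") once the MAC check passes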
// EncryptKey encrypts a key using the specified scrypt parameters into a json
// blob that can be decrypted later on.
func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
cryptoStruct, err := EncryptDataV3(keyBytes, []byte(auth), scryptN, scryptP)
if err != nil {
return nil, err
}
encryptedExtendedKey, err := EncryptExtendedKey(key.ExtendedKey, auth, scryptN, scryptP)
if err != nil {
return nil, err
}
encryptedKeyJSONV3 := encryptedKeyJSONV3{
hex.EncodeToString(key.Address[:]),
cryptoStruct,
key.Id.String(),
version,
encryptedExtendedKey,
key.SubAccountIndex,
}
return json.Marshal(encryptedKeyJSONV3)
}
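// Illustrative sketch (not part of the original file): the marshalled blob is a
// Web3 Secret Storage v3 document, roughly of the shape
//
//	{"address":"1a2b...","id":"<uuid>","version":3,
//	 "crypto":{"cipher":"aes-128-ctr","ciphertext":"...","cipherparams":{"iv":"..."},
//	           "kdf":"scrypt","kdfparams":{"dklen":32,"n":262144,"p":1,"r":8,"salt":"..."},
//	           "mac":"..."}}
//
// extended by status-go with the encrypted extended key and sub-account index fields.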
func EncryptExtendedKey(extKey *extkeys.ExtendedKey, auth string, scryptN, scryptP int) (CryptoJSON, error) {
if extKey == nil {
return CryptoJSON{}, nil
}
authArray := []byte(auth)
salt := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, salt); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
derivedKey, err := scrypt.Key(authArray, salt, scryptN, scryptR, scryptP, scryptDKLen)
if err != nil {
return CryptoJSON{}, err
}
encryptKey := derivedKey[:16]
keyBytes := []byte(extKey.String())
iv := make([]byte, aes.BlockSize) // 16
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
panic("reading from crypto/rand failed: " + err.Error())
}
cipherText, err := aesCTRXOR(encryptKey, keyBytes, iv)
if err != nil {
return CryptoJSON{}, err
}
mac := crypto.Keccak256(derivedKey[16:32], cipherText)
scryptParamsJSON := make(map[string]interface{}, 5)
scryptParamsJSON["n"] = scryptN
scryptParamsJSON["r"] = scryptR
scryptParamsJSON["p"] = scryptP
scryptParamsJSON["dklen"] = scryptDKLen
scryptParamsJSON["salt"] = hex.EncodeToString(salt)
cipherParamsJSON := cipherparamsJSON{
IV: hex.EncodeToString(iv),
}
return CryptoJSON{
Cipher: "aes-128-ctr",
CipherText: hex.EncodeToString(cipherText),
CipherParams: cipherParamsJSON,
KDF: "scrypt",
KDFParams: scryptParamsJSON,
MAC: hex.EncodeToString(mac),
}, nil
}
// DecryptKey decrypts a key from a json blob, returning the private key itself.
func DecryptKey(keyjson []byte, auth string) (*Key, error) {
// Parse the json into a simple map to fetch the key version
m := make(map[string]interface{})
if err := json.Unmarshal(keyjson, &m); err != nil {
return nil, err
}
// Depending on the version try to parse one way or another
var (
keyBytes, keyId []byte
err error
extKeyBytes []byte
extKey *extkeys.ExtendedKey
)
subAccountIndex, ok := m["subaccountindex"].(float64)
if !ok {
subAccountIndex = 0
}
if version, ok := m["version"].(string); ok && version == "1" {
k := new(encryptedKeyJSONV1)
if err := json.Unmarshal(keyjson, k); err != nil {
return nil, err
}
keyBytes, keyId, err = decryptKeyV1(k, auth)
if err != nil {
return nil, err
}
extKey, err = extkeys.NewKeyFromString(extkeys.EmptyExtendedKeyString)
} else {
k := new(encryptedKeyJSONV3)
if err := json.Unmarshal(keyjson, k); err != nil {
return nil, err
}
keyBytes, keyId, err = decryptKeyV3(k, auth)
if err != nil {
return nil, err
}
extKeyBytes, err = decryptExtendedKey(k, auth)
if err != nil {
return nil, err
}
extKey, err = extkeys.NewKeyFromString(string(extKeyBytes))
}
// Handle any decryption errors and return the key
if err != nil {
return nil, err
}
key := crypto.ToECDSAUnsafe(keyBytes)
id, err := uuid.FromBytes(keyId)
if err != nil {
return nil, err
}
return &Key{
Id: id,
Address: crypto.PubkeyToAddress(key.PublicKey),
PrivateKey: key,
ExtendedKey: extKey,
SubAccountIndex: uint32(subAccountIndex),
}, nil
}
func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
if cryptoJson.Cipher != "aes-128-ctr" {
return nil, fmt.Errorf("cipher not supported: %v", cryptoJson.Cipher)
}
mac, err := hex.DecodeString(cryptoJson.MAC)
if err != nil {
return nil, err
}
iv, err := hex.DecodeString(cryptoJson.CipherParams.IV)
if err != nil {
return nil, err
}
cipherText, err := hex.DecodeString(cryptoJson.CipherText)
if err != nil {
return nil, err
}
derivedKey, err := getKDFKey(cryptoJson, auth)
if err != nil {
return nil, err
}
calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
if !bytes.Equal(calculatedMAC, mac) {
return nil, ErrDecrypt
}
plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)
if err != nil {
return nil, err
}
return plainText, err
}
func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
if keyProtected.Version != version {
return nil, nil, fmt.Errorf("version not supported: %v", keyProtected.Version)
}
keyUUID, err := uuid.Parse(keyProtected.Id)
if err != nil {
return nil, nil, err
}
keyId = keyUUID[:]
plainText, err := DecryptDataV3(keyProtected.Crypto, auth)
if err != nil {
return nil, nil, err
}
return plainText, keyId, err
}
func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byte, keyId []byte, err error) {
keyUUID, err := uuid.Parse(keyProtected.Id)
if err != nil {
return nil, nil, err
}
keyId = keyUUID[:]
mac, err := hex.DecodeString(keyProtected.Crypto.MAC)
if err != nil {
return nil, nil, err
}
iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)
if err != nil {
return nil, nil, err
}
cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)
if err != nil {
return nil, nil, err
}
derivedKey, err := getKDFKey(keyProtected.Crypto, auth)
if err != nil {
return nil, nil, err
}
calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
if !bytes.Equal(calculatedMAC, mac) {
return nil, nil, ErrDecrypt
}
plainText, err := aesCBCDecrypt(crypto.Keccak256(derivedKey[:16])[:16], cipherText, iv)
if err != nil {
return nil, nil, err
}
return plainText, keyId, err
}
func decryptExtendedKey(keyProtected *encryptedKeyJSONV3, auth string) (plainText []byte, err error) {
if len(keyProtected.ExtendedKey.CipherText) == 0 {
return []byte(extkeys.EmptyExtendedKeyString), nil
}
if keyProtected.Version != version {
return nil, fmt.Errorf("Version not supported: %v", keyProtected.Version)
}
if keyProtected.ExtendedKey.Cipher != "aes-128-ctr" {
return nil, fmt.Errorf("Cipher not supported: %v", keyProtected.ExtendedKey.Cipher)
}
mac, err := hex.DecodeString(keyProtected.ExtendedKey.MAC)
if err != nil {
return nil, err
}
iv, err := hex.DecodeString(keyProtected.ExtendedKey.CipherParams.IV)
if err != nil {
return nil, err
}
cipherText, err := hex.DecodeString(keyProtected.ExtendedKey.CipherText)
if err != nil {
return nil, err
}
derivedKey, err := getKDFKey(keyProtected.ExtendedKey, auth)
if err != nil {
return nil, err
}
calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
if !bytes.Equal(calculatedMAC, mac) {
return nil, ErrDecrypt
}
plainText, err = aesCTRXOR(derivedKey[:16], cipherText, iv)
if err != nil {
return nil, err
}
return plainText, err
}
func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
authArray := []byte(auth)
salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string))
if err != nil {
return nil, err
}
dkLen := ensureInt(cryptoJSON.KDFParams["dklen"])
if cryptoJSON.KDF == keyHeaderKDF {
n := ensureInt(cryptoJSON.KDFParams["n"])
r := ensureInt(cryptoJSON.KDFParams["r"])
p := ensureInt(cryptoJSON.KDFParams["p"])
return scrypt.Key(authArray, salt, n, r, p, dkLen)
} else if cryptoJSON.KDF == "pbkdf2" {
c := ensureInt(cryptoJSON.KDFParams["c"])
prf := cryptoJSON.KDFParams["prf"].(string)
if prf != "hmac-sha256" {
return nil, fmt.Errorf("unsupported PBKDF2 PRF: %s", prf)
}
key := pbkdf2.Key(authArray, salt, c, dkLen, sha256.New)
return key, nil
}
return nil, fmt.Errorf("unsupported KDF: %s", cryptoJSON.KDF)
}
// TODO: can we do without this when unmarshalling dynamic JSON?
// Integers in the KDF params end up as float64 rather than int because
// encoding/json decodes every JSON number into a float64 when the
// destination is an interface{}.
func ensureInt(x interface{}) int {
res, ok := x.(int)
if !ok {
res = int(x.(float64))
}
return res
}


@@ -0,0 +1,61 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/ethereum/go-ethereum/common"
)
type keyStorePlain struct {
keysDirPath string
}
func (ks keyStorePlain) GetKey(addr common.Address, filename, auth string) (*Key, error) {
fd, err := os.Open(filename)
if err != nil {
return nil, err
}
defer fd.Close()
key := new(Key)
if err := json.NewDecoder(fd).Decode(key); err != nil {
return nil, err
}
if key.Address != addr {
return nil, fmt.Errorf("key content mismatch: have address %x, want %x", key.Address, addr)
}
return key, nil
}
func (ks keyStorePlain) StoreKey(filename string, key *Key, auth string) error {
content, err := json.Marshal(key)
if err != nil {
return err
}
return writeKeyFile(filename, content)
}
func (ks keyStorePlain) JoinPath(filename string) string {
if filepath.IsAbs(filename) {
return filename
}
return filepath.Join(ks.keysDirPath, filename)
}


@@ -0,0 +1,150 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"crypto/aes"
"crypto/cipher"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/crypto"
"github.com/google/uuid"
"golang.org/x/crypto/pbkdf2"
)
// creates a Key and stores that in the given KeyStore by decrypting a presale key JSON
func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accounts.Account, *Key, error) {
key, err := decryptPreSaleKey(keyJSON, password)
if err != nil {
return accounts.Account{}, nil, err
}
key.Id, err = uuid.NewRandom()
if err != nil {
return accounts.Account{}, nil, err
}
a := accounts.Account{
Address: key.Address,
URL: accounts.URL{
Scheme: KeyStoreScheme,
Path: keyStore.JoinPath(keyFileName(key.Address)),
},
}
err = keyStore.StoreKey(a.URL.Path, key, password)
return a, key, err
}
func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error) {
preSaleKeyStruct := struct {
EncSeed string
EthAddr string
Email string
BtcAddr string
}{}
err = json.Unmarshal(fileContent, &preSaleKeyStruct)
if err != nil {
return nil, err
}
encSeedBytes, err := hex.DecodeString(preSaleKeyStruct.EncSeed)
if err != nil {
return nil, errors.New("invalid hex in encSeed")
}
if len(encSeedBytes) < 16 {
return nil, errors.New("invalid encSeed, too short")
}
iv := encSeedBytes[:16]
cipherText := encSeedBytes[16:]
/*
See https://github.com/ethereum/pyethsaletool
pyethsaletool generates the encryption key from the password by
2000 rounds of PBKDF2 with HMAC-SHA-256, using the password itself as the salt.
A 16-byte key length is used within PBKDF2 and the resulting key is used as the AES key.
*/
passBytes := []byte(password)
derivedKey := pbkdf2.Key(passBytes, passBytes, 2000, 16, sha256.New)
plainText, err := aesCBCDecrypt(derivedKey, cipherText, iv)
if err != nil {
return nil, err
}
ethPriv := crypto.Keccak256(plainText)
ecKey := crypto.ToECDSAUnsafe(ethPriv)
key = &Key{
Id: uuid.UUID{},
Address: crypto.PubkeyToAddress(ecKey.PublicKey),
PrivateKey: ecKey,
}
derivedAddr := hex.EncodeToString(key.Address.Bytes()) // needed because .Hex() gives leading "0x"
expectedAddr := preSaleKeyStruct.EthAddr
if derivedAddr != expectedAddr {
err = fmt.Errorf("decrypted addr '%s' not equal to expected addr '%s'", derivedAddr, expectedAddr)
}
return key, err
}
func aesCTRXOR(key, inText, iv []byte) ([]byte, error) {
// AES-128 is selected due to size of encryptKey.
aesBlock, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
stream := cipher.NewCTR(aesBlock, iv)
outText := make([]byte, len(inText))
stream.XORKeyStream(outText, inText)
return outText, err
}
func aesCBCDecrypt(key, cipherText, iv []byte) ([]byte, error) {
aesBlock, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
decrypter := cipher.NewCBCDecrypter(aesBlock, iv)
paddedPlaintext := make([]byte, len(cipherText))
decrypter.CryptBlocks(paddedPlaintext, cipherText)
plaintext := pkcs7Unpad(paddedPlaintext)
if plaintext == nil {
return nil, ErrDecrypt
}
return plaintext, err
}
// From https://leanpub.com/gocrypto/read#leanpub-auto-block-cipher-modes
func pkcs7Unpad(in []byte) []byte {
if len(in) == 0 {
return nil
}
padding := in[len(in)-1]
if int(padding) > len(in) || padding > aes.BlockSize {
return nil
} else if padding == 0 {
return nil
}
for i := len(in) - 1; i > len(in)-int(padding)-1; i-- {
if in[i] != padding {
return nil
}
}
return in[:len(in)-int(padding)]
}
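// Illustrative example (not part of the original file): with aes.BlockSize == 16,
//
//	pkcs7Unpad([]byte{0xDE, 0xAD, 0xBE, 0xEF, 0x04, 0x04, 0x04, 0x04})
//
// returns {0xDE, 0xAD, 0xBE, 0xEF}, while zero, oversized or inconsistent padding
// bytes make it return nil (which callers translate into ErrDecrypt).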


@@ -0,0 +1,150 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package keystore
import (
"math/big"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
)
// keystoreWallet implements the accounts.Wallet interface for the original
// keystore.
type keystoreWallet struct {
account accounts.Account // Single account contained in this wallet
keystore *KeyStore // Keystore where the account originates from
}
// URL implements accounts.Wallet, returning the URL of the account within.
func (w *keystoreWallet) URL() accounts.URL {
return w.account.URL
}
// Status implements accounts.Wallet, returning whether the account held by the
// keystore wallet is unlocked or not.
func (w *keystoreWallet) Status() (string, error) {
w.keystore.mu.RLock()
defer w.keystore.mu.RUnlock()
if _, ok := w.keystore.unlocked[w.account.Address]; ok {
return "Unlocked", nil
}
return "Locked", nil
}
// Open implements accounts.Wallet, but is a noop for plain wallets since there
// is no connection or decryption step necessary to access the list of accounts.
func (w *keystoreWallet) Open(passphrase string) error { return nil }
// Close implements accounts.Wallet, but is a noop for plain wallets since there
// is no meaningful open operation.
func (w *keystoreWallet) Close() error { return nil }
// Accounts implements accounts.Wallet, returning an account list consisting of
// a single account that the plain keystore wallet contains.
func (w *keystoreWallet) Accounts() []accounts.Account {
return []accounts.Account{w.account}
}
// Contains implements accounts.Wallet, returning whether a particular account is
// or is not wrapped by this wallet instance.
func (w *keystoreWallet) Contains(account accounts.Account) bool {
return account.Address == w.account.Address && (account.URL == (accounts.URL{}) || account.URL == w.account.URL)
}
// Derive implements accounts.Wallet, but is a noop for plain wallets since there
// is no notion of hierarchical account derivation for plain keystore accounts.
func (w *keystoreWallet) Derive(path accounts.DerivationPath, pin bool) (accounts.Account, error) {
return accounts.Account{}, accounts.ErrNotSupported
}
// SelfDerive implements accounts.Wallet, but is a noop for plain wallets since
// there is no notion of hierarchical account derivation for plain keystore accounts.
func (w *keystoreWallet) SelfDerive(bases []accounts.DerivationPath, chain ethereum.ChainStateReader) {
}
// signHash attempts to sign the given hash with
// the given account. If the wallet does not wrap this particular account, an
// error is returned to avoid account leakage (even though in theory we may be
// able to sign via our shared keystore backend).
func (w *keystoreWallet) signHash(account accounts.Account, hash []byte) ([]byte, error) {
// Make sure the requested account is contained within
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
return w.keystore.SignHash(account, hash)
}
// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed.
func (w *keystoreWallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) {
return w.signHash(account, crypto.Keccak256(data))
}
// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed.
func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) {
// Make sure the requested account is contained within
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
return w.keystore.SignHashWithPassphrase(account, passphrase, crypto.Keccak256(data))
}
// SignText implements accounts.Wallet, attempting to sign the hash of
// the given text with the given account.
func (w *keystoreWallet) SignText(account accounts.Account, text []byte) ([]byte, error) {
return w.signHash(account, accounts.TextHash(text))
}
// SignTextWithPassphrase implements accounts.Wallet, attempting to sign the
// hash of the given text with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) {
// Make sure the requested account is contained within
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
return w.keystore.SignHashWithPassphrase(account, passphrase, accounts.TextHash(text))
}
// SignTx implements accounts.Wallet, attempting to sign the given transaction
// with the given account. If the wallet does not wrap this particular account,
// an error is returned to avoid account leakage (even though in theory we may
// be able to sign via our shared keystore backend).
func (w *keystoreWallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
return w.keystore.SignTx(account, tx, chainID)
}
// SignTxWithPassphrase implements accounts.Wallet, attempting to sign the given
// transaction with the given account using passphrase as extra authentication.
func (w *keystoreWallet) SignTxWithPassphrase(account accounts.Account, passphrase string, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) {
// Make sure the requested account is contained within
if !w.Contains(account) {
return nil, accounts.ErrUnknownAccount
}
// Account seems valid, request the keystore to sign
return w.keystore.SignTxWithPassphrase(account, passphrase, tx, chainID)
}


@@ -0,0 +1,114 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build (darwin && !ios && cgo) || freebsd || (linux && !arm64) || netbsd || solaris
// +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris
package keystore
import (
"time"
"github.com/ethereum/go-ethereum/log"
"github.com/rjeczalik/notify"
)
type watcher struct {
ac *accountCache
running bool // set to true when runloop begins
runEnded bool // set to true when runloop ends
starting bool // set to true prior to runloop starting
ev chan notify.EventInfo
quit chan struct{}
}
func newWatcher(ac *accountCache) *watcher {
return &watcher{
ac: ac,
ev: make(chan notify.EventInfo, 10),
quit: make(chan struct{}),
}
}
// enabled reports whether native filesystem notifications are supported; on this platform it returns true.
func (*watcher) enabled() bool { return true }
// start begins the watcher loop in the background if one is not already running
// or starting up. The caller must hold w.ac.mu.
func (w *watcher) start() {
if w.starting || w.running {
return
}
w.starting = true
go w.loop()
}
func (w *watcher) close() {
close(w.quit)
}
func (w *watcher) loop() {
defer func() {
w.ac.mu.Lock()
w.running = false
w.starting = false
w.runEnded = true
w.ac.mu.Unlock()
}()
logger := log.New("path", w.ac.keydir)
if err := notify.Watch(w.ac.keydir, w.ev, notify.All); err != nil {
logger.Trace("Failed to watch keystore folder", "err", err)
return
}
defer notify.Stop(w.ev)
logger.Trace("Started watching keystore folder")
defer logger.Trace("Stopped watching keystore folder")
w.ac.mu.Lock()
w.running = true
w.ac.mu.Unlock()
// Wait for file system events and reload.
// When an event occurs, the reload call is delayed a bit so that
// multiple events arriving quickly only cause a single reload.
var (
debounceDuration = 500 * time.Millisecond
rescanTriggered = false
debounce = time.NewTimer(0)
)
// Ignore initial trigger
if !debounce.Stop() {
<-debounce.C
}
defer debounce.Stop()
for {
select {
case <-w.quit:
return
case <-w.ev:
// Trigger the scan (with delay), if not already triggered
if !rescanTriggered {
debounce.Reset(debounceDuration)
rescanTriggered = true
}
case <-debounce.C:
w.ac.scanAccounts()
rescanTriggered = false
}
}
}


@@ -0,0 +1,35 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build (darwin && !cgo) || ios || (linux && arm64) || windows || (!darwin && !freebsd && !linux && !netbsd && !solaris)
// +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris
// This is the fallback implementation of directory watching.
// It is used on unsupported platforms.
package keystore
type watcher struct {
running bool
runEnded bool
}
func newWatcher(*accountCache) *watcher { return new(watcher) }
func (*watcher) start() {}
func (*watcher) close() {}
// enabled returns false on systems not supported.
func (*watcher) enabled() bool { return false }


@@ -0,0 +1,272 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"reflect"
"sort"
"sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/event"
)
// managerSubBufferSize determines how many incoming wallet events
// the manager will buffer in its channel.
const managerSubBufferSize = 50
// Config contains the settings of the global account manager.
//
// TODO(rjl493456442, karalabe, holiman): Get rid of this when account management
// is removed in favor of Clef.
type Config struct {
InsecureUnlockAllowed bool // Whether account unlocking in insecure environment is allowed
}
// newBackendEvent lets the manager know it should
// track the given backend for wallet updates.
type newBackendEvent struct {
backend Backend
processed chan struct{} // Informs event emitter that backend has been integrated
}
// Manager is an overarching account manager that can communicate with various
// backends for signing transactions.
type Manager struct {
config *Config // Global account manager configurations
backends map[reflect.Type][]Backend // Index of backends currently registered
updaters []event.Subscription // Wallet update subscriptions for all backends
updates chan WalletEvent // Subscription sink for backend wallet changes
newBackends chan newBackendEvent // Incoming backends to be tracked by the manager
wallets []Wallet // Cache of all wallets from all registered backends
feed event.Feed // Wallet feed notifying of arrivals/departures
quit chan chan error
term chan struct{} // Channel is closed upon termination of the update loop
lock sync.RWMutex
}
// NewManager creates a generic account manager to sign transactions via various
// supported backends.
func NewManager(config *Config, backends ...Backend) *Manager {
// Retrieve the initial list of wallets from the backends and sort by URL
var wallets []Wallet
for _, backend := range backends {
wallets = merge(wallets, backend.Wallets()...)
}
// Subscribe to wallet notifications from all backends
updates := make(chan WalletEvent, managerSubBufferSize)
subs := make([]event.Subscription, len(backends))
for i, backend := range backends {
subs[i] = backend.Subscribe(updates)
}
// Assemble the account manager and return
am := &Manager{
config: config,
backends: make(map[reflect.Type][]Backend),
updaters: subs,
updates: updates,
newBackends: make(chan newBackendEvent),
wallets: wallets,
quit: make(chan chan error),
term: make(chan struct{}),
}
for _, backend := range backends {
kind := reflect.TypeOf(backend)
am.backends[kind] = append(am.backends[kind], backend)
}
go am.update()
return am
}
// Close terminates the account manager's internal notification processes.
func (am *Manager) Close() error {
errc := make(chan error)
am.quit <- errc
return <-errc
}
// Config returns the configuration of account manager.
func (am *Manager) Config() *Config {
return am.config
}
// AddBackend starts the tracking of an additional backend for wallet updates.
// cmd/geth assumes that, once this function returns, the backends have already been integrated.
func (am *Manager) AddBackend(backend Backend) {
done := make(chan struct{})
am.newBackends <- newBackendEvent{backend, done}
<-done
}
// update is the wallet event loop listening for notifications from the backends
// and updating the cache of wallets.
func (am *Manager) update() {
// Close all subscriptions when the manager terminates
defer func() {
am.lock.Lock()
for _, sub := range am.updaters {
sub.Unsubscribe()
}
am.updaters = nil
am.lock.Unlock()
}()
// Loop until termination
for {
select {
case event := <-am.updates:
// Wallet event arrived, update local cache
am.lock.Lock()
switch event.Kind {
case WalletArrived:
am.wallets = merge(am.wallets, event.Wallet)
case WalletDropped:
am.wallets = drop(am.wallets, event.Wallet)
}
am.lock.Unlock()
// Notify any listeners of the event
am.feed.Send(event)
case event := <-am.newBackends:
am.lock.Lock()
// Update caches
backend := event.backend
am.wallets = merge(am.wallets, backend.Wallets()...)
am.updaters = append(am.updaters, backend.Subscribe(am.updates))
kind := reflect.TypeOf(backend)
am.backends[kind] = append(am.backends[kind], backend)
am.lock.Unlock()
close(event.processed)
case errc := <-am.quit:
// Manager terminating, return
errc <- nil
// Signals event emitters the loop is not receiving values
// to prevent them from getting stuck.
close(am.term)
return
}
}
}
// Backends retrieves the backend(s) with the given type from the account manager.
func (am *Manager) Backends(kind reflect.Type) []Backend {
am.lock.RLock()
defer am.lock.RUnlock()
return am.backends[kind]
}
// Wallets returns all signer accounts registered under this account manager.
func (am *Manager) Wallets() []Wallet {
am.lock.RLock()
defer am.lock.RUnlock()
return am.walletsNoLock()
}
// walletsNoLock returns all registered wallets. Callers must hold am.lock.
func (am *Manager) walletsNoLock() []Wallet {
cpy := make([]Wallet, len(am.wallets))
copy(cpy, am.wallets)
return cpy
}
// Wallet retrieves the wallet associated with a particular URL.
func (am *Manager) Wallet(url string) (Wallet, error) {
am.lock.RLock()
defer am.lock.RUnlock()
parsed, err := parseURL(url)
if err != nil {
return nil, err
}
for _, wallet := range am.walletsNoLock() {
if wallet.URL() == parsed {
return wallet, nil
}
}
return nil, ErrUnknownWallet
}
// Accounts returns all account addresses of all wallets within the account manager
func (am *Manager) Accounts() []common.Address {
am.lock.RLock()
defer am.lock.RUnlock()
addresses := make([]common.Address, 0) // return [] instead of nil if empty
for _, wallet := range am.wallets {
for _, account := range wallet.Accounts() {
addresses = append(addresses, account.Address)
}
}
return addresses
}
// Find attempts to locate the wallet corresponding to a specific account. Since
// accounts can be dynamically added to and removed from wallets, this method has
// a linear runtime in the number of wallets.
func (am *Manager) Find(account Account) (Wallet, error) {
am.lock.RLock()
defer am.lock.RUnlock()
for _, wallet := range am.wallets {
if wallet.Contains(account) {
return wallet, nil
}
}
return nil, ErrUnknownAccount
}
// Subscribe creates an async subscription to receive notifications when the
// manager detects the arrival or departure of a wallet from any of its backends.
func (am *Manager) Subscribe(sink chan<- WalletEvent) event.Subscription {
return am.feed.Subscribe(sink)
}
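// Illustrative usage sketch (not part of the original file):
//
//	sink := make(chan WalletEvent, 16)
//	sub := am.Subscribe(sink)
//	defer sub.Unsubscribe()
//	for ev := range sink {
//		// react to WalletArrived / WalletDropped notifications
//		_ = ev
//	}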
// merge is a sorted analogue of append for wallets, where the ordering of the
// origin list is preserved by inserting new wallets at the correct position.
//
// The original slice is assumed to be already sorted by URL.
func merge(slice []Wallet, wallets ...Wallet) []Wallet {
for _, wallet := range wallets {
n := sort.Search(len(slice), func(i int) bool { return slice[i].URL().Cmp(wallet.URL()) >= 0 })
if n == len(slice) {
slice = append(slice, wallet)
continue
}
slice = append(slice[:n], append([]Wallet{wallet}, slice[n:]...)...)
}
return slice
}
// drop is the counterpart of merge, which looks up wallets from within the sorted
// cache and removes the ones specified.
func drop(slice []Wallet, wallets ...Wallet) []Wallet {
for _, wallet := range wallets {
n := sort.Search(len(slice), func(i int) bool { return slice[i].URL().Cmp(wallet.URL()) >= 0 })
if n == len(slice) {
// Wallet not found, may happen during startup
continue
}
slice = append(slice[:n], slice[n+1:]...)
}
return slice
}

View File

@@ -0,0 +1,106 @@
# Using the smartcard wallet
## Requirements
* A USB smartcard reader
* A keycard that supports the status app
* PCSCD version 4.3 running on your system. **Only version 4.3 is currently supported.**
## Preparing the smartcard
**WARNING: FOLLOWING THESE INSTRUCTIONS WILL DESTROY THE MASTER KEY ON YOUR CARD. ONLY PROCEED IF NO FUNDS ARE ASSOCIATED WITH THESE ACCOUNTS**
You can use Status' [keycard-cli](https://github.com/status-im/keycard-cli), and you should get _at least_ version 2.1.1 of their [smartcard application](https://github.com/status-im/status-keycard/releases/download/2.2.1/keycard_v2.2.1.cap).
You also need to make sure that the PCSC daemon is running on your system.
Then, you can install the application to the card by typing:
```
keycard install -a keycard_v2.2.1.cap && keycard init
```
At the end of this process, you will be provided with a PIN, a PUK and a pairing password. Write them down, you'll need them shortly.
Start `geth` with the `console` command. You will notice the following warning:
```
WARN [04-09|16:58:38.898] Failed to open wallet url=keycard://044def09 err="smartcard: pairing password needed"
```
Write down the URL (`keycard://044def09` in this example). Then ask `geth` to open the wallet:
```
> personal.openWallet("keycard://044def09", "pairing password")
```
The pairing password has been generated during the card initialization process.
The process needs to be repeated once more with the PIN:
```
> personal.openWallet("keycard://044def09", "PIN number")
```
If everything goes well, you should see your new account when typing `personal` on the console:
```
> personal
WARN [04-09|17:02:07.330] Smartcard wallet account derivation failed url=keycard://044def09 err="Unexpected response status Cla=0x80, Ins=0xd1, Sw=0x6985"
{
listAccounts: [],
listWallets: [{
status: "Empty, waiting for initialization",
url: "keycard://044def09"
}],
...
}
```
So the communication with the card is working, but there is no key associated with this wallet. Let's create it:
```
> personal.initializeWallet("keycard://044def09")
"tilt ... impact"
```
You should get a list of words; this is your seed, so write them down. Your wallet should now be initialized:
```
> personal.listWallets
[{
accounts: [{
address: "0x678b7cd55c61917defb23546a41803c5bfefbc7a",
url: "keycard://044d/m/44'/60'/0'/0/0"
}],
status: "Online",
url: "keycard://044def09"
}]
```
You're all set!
## Usage
1. Start `geth` with the `console` command
2. Check the card's URL by checking `personal.listWallets`:
```
listWallets: [{
status: "Online, can derive public keys",
url: "keycard://a4d73015"
}]
```
3. Open the wallet; you will be prompted for your pairing password, then your PIN:
```
personal.openWallet("keycard://a4d73015")
```
4. Check that creation was successful by typing e.g. `personal`. Then use it like a regular wallet.
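For example (the addresses and value below are placeholders), once the wallet is open you can send a transaction from the keycard account just like from any other account:
```
> eth.sendTransaction({from: "0x678b7cd55c61917defb23546a41803c5bfefbc7a", to: "0x0123456789abcdef0123456789abcdef01234567", value: web3.toWei(0.01, "ether")})
```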
## Known issues
* Starting geth with a valid card seems to make firefox crash.
* PCSC version 4.4 should work, but is currently untested


@@ -0,0 +1,87 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package scwallet
import (
"bytes"
"encoding/binary"
"fmt"
)
// commandAPDU represents an application data unit sent to a smartcard.
type commandAPDU struct {
Cla, Ins, P1, P2 uint8 // Class, Instruction, Parameter 1, Parameter 2
Data []byte // Command data
Le uint8 // Command data length
}
// serialize serializes a command APDU.
func (ca commandAPDU) serialize() ([]byte, error) {
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.BigEndian, ca.Cla); err != nil {
return nil, err
}
if err := binary.Write(buf, binary.BigEndian, ca.Ins); err != nil {
return nil, err
}
if err := binary.Write(buf, binary.BigEndian, ca.P1); err != nil {
return nil, err
}
if err := binary.Write(buf, binary.BigEndian, ca.P2); err != nil {
return nil, err
}
if len(ca.Data) > 0 {
if err := binary.Write(buf, binary.BigEndian, uint8(len(ca.Data))); err != nil {
return nil, err
}
if err := binary.Write(buf, binary.BigEndian, ca.Data); err != nil {
return nil, err
}
}
if err := binary.Write(buf, binary.BigEndian, ca.Le); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
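// Illustrative example (not part of the original file): the command
//
//	commandAPDU{Cla: 0x80, Ins: 0x12, P1: 0x00, P2: 0x00, Data: []byte{0xDE, 0xAD}, Le: 0x00}
//
// serializes to the bytes 80 12 00 00 02 DE AD 00; the length byte 02 is only
// emitted because Data is non-empty.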
// responseAPDU represents an application data unit received from a smart card.
type responseAPDU struct {
Data []byte // response data
Sw1, Sw2 uint8 // status words 1 and 2
}
// deserialize deserializes a response APDU.
func (ra *responseAPDU) deserialize(data []byte) error {
if len(data) < 2 {
return fmt.Errorf("can not deserialize data: payload too short (%d < 2)", len(data))
}
ra.Data = make([]byte, len(data)-2)
buf := bytes.NewReader(data)
if err := binary.Read(buf, binary.BigEndian, &ra.Data); err != nil {
return err
}
if err := binary.Read(buf, binary.BigEndian, &ra.Sw1); err != nil {
return err
}
if err := binary.Read(buf, binary.BigEndian, &ra.Sw2); err != nil {
return err
}
return nil
}


@@ -0,0 +1,302 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// This package implements support for smartcard-based hardware wallets such as
// the one written by Status: https://github.com/status-im/hardware-wallet
//
// This implementation of smartcard wallets has a different interaction process
// from other types of hardware wallets. The process works like this:
//
// 1. (First use with a given client) Establish a pairing between hardware
// wallet and client. This requires a secret value called a 'pairing password'.
// You can pair with an unpaired wallet with `personal.openWallet(URI, pairing password)`.
// 2. (First use only) Initialize the wallet, which generates a keypair, stores
// it on the wallet, and returns it so the user can back it up. You can
// initialize a wallet with `personal.initializeWallet(URI)`.
// 3. Connect to the wallet using the pairing information established in step 1.
// You can connect to a paired wallet with `personal.openWallet(URI, PIN)`.
// 4. Interact with the wallet as normal.
package scwallet
import (
"encoding/json"
"io"
"os"
"path/filepath"
"sort"
"sync"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/log"
pcsc "github.com/gballet/go-libpcsclite"
)
// Scheme is the URI prefix for smartcard wallets.
const Scheme = "keycard"
// refreshCycle is the maximum time between wallet refreshes (if USB hotplug
// notifications don't work).
const refreshCycle = time.Second
// refreshThrottling is the minimum time between wallet refreshes to avoid thrashing.
const refreshThrottling = 500 * time.Millisecond
// smartcardPairing contains information about a smart card we have paired with
// or might pair with the hub.
type smartcardPairing struct {
PublicKey []byte `json:"publicKey"`
PairingIndex uint8 `json:"pairingIndex"`
PairingKey []byte `json:"pairingKey"`
Accounts map[common.Address]accounts.DerivationPath `json:"accounts"`
}
// Hub is an accounts.Backend that can find and handle generic PC/SC hardware wallets.
type Hub struct {
scheme string // Protocol scheme prefixing account and wallet URLs.
context *pcsc.Client
datadir string
pairings map[string]smartcardPairing
refreshed time.Time // Time instance when the list of wallets was last refreshed
wallets map[string]*Wallet // Mapping from reader names to wallet instances
updateFeed event.Feed // Event feed to notify wallet additions/removals
updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
updating bool // Whether the event notification loop is running
quit chan chan error
stateLock sync.RWMutex // Protects the internals of the hub from racey access
}
func (hub *Hub) readPairings() error {
hub.pairings = make(map[string]smartcardPairing)
pairingFile, err := os.Open(filepath.Join(hub.datadir, "smartcards.json"))
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
pairingData, err := io.ReadAll(pairingFile)
if err != nil {
return err
}
var pairings []smartcardPairing
if err := json.Unmarshal(pairingData, &pairings); err != nil {
return err
}
for _, pairing := range pairings {
hub.pairings[string(pairing.PublicKey)] = pairing
}
return nil
}
func (hub *Hub) writePairings() error {
pairingFile, err := os.OpenFile(filepath.Join(hub.datadir, "smartcards.json"), os.O_RDWR|os.O_CREATE, 0755)
if err != nil {
return err
}
defer pairingFile.Close()
pairings := make([]smartcardPairing, 0, len(hub.pairings))
for _, pairing := range hub.pairings {
pairings = append(pairings, pairing)
}
pairingData, err := json.Marshal(pairings)
if err != nil {
return err
}
if _, err := pairingFile.Write(pairingData); err != nil {
return err
}
return nil
}
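// Illustrative sketch (not part of the original file): smartcards.json holds a
// JSON array of pairings, roughly
//
//	[{"publicKey":"<base64>","pairingIndex":0,"pairingKey":"<base64>","accounts":{}}]
//
// since encoding/json encodes []byte fields as base64 strings.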
func (hub *Hub) pairing(wallet *Wallet) *smartcardPairing {
if pairing, ok := hub.pairings[string(wallet.PublicKey)]; ok {
return &pairing
}
return nil
}
func (hub *Hub) setPairing(wallet *Wallet, pairing *smartcardPairing) error {
if pairing == nil {
delete(hub.pairings, string(wallet.PublicKey))
} else {
hub.pairings[string(wallet.PublicKey)] = *pairing
}
return hub.writePairings()
}
// NewHub creates a new hardware wallet manager for smartcards.
func NewHub(daemonPath string, scheme string, datadir string) (*Hub, error) {
context, err := pcsc.EstablishContext(daemonPath, pcsc.ScopeSystem)
if err != nil {
return nil, err
}
hub := &Hub{
scheme: scheme,
context: context,
datadir: datadir,
wallets: make(map[string]*Wallet),
quit: make(chan chan error),
}
if err := hub.readPairings(); err != nil {
return nil, err
}
hub.refreshWallets()
return hub, nil
}
// Wallets implements accounts.Backend, returning all the currently tracked smart
// cards that appear to be hardware wallets.
func (hub *Hub) Wallets() []accounts.Wallet {
// Make sure the list of wallets is up to date
hub.refreshWallets()
hub.stateLock.RLock()
defer hub.stateLock.RUnlock()
cpy := make([]accounts.Wallet, 0, len(hub.wallets))
for _, wallet := range hub.wallets {
cpy = append(cpy, wallet)
}
sort.Sort(accounts.WalletsByURL(cpy))
return cpy
}
// refreshWallets scans the devices attached to the machine and updates the
// list of wallets based on the found devices.
func (hub *Hub) refreshWallets() {
// Don't scan the USB like crazy if the user fetches wallets in a loop
hub.stateLock.RLock()
elapsed := time.Since(hub.refreshed)
hub.stateLock.RUnlock()
if elapsed < refreshThrottling {
return
}
// Retrieve all the smart card readers to check for cards
readers, err := hub.context.ListReaders()
if err != nil {
// This is a perverted hack, the scard library returns an error if no card
// readers are present instead of simply returning an empty list. We don't
// want to fill the user's log with errors, so filter those out.
if err.Error() != "scard: Cannot find a smart card reader." {
log.Error("Failed to enumerate smart card readers", "err", err)
return
}
}
// Transform the current list of wallets into the new one
hub.stateLock.Lock()
events := []accounts.WalletEvent{}
seen := make(map[string]struct{})
for _, reader := range readers {
// Mark the reader as present
seen[reader] = struct{}{}
// If we already know about this card, skip to the next reader, otherwise clean up
if wallet, ok := hub.wallets[reader]; ok {
if err := wallet.ping(); err == nil {
continue
}
wallet.Close()
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
delete(hub.wallets, reader)
}
// New card detected, try to connect to it
card, err := hub.context.Connect(reader, pcsc.ShareShared, pcsc.ProtocolAny)
if err != nil {
log.Debug("Failed to open smart card", "reader", reader, "err", err)
continue
}
wallet := NewWallet(hub, card)
if err = wallet.connect(); err != nil {
log.Debug("Failed to connect to smart card", "reader", reader, "err", err)
card.Disconnect(pcsc.LeaveCard)
continue
}
// Card connected, start tracking it among the wallets
hub.wallets[reader] = wallet
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletArrived})
}
// Remove any wallets no longer present
for reader, wallet := range hub.wallets {
if _, ok := seen[reader]; !ok {
wallet.Close()
events = append(events, accounts.WalletEvent{Wallet: wallet, Kind: accounts.WalletDropped})
delete(hub.wallets, reader)
}
}
hub.refreshed = time.Now()
hub.stateLock.Unlock()
for _, event := range events {
hub.updateFeed.Send(event)
}
}
// Subscribe implements accounts.Backend, creating an async subscription to
// receive notifications on the addition or removal of smart card wallets.
func (hub *Hub) Subscribe(sink chan<- accounts.WalletEvent) event.Subscription {
// We need the mutex to reliably start/stop the update loop
hub.stateLock.Lock()
defer hub.stateLock.Unlock()
// Subscribe the caller and track the subscriber count
sub := hub.updateScope.Track(hub.updateFeed.Subscribe(sink))
// Subscribers require an active notification loop, start it
if !hub.updating {
hub.updating = true
go hub.updater()
}
return sub
}
// updater is responsible for maintaining an up-to-date list of wallets managed
// by the smart card hub, and for firing wallet addition/removal events.
func (hub *Hub) updater() {
for {
// TODO: Wait for a USB hotplug event (not supported yet) or a refresh timeout
// <-hub.changes
time.Sleep(refreshCycle)
// Run the wallet refresher
hub.refreshWallets()
// If all our subscribers left, stop the updater
hub.stateLock.Lock()
if hub.updateScope.Count() == 0 {
hub.updating = false
hub.stateLock.Unlock()
return
}
hub.stateLock.Unlock()
}
}


@@ -0,0 +1,339 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package scwallet
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"crypto/elliptic"
"crypto/rand"
"crypto/sha256"
"crypto/sha512"
"fmt"
"github.com/ethereum/go-ethereum/crypto"
pcsc "github.com/gballet/go-libpcsclite"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/text/unicode/norm"
)
const (
maxPayloadSize = 223
pairP1FirstStep = 0
pairP1LastStep = 1
scSecretLength = 32
scBlockSize = 16
insOpenSecureChannel = 0x10
insMutuallyAuthenticate = 0x11
insPair = 0x12
insUnpair = 0x13
pairingSalt = "Keycard Pairing Password Salt"
)
// SecureChannelSession enables secure communication with a hardware wallet.
type SecureChannelSession struct {
card *pcsc.Card // A handle to the smartcard for communication
secret []byte // A shared secret generated from our ECDSA keys
publicKey []byte // Our own ephemeral public key
PairingKey []byte // A permanent shared secret for a pairing, if present
sessionEncKey []byte // The current session encryption key
sessionMacKey []byte // The current session MAC key
iv []byte // The current IV
PairingIndex uint8 // The pairing index
}
// NewSecureChannelSession creates a new secure channel for the given card and public key.
func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSession, error) {
// Generate an ECDSA keypair for ourselves
key, err := crypto.GenerateKey()
if err != nil {
return nil, err
}
cardPublic, err := crypto.UnmarshalPubkey(keyData)
if err != nil {
return nil, fmt.Errorf("could not unmarshal public key from card: %v", err)
}
secret, _ := key.Curve.ScalarMult(cardPublic.X, cardPublic.Y, key.D.Bytes())
return &SecureChannelSession{
card: card,
secret: secret.Bytes(),
publicKey: elliptic.Marshal(crypto.S256(), key.PublicKey.X, key.PublicKey.Y),
}, nil
}
// Pair establishes a new pairing with the smartcard.
func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
secretHash := pbkdf2.Key(norm.NFKD.Bytes(pairingPassword), norm.NFKD.Bytes([]byte(pairingSalt)), 50000, 32, sha256.New)
challenge := make([]byte, 32)
if _, err := rand.Read(challenge); err != nil {
return err
}
response, err := s.pair(pairP1FirstStep, challenge)
if err != nil {
return err
}
md := sha256.New()
md.Write(secretHash[:])
md.Write(challenge)
expectedCryptogram := md.Sum(nil)
cardCryptogram := response.Data[:32]
cardChallenge := response.Data[32:64]
if !bytes.Equal(expectedCryptogram, cardCryptogram) {
return fmt.Errorf("invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
}
md.Reset()
md.Write(secretHash[:])
md.Write(cardChallenge)
response, err = s.pair(pairP1LastStep, md.Sum(nil))
if err != nil {
return err
}
md.Reset()
md.Write(secretHash[:])
md.Write(response.Data[1:])
s.PairingKey = md.Sum(nil)
s.PairingIndex = response.Data[0]
return nil
}
// Unpair disestablishes an existing pairing.
func (s *SecureChannelSession) Unpair() error {
if s.PairingKey == nil {
return fmt.Errorf("cannot unpair: not paired")
}
_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
if err != nil {
return err
}
s.PairingKey = nil
// Close channel
s.iv = nil
return nil
}
// Open initializes the secure channel.
func (s *SecureChannelSession) Open() error {
if s.iv != nil {
return fmt.Errorf("session already opened")
}
response, err := s.open()
if err != nil {
return err
}
// Generate the encryption/mac key by hashing our shared secret,
// pairing key, and the first bytes returned from the Open APDU.
md := sha512.New()
md.Write(s.secret)
md.Write(s.PairingKey)
md.Write(response.Data[:scSecretLength])
keyData := md.Sum(nil)
s.sessionEncKey = keyData[:scSecretLength]
s.sessionMacKey = keyData[scSecretLength : scSecretLength*2]
// The IV is the last bytes returned from the Open APDU.
s.iv = response.Data[scSecretLength:]
return s.mutuallyAuthenticate()
}
// mutuallyAuthenticate is an internal method to authenticate both ends of the
// connection.
func (s *SecureChannelSession) mutuallyAuthenticate() error {
data := make([]byte, scSecretLength)
if _, err := rand.Read(data); err != nil {
return err
}
response, err := s.transmitEncrypted(claSCWallet, insMutuallyAuthenticate, 0, 0, data)
if err != nil {
return err
}
if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: %#x%x", response.Sw1, response.Sw2)
}
if len(response.Data) != scSecretLength {
return fmt.Errorf("response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
}
return nil
}
// open is an internal method that sends an open APDU.
func (s *SecureChannelSession) open() (*responseAPDU, error) {
return transmit(s.card, &commandAPDU{
Cla: claSCWallet,
Ins: insOpenSecureChannel,
P1: s.PairingIndex,
P2: 0,
Data: s.publicKey,
Le: 0,
})
}
// pair is an internal method that sends a pair APDU.
func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error) {
return transmit(s.card, &commandAPDU{
Cla: claSCWallet,
Ins: insPair,
P1: p1,
P2: 0,
Data: data,
Le: 0,
})
}
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
if s.iv == nil {
return nil, fmt.Errorf("channel not open")
}
data, err := s.encryptAPDU(data)
if err != nil {
return nil, err
}
meta := [16]byte{cla, ins, p1, p2, byte(len(data) + scBlockSize)}
if err = s.updateIV(meta[:], data); err != nil {
return nil, err
}
fulldata := make([]byte, len(s.iv)+len(data))
copy(fulldata, s.iv)
copy(fulldata[len(s.iv):], data)
response, err := transmit(s.card, &commandAPDU{
Cla: cla,
Ins: ins,
P1: p1,
P2: p2,
Data: fulldata,
})
if err != nil {
return nil, err
}
rmeta := [16]byte{byte(len(response.Data))}
rmac := response.Data[:len(s.iv)]
rdata := response.Data[len(s.iv):]
plainData, err := s.decryptAPDU(rdata)
if err != nil {
return nil, err
}
if err = s.updateIV(rmeta[:], rdata); err != nil {
return nil, err
}
if !bytes.Equal(s.iv, rmac) {
return nil, fmt.Errorf("invalid MAC in response")
}
rapdu := &responseAPDU{}
rapdu.deserialize(plainData)
if rapdu.Sw1 != sw1Ok {
return nil, fmt.Errorf("unexpected response status Cla=%#x, Ins=%#x, Sw=%#x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
}
return rapdu, nil
}
// encryptAPDU is an internal method that serializes and encrypts an APDU.
func (s *SecureChannelSession) encryptAPDU(data []byte) ([]byte, error) {
if len(data) > maxPayloadSize {
return nil, fmt.Errorf("payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
}
data = pad(data, 0x80)
ret := make([]byte, len(data))
a, err := aes.NewCipher(s.sessionEncKey)
if err != nil {
return nil, err
}
crypter := cipher.NewCBCEncrypter(a, s.iv)
crypter.CryptBlocks(ret, data)
return ret, nil
}
// pad applies message padding to a 16 byte boundary.
func pad(data []byte, terminator byte) []byte {
padded := make([]byte, (len(data)/16+1)*16)
copy(padded, data)
padded[len(data)] = terminator
return padded
}
// decryptAPDU is an internal method that decrypts and deserializes an APDU.
func (s *SecureChannelSession) decryptAPDU(data []byte) ([]byte, error) {
a, err := aes.NewCipher(s.sessionEncKey)
if err != nil {
return nil, err
}
ret := make([]byte, len(data))
crypter := cipher.NewCBCDecrypter(a, s.iv)
crypter.CryptBlocks(ret, data)
return unpad(ret, 0x80)
}
// unpad strips padding from a message.
func unpad(data []byte, terminator byte) ([]byte, error) {
for i := 1; i <= 16; i++ {
switch data[len(data)-i] {
case 0:
continue
case terminator:
return data[:len(data)-i], nil
default:
return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
}
}
return nil, fmt.Errorf("expected end of padding, got 0")
}
// updateIV is an internal method that updates the initialization vector after
// each message exchanged.
func (s *SecureChannelSession) updateIV(meta, data []byte) error {
data = pad(data, 0)
a, err := aes.NewCipher(s.sessionMacKey)
if err != nil {
return err
}
crypter := cipher.NewCBCEncrypter(a, make([]byte, 16))
crypter.CryptBlocks(meta, meta)
crypter.CryptBlocks(data, data)
// The first 16 bytes of the last block is the MAC
s.iv = data[len(data)-32 : len(data)-16]
return nil
}
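
// Illustrative sketch (not part of the vendored file) of the secure channel flow
// implemented above: build a session from the card's ephemeral public key, pair
// with the user-supplied pairing password, then open the encrypted channel. How
// the pcsc.Card handle and the card's public key are obtained is applet-specific
// and elided here.
package example

import (
	"github.com/ethereum/go-ethereum/accounts/scwallet"
	pcsc "github.com/gballet/go-libpcsclite"
)

// pairAndOpen pairs a fresh session with the card and opens the channel.
func pairAndOpen(card *pcsc.Card, cardPubKey, pairingPassword []byte) (*scwallet.SecureChannelSession, error) {
	sc, err := scwallet.NewSecureChannelSession(card, cardPubKey)
	if err != nil {
		return nil, err
	}
	if err := sc.Pair(pairingPassword); err != nil {
		return nil, err
	}
	if err := sc.Open(); err != nil {
		return nil, err
	}
	return sc, nil
}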

File diff suppressed because it is too large

View File

@@ -0,0 +1,31 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
// AccountsByURL implements sort.Interface for []Account based on the URL field.
type AccountsByURL []Account
func (a AccountsByURL) Len() int { return len(a) }
func (a AccountsByURL) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a AccountsByURL) Less(i, j int) bool { return a[i].URL.Cmp(a[j].URL) < 0 }
// WalletsByURL implements sort.Interface for []Wallet based on the URL field.
type WalletsByURL []Wallet
func (w WalletsByURL) Len() int { return len(w) }
func (w WalletsByURL) Swap(i, j int) { w[i], w[j] = w[j], w[i] }
func (w WalletsByURL) Less(i, j int) bool { return w[i].URL().Cmp(w[j].URL()) < 0 }
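
// Illustrative sketch (not part of the vendored file): sorting accounts by their
// canonical URL with the sort.Interface implementation above.
package main

import (
	"fmt"
	"sort"

	"github.com/ethereum/go-ethereum/accounts"
)

func main() {
	accs := []accounts.Account{
		{URL: accounts.URL{Scheme: "keystore", Path: "/b"}},
		{URL: accounts.URL{Scheme: "keystore", Path: "/a"}},
	}
	sort.Sort(accounts.AccountsByURL(accs))
	fmt.Println(accs[0].URL.Path) // /a
}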

103
vendor/github.com/ethereum/go-ethereum/accounts/url.go generated vendored Normal file
View File

@@ -0,0 +1,103 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package accounts
import (
"encoding/json"
"errors"
"fmt"
"strings"
)
// URL represents the canonical identification URL of a wallet or account.
//
// It is a simplified version of url.URL, with the important limitations (which
// are considered features here) that it contains value-copyable components only,
// as well as that it doesn't do any URL encoding/decoding of special characters.
//
// The former is important to allow an account to be copied without leaving live
// references to the original version, whereas the latter is important to ensure
// one single canonical form as opposed to the many allowed by the RFC 3986 spec.
//
// As such, these URLs should not be used outside of the scope of an Ethereum
// wallet or account.
type URL struct {
Scheme string // Protocol scheme to identify a capable account backend
Path string // Path for the backend to identify a unique entity
}
// parseURL converts a user supplied URL into the accounts specific structure.
func parseURL(url string) (URL, error) {
parts := strings.Split(url, "://")
if len(parts) != 2 || parts[0] == "" {
return URL{}, errors.New("protocol scheme missing")
}
return URL{
Scheme: parts[0],
Path: parts[1],
}, nil
}
// String implements the stringer interface.
func (u URL) String() string {
if u.Scheme != "" {
return fmt.Sprintf("%s://%s", u.Scheme, u.Path)
}
return u.Path
}
// TerminalString implements the log.TerminalStringer interface.
func (u URL) TerminalString() string {
url := u.String()
if len(url) > 32 {
return url[:31] + ".."
}
return url
}
// MarshalJSON implements the json.Marshaller interface.
func (u URL) MarshalJSON() ([]byte, error) {
return json.Marshal(u.String())
}
// UnmarshalJSON parses url.
func (u *URL) UnmarshalJSON(input []byte) error {
var textURL string
err := json.Unmarshal(input, &textURL)
if err != nil {
return err
}
url, err := parseURL(textURL)
if err != nil {
return err
}
u.Scheme = url.Scheme
u.Path = url.Path
return nil
}
// Cmp compares x and y and returns:
//
// -1 if x < y
// 0 if x == y
// +1 if x > y
func (u URL) Cmp(url URL) int {
if u.Scheme == url.Scheme {
return strings.Compare(u.Path, url.Path)
}
return strings.Compare(u.Scheme, url.Scheme)
}
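
// Illustrative sketch (not part of the vendored file) of the URL helpers above:
// string form, JSON marshalling and ordering via Cmp (scheme first, then path).
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

func main() {
	a := accounts.URL{Scheme: "keystore", Path: "/tmp/keys/key1"}
	b := accounts.URL{Scheme: "ledger", Path: "0"}

	fmt.Println(a.String()) // keystore:///tmp/keys/key1

	blob, _ := json.Marshal(a)
	fmt.Println(string(blob)) // "keystore:///tmp/keys/key1"

	fmt.Println(a.Cmp(b) < 0) // true, "keystore" sorts before "ledger"
}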

57
vendor/github.com/ethereum/go-ethereum/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,57 @@
clone_depth: 5
version: "{branch}.{build}"
image:
- Ubuntu
- Visual Studio 2019
environment:
matrix:
- GETH_ARCH: amd64
GETH_MINGW: 'C:\msys64\mingw64'
- GETH_ARCH: 386
GETH_MINGW: 'C:\msys64\mingw32'
install:
- git submodule update --init --depth 1 --recursive
- go version
for:
# Linux has its own script without -arch and -cc.
# The linux builder also runs lint.
- matrix:
only:
- image: Ubuntu
build_script:
- go run build/ci.go lint
- go run build/ci.go install -dlgo
test_script:
- go run build/ci.go test -dlgo -coverage
# linux/386 is disabled.
- matrix:
exclude:
- image: Ubuntu
GETH_ARCH: 386
# Windows builds for amd64 + 386.
- matrix:
only:
- image: Visual Studio 2019
environment:
# We use gcc from MSYS2 because it is the most recent compiler version available on
# AppVeyor. Note: gcc.exe only works properly if the corresponding bin/ directory is
# contained in PATH.
GETH_CC: '%GETH_MINGW%\bin\gcc.exe'
PATH: '%GETH_MINGW%\bin;C:\Program Files (x86)\NSIS\;%PATH%'
build_script:
- 'echo %GETH_ARCH%'
- 'echo %GETH_CC%'
- '%GETH_CC% --version'
- go run build/ci.go install -dlgo -arch %GETH_ARCH% -cc %GETH_CC%
after_build:
# Upload builds. Note that ci.go makes this a no-op for PR builds.
- go run build/ci.go archive -arch %GETH_ARCH% -type zip -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
- go run build/ci.go nsis -arch %GETH_ARCH% -signer WINDOWS_SIGNING_KEY -upload gethstore/builds
test_script:
- go run build/ci.go test -dlgo -arch %GETH_ARCH% -cc %GETH_CC% -coverage

32
vendor/github.com/ethereum/go-ethereum/circle.yml generated vendored Normal file
View File

@@ -0,0 +1,32 @@
machine:
services:
- docker
dependencies:
cache_directories:
- "~/.ethash" # Cache the ethash DAG generated by hive for consecutive builds
- "~/.docker" # Cache all docker images manually to avoid lengthy rebuilds
override:
# Restore all previously cached docker images
- mkdir -p ~/.docker
- for img in `ls ~/.docker`; do docker load -i ~/.docker/$img; done
# Pull in and hive, restore cached ethash DAGs and do a dry run
- go get -u github.com/karalabe/hive
- (cd ~/.go_workspace/src/github.com/karalabe/hive && mkdir -p workspace/ethash/ ~/.ethash)
- (cd ~/.go_workspace/src/github.com/karalabe/hive && cp -r ~/.ethash/. workspace/ethash/)
- (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=NONE --test=. --sim=. --loglevel=6)
# Cache all the docker images and the ethash DAGs
- for img in `docker images | grep -v "^<none>" | tail -n +2 | awk '{print $1}'`; do docker save $img > ~/.docker/`echo $img | tr '/' ':'`.tar; done
- cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/ethash/. ~/.ethash
test:
override:
# Build Geth and move into a known folder
- make geth
- cp ./build/bin/geth $HOME/geth
# Run hive and move all generated logs into the public artifacts folder
- (cd ~/.go_workspace/src/github.com/karalabe/hive && hive --docker-noshell --client=go-ethereum:local --override=$HOME/geth --test=. --sim=.)
- cp -r ~/.go_workspace/src/github.com/karalabe/hive/workspace/logs/* $CIRCLE_ARTIFACTS

30
vendor/github.com/ethereum/go-ethereum/common/big.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import "math/big"
// Common big integers often used
var (
Big1 = big.NewInt(1)
Big2 = big.NewInt(2)
Big3 = big.NewInt(3)
Big0 = big.NewInt(0)
Big32 = big.NewInt(32)
Big256 = big.NewInt(256)
Big257 = big.NewInt(257)
)

View File

@@ -0,0 +1,188 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Adapted from: https://golang.org/src/crypto/cipher/xor.go
// Package bitutil implements fast bitwise operations.
package bitutil
import (
"runtime"
"unsafe"
)
const wordSize = int(unsafe.Sizeof(uintptr(0)))
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
// XORBytes xors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes xor'd.
func XORBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastXORBytes(dst, a, b)
}
return safeXORBytes(dst, a, b)
}
// fastXORBytes xors in bulk. It only works on architectures that support
// unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
for i := 0; i < w; i++ {
dw[i] = aw[i] ^ bw[i]
}
}
for i := n - n%wordSize; i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
// safeXORBytes xors one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
for i := 0; i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
// ANDBytes ands the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes and'd.
func ANDBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastANDBytes(dst, a, b)
}
return safeANDBytes(dst, a, b)
}
// fastANDBytes ands in bulk. It only works on architectures that support
// unaligned read/writes.
func fastANDBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
for i := 0; i < w; i++ {
dw[i] = aw[i] & bw[i]
}
}
for i := n - n%wordSize; i < n; i++ {
dst[i] = a[i] & b[i]
}
return n
}
// safeANDBytes ands one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeANDBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
for i := 0; i < n; i++ {
dst[i] = a[i] & b[i]
}
return n
}
// ORBytes ors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes or'd.
func ORBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastORBytes(dst, a, b)
}
return safeORBytes(dst, a, b)
}
// fastORBytes ors in bulk. It only works on architectures that support
// unaligned read/writes.
func fastORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
for i := 0; i < w; i++ {
dw[i] = aw[i] | bw[i]
}
}
for i := n - n%wordSize; i < n; i++ {
dst[i] = a[i] | b[i]
}
return n
}
// safeORBytes ors one by one. It works on all architectures, independent if
// it supports unaligned read/writes or not.
func safeORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
for i := 0; i < n; i++ {
dst[i] = a[i] | b[i]
}
return n
}
// TestBytes tests whether any bit is set in the input byte slice.
func TestBytes(p []byte) bool {
if supportsUnaligned {
return fastTestBytes(p)
}
return safeTestBytes(p)
}
// fastTestBytes tests for set bits in bulk. It only works on architectures that
// support unaligned read/writes.
func fastTestBytes(p []byte) bool {
n := len(p)
w := n / wordSize
if w > 0 {
pw := *(*[]uintptr)(unsafe.Pointer(&p))
for i := 0; i < w; i++ {
if pw[i] != 0 {
return true
}
}
}
for i := n - n%wordSize; i < n; i++ {
if p[i] != 0 {
return true
}
}
return false
}
// safeTestBytes tests for set bits one byte at a time. It works on all
// architectures, independent if it supports unaligned read/writes or not.
func safeTestBytes(p []byte) bool {
for i := 0; i < len(p); i++ {
if p[i] != 0 {
return true
}
}
return false
}
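
// Illustrative sketch (not part of the vendored file) of the bitwise helpers
// above. The destination slice must be at least as long as the shorter input;
// the helpers return how many bytes were processed.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
)

func main() {
	a := []byte{0xF0, 0x0F, 0x00}
	b := []byte{0xFF, 0x01, 0x02}
	dst := make([]byte, len(a))

	n := bitutil.XORBytes(dst, a, b)
	fmt.Printf("xor %d bytes: %x\n", n, dst) // xor 3 bytes: 0f0e02

	bitutil.ANDBytes(dst, a, b)
	fmt.Printf("and: %x\n", dst) // and: f00100

	fmt.Println(bitutil.TestBytes([]byte{0, 0, 0})) // false
}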

View File

@@ -0,0 +1,170 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package bitutil
import "errors"
var (
// errMissingData is returned from decompression if the byte referenced by
// the bitset header overflows the input data.
errMissingData = errors.New("missing bytes on input")
// errUnreferencedData is returned from decompression if not all bytes were used
// up from the input data after decompressing it.
errUnreferencedData = errors.New("extra bytes on input")
// errExceededTarget is returned from decompression if the bitset header has
// more bits defined than the number of target buffer space available.
errExceededTarget = errors.New("target data size exceeded")
// errZeroContent is returned from decompression if a data byte referenced in
// the bitset header is actually a zero byte.
errZeroContent = errors.New("zero byte in input content")
)
// The compression algorithm implemented by CompressBytes and DecompressBytes is
// optimized for sparse input data which contains a lot of zero bytes. Decompression
// requires knowledge of the decompressed data length.
//
// Compression works as follows:
//
// if data only contains zeroes,
// CompressBytes(data) == nil
// otherwise if len(data) <= 1,
// CompressBytes(data) == data
// otherwise:
// CompressBytes(data) == append(CompressBytes(nonZeroBitset(data)), nonZeroBytes(data)...)
// where
// nonZeroBitset(data) is a bit vector with len(data) bits (MSB first):
// nonZeroBitset(data)[i/8] && (1 << (7-i%8)) != 0 if data[i] != 0
// len(nonZeroBitset(data)) == (len(data)+7)/8
// nonZeroBytes(data) contains the non-zero bytes of data in the same order
// CompressBytes compresses the input byte slice according to the sparse bitset
// representation algorithm. If the result is bigger than the original input, no
// compression is done.
func CompressBytes(data []byte) []byte {
if out := bitsetEncodeBytes(data); len(out) < len(data) {
return out
}
cpy := make([]byte, len(data))
copy(cpy, data)
return cpy
}
// bitsetEncodeBytes compresses the input byte slice according to the sparse
// bitset representation algorithm.
func bitsetEncodeBytes(data []byte) []byte {
// Empty slices get compressed to nil
if len(data) == 0 {
return nil
}
// One byte slices compress to nil or retain the single byte
if len(data) == 1 {
if data[0] == 0 {
return nil
}
return data
}
// Calculate the bitset of set bytes, and gather the non-zero bytes
nonZeroBitset := make([]byte, (len(data)+7)/8)
nonZeroBytes := make([]byte, 0, len(data))
for i, b := range data {
if b != 0 {
nonZeroBytes = append(nonZeroBytes, b)
nonZeroBitset[i/8] |= 1 << byte(7-i%8)
}
}
if len(nonZeroBytes) == 0 {
return nil
}
return append(bitsetEncodeBytes(nonZeroBitset), nonZeroBytes...)
}
// DecompressBytes decompresses data with a known target size. If the input data
// matches the size of the target, it means no compression was done in the first
// place.
func DecompressBytes(data []byte, target int) ([]byte, error) {
if len(data) > target {
return nil, errExceededTarget
}
if len(data) == target {
cpy := make([]byte, len(data))
copy(cpy, data)
return cpy, nil
}
return bitsetDecodeBytes(data, target)
}
// bitsetDecodeBytes decompresses data with a known target size.
func bitsetDecodeBytes(data []byte, target int) ([]byte, error) {
out, size, err := bitsetDecodePartialBytes(data, target)
if err != nil {
return nil, err
}
if size != len(data) {
return nil, errUnreferencedData
}
return out, nil
}
// bitsetDecodePartialBytes decompresses data with a known target size, but does
// not enforce consuming all the input bytes. In addition to the decompressed
// output, the function returns the length of compressed input data corresponding
// to the output as the input slice may be longer.
func bitsetDecodePartialBytes(data []byte, target int) ([]byte, int, error) {
// Sanity check 0 targets to avoid infinite recursion
if target == 0 {
return nil, 0, nil
}
// Handle the zero and single byte corner cases
decomp := make([]byte, target)
if len(data) == 0 {
return decomp, 0, nil
}
if target == 1 {
decomp[0] = data[0] // copy to avoid referencing the input slice
if data[0] != 0 {
return decomp, 1, nil
}
return decomp, 0, nil
}
// Decompress the bitset of set bytes and distribute the non zero bytes
nonZeroBitset, ptr, err := bitsetDecodePartialBytes(data, (target+7)/8)
if err != nil {
return nil, ptr, err
}
for i := 0; i < 8*len(nonZeroBitset); i++ {
if nonZeroBitset[i/8]&(1<<byte(7-i%8)) != 0 {
// Make sure we have enough data to push into the correct slot
if ptr >= len(data) {
return nil, 0, errMissingData
}
if i >= len(decomp) {
return nil, 0, errExceededTarget
}
// Make sure the data is valid and push into the slot
if data[ptr] == 0 {
return nil, 0, errZeroContent
}
decomp[i] = data[ptr]
ptr++
}
}
return decomp, ptr, nil
}
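
// Illustrative sketch (not part of the vendored file) of the sparse bitset
// compression described above: mostly-zero input shrinks, and decompression
// needs the original length to restore it.
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common/bitutil"
)

func main() {
	data := make([]byte, 64)
	data[3], data[40] = 0xAA, 0x01 // sparse payload

	comp := bitutil.CompressBytes(data)
	fmt.Println(len(comp), "<", len(data)) // compressed form is shorter

	orig, err := bitutil.DecompressBytes(comp, len(data))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(orig, data)) // true
}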

151
vendor/github.com/ethereum/go-ethereum/common/bytes.go generated vendored Normal file
View File

@@ -0,0 +1,151 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package common contains various helper functions.
package common
import (
"encoding/hex"
"errors"
"github.com/ethereum/go-ethereum/common/hexutil"
)
// FromHex returns the bytes represented by the hexadecimal string s.
// s may be prefixed with "0x".
func FromHex(s string) []byte {
if has0xPrefix(s) {
s = s[2:]
}
if len(s)%2 == 1 {
s = "0" + s
}
return Hex2Bytes(s)
}
// CopyBytes returns an exact copy of the provided bytes.
func CopyBytes(b []byte) (copiedBytes []byte) {
if b == nil {
return nil
}
copiedBytes = make([]byte, len(b))
copy(copiedBytes, b)
return
}
// has0xPrefix validates str begins with '0x' or '0X'.
func has0xPrefix(str string) bool {
return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X')
}
// isHexCharacter reports whether c is a valid hexadecimal character.
func isHexCharacter(c byte) bool {
return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F')
}
// isHex reports whether str is a valid hexadecimal string of even length.
func isHex(str string) bool {
if len(str)%2 != 0 {
return false
}
for _, c := range []byte(str) {
if !isHexCharacter(c) {
return false
}
}
return true
}
// Bytes2Hex returns the hexadecimal encoding of d.
func Bytes2Hex(d []byte) string {
return hex.EncodeToString(d)
}
// Hex2Bytes returns the bytes represented by the hexadecimal string str.
func Hex2Bytes(str string) []byte {
h, _ := hex.DecodeString(str)
return h
}
// Hex2BytesFixed returns bytes of a specified fixed length flen.
func Hex2BytesFixed(str string, flen int) []byte {
h, _ := hex.DecodeString(str)
if len(h) == flen {
return h
}
if len(h) > flen {
return h[len(h)-flen:]
}
hh := make([]byte, flen)
copy(hh[flen-len(h):flen], h)
return hh
}
// ParseHexOrString tries to hex-decode str; if the 0x prefix is missing, it returns the raw bytes instead.
func ParseHexOrString(str string) ([]byte, error) {
b, err := hexutil.Decode(str)
if errors.Is(err, hexutil.ErrMissingPrefix) {
return []byte(str), nil
}
return b, err
}
// RightPadBytes zero-pads slice to the right up to length l.
func RightPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded, slice)
return padded
}
// LeftPadBytes zero-pads slice to the left up to length l.
func LeftPadBytes(slice []byte, l int) []byte {
if l <= len(slice) {
return slice
}
padded := make([]byte, l)
copy(padded[l-len(slice):], slice)
return padded
}
// TrimLeftZeroes returns a subslice of s without leading zeroes
func TrimLeftZeroes(s []byte) []byte {
idx := 0
for ; idx < len(s); idx++ {
if s[idx] != 0 {
break
}
}
return s[idx:]
}
// TrimRightZeroes returns a subslice of s without trailing zeroes
func TrimRightZeroes(s []byte) []byte {
idx := len(s)
for ; idx > 0; idx-- {
if s[idx-1] != 0 {
break
}
}
return s[:idx]
}
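
// Illustrative sketch (not part of the vendored file) of the byte helpers above:
// hex conversion tolerates an optional 0x prefix and odd length, and the
// pad/trim helpers work on raw slices.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	fmt.Printf("%x\n", common.FromHex("0xabc"))              // 0abc (odd length gets a leading zero nibble)
	fmt.Println(common.Bytes2Hex([]byte{0xde, 0xad}))        // dead
	fmt.Printf("%x\n", common.LeftPadBytes([]byte{1}, 4))    // 00000001
	fmt.Printf("%x\n", common.TrimLeftZeroes([]byte{0, 7}))  // 07
}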

52
vendor/github.com/ethereum/go-ethereum/common/debug.go generated vendored Normal file
View File

@@ -0,0 +1,52 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"fmt"
"os"
"runtime"
"runtime/debug"
"strings"
)
// Report prints a warning asking the user to submit an issue to the GitHub tracker.
func Report(extra ...interface{}) {
fmt.Fprintln(os.Stderr, "You've encountered a sought after, hard to reproduce bug. Please report this to the developers <3 https://github.com/ethereum/go-ethereum/issues")
fmt.Fprintln(os.Stderr, extra...)
_, file, line, _ := runtime.Caller(1)
fmt.Fprintf(os.Stderr, "%v:%v\n", file, line)
debug.PrintStack()
fmt.Fprintln(os.Stderr, "#### BUG! PLEASE REPORT ####")
}
// PrintDeprecationWarning prints the given string in a box using fmt.Println.
func PrintDeprecationWarning(str string) {
line := strings.Repeat("#", len(str)+4)
emptyLine := strings.Repeat(" ", len(str))
fmt.Printf(`
%s
# %s #
# %s #
# %s #
%s
`, line, emptyLine, str, emptyLine, line)
}

View File

@@ -0,0 +1,82 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"fmt"
"regexp"
"strings"
"time"
)
// PrettyDuration is a pretty printed version of a time.Duration value that cuts
// the unnecessary precision off from the formatted textual representation.
type PrettyDuration time.Duration
var prettyDurationRe = regexp.MustCompile(`\.[0-9]{4,}`)
// String implements the Stringer interface, allowing pretty printing of duration
// values rounded to three decimals.
func (d PrettyDuration) String() string {
label := time.Duration(d).String()
if match := prettyDurationRe.FindString(label); len(match) > 4 {
label = strings.Replace(label, match, match[:4], 1)
}
return label
}
// PrettyAge is a pretty printed version of a time.Duration value that rounds
// the values up to a single most significant unit, days/weeks/years included.
type PrettyAge time.Time
// ageUnits is a list of units the age pretty printing uses.
var ageUnits = []struct {
Size time.Duration
Symbol string
}{
{12 * 30 * 24 * time.Hour, "y"},
{30 * 24 * time.Hour, "mo"},
{7 * 24 * time.Hour, "w"},
{24 * time.Hour, "d"},
{time.Hour, "h"},
{time.Minute, "m"},
{time.Second, "s"},
}
// String implements the Stringer interface, allowing pretty printing of duration
// values rounded to the most significant time unit.
func (t PrettyAge) String() string {
// Calculate the time difference and handle the 0 cornercase
diff := time.Since(time.Time(t))
if diff < time.Second {
return "0"
}
// Accumulate a precision of 3 components before returning
result, prec := "", 0
for _, unit := range ageUnits {
if diff > unit.Size {
result = fmt.Sprintf("%s%d%s", result, diff/unit.Size, unit.Symbol)
diff %= unit.Size
if prec += 1; prec >= 3 {
break
}
}
}
return result
}
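
// Illustrative sketch (not part of the vendored file) of the pretty printers
// above: durations are trimmed to three decimals, ages are rounded to their
// most significant units (exact output depends on the current time).
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	d := 1234567890 * time.Nanosecond
	fmt.Println(common.PrettyDuration(d)) // 1.234s

	started := time.Now().Add(-26 * time.Hour)
	fmt.Println(common.PrettyAge(started)) // e.g. "1d2h"
}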

View File

@@ -0,0 +1,241 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
/*
Package hexutil implements hex encoding with 0x prefix.
This encoding is used by the Ethereum RPC API to transport binary data in JSON payloads.
# Encoding Rules
All hex data must have prefix "0x".
For byte slices, the hex data must be of even length. An empty byte slice
encodes as "0x".
Integers are encoded using the fewest possible digits (no leading zero digits). Their
encoding may be of uneven length. The number zero encodes as "0x0".
*/
package hexutil
import (
"encoding/hex"
"fmt"
"math/big"
"strconv"
)
const uintBits = 32 << (uint64(^uint(0)) >> 63)
// Errors
var (
ErrEmptyString = &decError{"empty hex string"}
ErrSyntax = &decError{"invalid hex string"}
ErrMissingPrefix = &decError{"hex string without 0x prefix"}
ErrOddLength = &decError{"hex string of odd length"}
ErrEmptyNumber = &decError{"hex string \"0x\""}
ErrLeadingZero = &decError{"hex number with leading zero digits"}
ErrUint64Range = &decError{"hex number > 64 bits"}
ErrUintRange = &decError{fmt.Sprintf("hex number > %d bits", uintBits)}
ErrBig256Range = &decError{"hex number > 256 bits"}
)
type decError struct{ msg string }
func (err decError) Error() string { return err.msg }
// Decode decodes a hex string with 0x prefix.
func Decode(input string) ([]byte, error) {
if len(input) == 0 {
return nil, ErrEmptyString
}
if !has0xPrefix(input) {
return nil, ErrMissingPrefix
}
b, err := hex.DecodeString(input[2:])
if err != nil {
err = mapError(err)
}
return b, err
}
// MustDecode decodes a hex string with 0x prefix. It panics for invalid input.
func MustDecode(input string) []byte {
dec, err := Decode(input)
if err != nil {
panic(err)
}
return dec
}
// Encode encodes b as a hex string with 0x prefix.
func Encode(b []byte) string {
enc := make([]byte, len(b)*2+2)
copy(enc, "0x")
hex.Encode(enc[2:], b)
return string(enc)
}
// DecodeUint64 decodes a hex string with 0x prefix as a quantity.
func DecodeUint64(input string) (uint64, error) {
raw, err := checkNumber(input)
if err != nil {
return 0, err
}
dec, err := strconv.ParseUint(raw, 16, 64)
if err != nil {
err = mapError(err)
}
return dec, err
}
// MustDecodeUint64 decodes a hex string with 0x prefix as a quantity.
// It panics for invalid input.
func MustDecodeUint64(input string) uint64 {
dec, err := DecodeUint64(input)
if err != nil {
panic(err)
}
return dec
}
// EncodeUint64 encodes i as a hex string with 0x prefix.
func EncodeUint64(i uint64) string {
enc := make([]byte, 2, 10)
copy(enc, "0x")
return string(strconv.AppendUint(enc, i, 16))
}
var bigWordNibbles int
func init() {
// This is a weird way to compute the number of nibbles required for big.Word.
// The usual way would be to use constant arithmetic but go vet can't handle that.
b, _ := new(big.Int).SetString("FFFFFFFFFF", 16)
switch len(b.Bits()) {
case 1:
bigWordNibbles = 16
case 2:
bigWordNibbles = 8
default:
panic("weird big.Word size")
}
}
// DecodeBig decodes a hex string with 0x prefix as a quantity.
// Numbers larger than 256 bits are not accepted.
func DecodeBig(input string) (*big.Int, error) {
raw, err := checkNumber(input)
if err != nil {
return nil, err
}
if len(raw) > 64 {
return nil, ErrBig256Range
}
words := make([]big.Word, len(raw)/bigWordNibbles+1)
end := len(raw)
for i := range words {
start := end - bigWordNibbles
if start < 0 {
start = 0
}
for ri := start; ri < end; ri++ {
nib := decodeNibble(raw[ri])
if nib == badNibble {
return nil, ErrSyntax
}
words[i] *= 16
words[i] += big.Word(nib)
}
end = start
}
dec := new(big.Int).SetBits(words)
return dec, nil
}
// MustDecodeBig decodes a hex string with 0x prefix as a quantity.
// It panics for invalid input.
func MustDecodeBig(input string) *big.Int {
dec, err := DecodeBig(input)
if err != nil {
panic(err)
}
return dec
}
// EncodeBig encodes bigint as a hex string with 0x prefix.
func EncodeBig(bigint *big.Int) string {
if sign := bigint.Sign(); sign == 0 {
return "0x0"
} else if sign > 0 {
return "0x" + bigint.Text(16)
} else {
return "-0x" + bigint.Text(16)[1:]
}
}
func has0xPrefix(input string) bool {
return len(input) >= 2 && input[0] == '0' && (input[1] == 'x' || input[1] == 'X')
}
func checkNumber(input string) (raw string, err error) {
if len(input) == 0 {
return "", ErrEmptyString
}
if !has0xPrefix(input) {
return "", ErrMissingPrefix
}
input = input[2:]
if len(input) == 0 {
return "", ErrEmptyNumber
}
if len(input) > 1 && input[0] == '0' {
return "", ErrLeadingZero
}
return input, nil
}
const badNibble = ^uint64(0)
func decodeNibble(in byte) uint64 {
switch {
case in >= '0' && in <= '9':
return uint64(in - '0')
case in >= 'A' && in <= 'F':
return uint64(in - 'A' + 10)
case in >= 'a' && in <= 'f':
return uint64(in - 'a' + 10)
default:
return badNibble
}
}
func mapError(err error) error {
if err, ok := err.(*strconv.NumError); ok {
switch err.Err {
case strconv.ErrRange:
return ErrUint64Range
case strconv.ErrSyntax:
return ErrSyntax
}
}
if _, ok := err.(hex.InvalidByteError); ok {
return ErrSyntax
}
if err == hex.ErrLength {
return ErrOddLength
}
return err
}
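
// Illustrative sketch (not part of the vendored file) of the 0x-prefixed
// encoding rules above: byte slices need an even number of digits, quantities
// use the fewest digits and reject leading zeroes.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	b, _ := hexutil.Decode("0x48656c6c6f")
	fmt.Println(string(b)) // Hello

	fmt.Println(hexutil.Encode([]byte{0xca, 0xfe})) // 0xcafe
	fmt.Println(hexutil.EncodeUint64(1024))         // 0x400

	if _, err := hexutil.DecodeUint64("0x0400"); err != nil {
		fmt.Println(err) // leading zero digits are rejected
	}
}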

View File

@@ -0,0 +1,376 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package hexutil
import (
"encoding/hex"
"encoding/json"
"fmt"
"math/big"
"reflect"
"strconv"
)
var (
bytesT = reflect.TypeOf(Bytes(nil))
bigT = reflect.TypeOf((*Big)(nil))
uintT = reflect.TypeOf(Uint(0))
uint64T = reflect.TypeOf(Uint64(0))
)
// Bytes marshals/unmarshals as a JSON string with 0x prefix.
// The empty slice marshals as "0x".
type Bytes []byte
// MarshalText implements encoding.TextMarshaler
func (b Bytes) MarshalText() ([]byte, error) {
result := make([]byte, len(b)*2+2)
copy(result, `0x`)
hex.Encode(result[2:], b)
return result, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (b *Bytes) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errNonString(bytesT)
}
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bytesT)
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (b *Bytes) UnmarshalText(input []byte) error {
raw, err := checkText(input, true)
if err != nil {
return err
}
dec := make([]byte, len(raw)/2)
if _, err = hex.Decode(dec, raw); err != nil {
err = mapError(err)
} else {
*b = dec
}
return err
}
// String returns the hex encoding of b.
func (b Bytes) String() string {
return Encode(b)
}
// ImplementsGraphQLType returns true if Bytes implements the specified GraphQL type.
func (b Bytes) ImplementsGraphQLType(name string) bool { return name == "Bytes" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (b *Bytes) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
data, err := Decode(input)
if err != nil {
return err
}
*b = data
default:
err = fmt.Errorf("unexpected type %T for Bytes", input)
}
return err
}
// UnmarshalFixedJSON decodes the input as a string with 0x prefix. The length of out
// determines the required input length. This function is commonly used to implement the
// UnmarshalJSON method for fixed-size types.
func UnmarshalFixedJSON(typ reflect.Type, input, out []byte) error {
if !isString(input) {
return errNonString(typ)
}
return wrapTypeError(UnmarshalFixedText(typ.String(), input[1:len(input)-1], out), typ)
}
// UnmarshalFixedText decodes the input as a string with 0x prefix. The length of out
// determines the required input length. This function is commonly used to implement the
// UnmarshalText method for fixed-size types.
func UnmarshalFixedText(typname string, input, out []byte) error {
raw, err := checkText(input, true)
if err != nil {
return err
}
if len(raw)/2 != len(out) {
return fmt.Errorf("hex string has length %d, want %d for %s", len(raw), len(out)*2, typname)
}
// Pre-verify syntax before modifying out.
for _, b := range raw {
if decodeNibble(b) == badNibble {
return ErrSyntax
}
}
hex.Decode(out, raw)
return nil
}
// UnmarshalFixedUnprefixedText decodes the input as a string with optional 0x prefix. The
// length of out determines the required input length. This function is commonly used to
// implement the UnmarshalText method for fixed-size types.
func UnmarshalFixedUnprefixedText(typname string, input, out []byte) error {
raw, err := checkText(input, false)
if err != nil {
return err
}
if len(raw)/2 != len(out) {
return fmt.Errorf("hex string has length %d, want %d for %s", len(raw), len(out)*2, typname)
}
// Pre-verify syntax before modifying out.
for _, b := range raw {
if decodeNibble(b) == badNibble {
return ErrSyntax
}
}
hex.Decode(out, raw)
return nil
}
// Big marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
//
// Negative integers are not supported at this time. Attempting to marshal them will
// return an error. Values larger than 256 bits are rejected by Unmarshal but will be
// marshaled without error.
type Big big.Int
// MarshalText implements encoding.TextMarshaler
func (b Big) MarshalText() ([]byte, error) {
return []byte(EncodeBig((*big.Int)(&b))), nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (b *Big) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errNonString(bigT)
}
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), bigT)
}
// UnmarshalText implements encoding.TextUnmarshaler
func (b *Big) UnmarshalText(input []byte) error {
raw, err := checkNumberText(input)
if err != nil {
return err
}
if len(raw) > 64 {
return ErrBig256Range
}
words := make([]big.Word, len(raw)/bigWordNibbles+1)
end := len(raw)
for i := range words {
start := end - bigWordNibbles
if start < 0 {
start = 0
}
for ri := start; ri < end; ri++ {
nib := decodeNibble(raw[ri])
if nib == badNibble {
return ErrSyntax
}
words[i] *= 16
words[i] += big.Word(nib)
}
end = start
}
var dec big.Int
dec.SetBits(words)
*b = (Big)(dec)
return nil
}
// ToInt converts b to a big.Int.
func (b *Big) ToInt() *big.Int {
return (*big.Int)(b)
}
// String returns the hex encoding of b.
func (b *Big) String() string {
return EncodeBig(b.ToInt())
}
// ImplementsGraphQLType returns true if Big implements the provided GraphQL type.
func (b Big) ImplementsGraphQLType(name string) bool { return name == "BigInt" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (b *Big) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
return b.UnmarshalText([]byte(input))
case int32:
var num big.Int
num.SetInt64(int64(input))
*b = Big(num)
default:
err = fmt.Errorf("unexpected type %T for BigInt", input)
}
return err
}
// Uint64 marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
type Uint64 uint64
// MarshalText implements encoding.TextMarshaler.
func (b Uint64) MarshalText() ([]byte, error) {
buf := make([]byte, 2, 10)
copy(buf, `0x`)
buf = strconv.AppendUint(buf, uint64(b), 16)
return buf, nil
}
// UnmarshalJSON implements json.Unmarshaler.
func (b *Uint64) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errNonString(uint64T)
}
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), uint64T)
}
// UnmarshalText implements encoding.TextUnmarshaler
func (b *Uint64) UnmarshalText(input []byte) error {
raw, err := checkNumberText(input)
if err != nil {
return err
}
if len(raw) > 16 {
return ErrUint64Range
}
var dec uint64
for _, byte := range raw {
nib := decodeNibble(byte)
if nib == badNibble {
return ErrSyntax
}
dec *= 16
dec += nib
}
*b = Uint64(dec)
return nil
}
// String returns the hex encoding of b.
func (b Uint64) String() string {
return EncodeUint64(uint64(b))
}
// ImplementsGraphQLType returns true if Uint64 implements the provided GraphQL type.
func (b Uint64) ImplementsGraphQLType(name string) bool { return name == "Long" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (b *Uint64) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
return b.UnmarshalText([]byte(input))
case int32:
*b = Uint64(input)
default:
err = fmt.Errorf("unexpected type %T for Long", input)
}
return err
}
// Uint marshals/unmarshals as a JSON string with 0x prefix.
// The zero value marshals as "0x0".
type Uint uint
// MarshalText implements encoding.TextMarshaler.
func (b Uint) MarshalText() ([]byte, error) {
return Uint64(b).MarshalText()
}
// UnmarshalJSON implements json.Unmarshaler.
func (b *Uint) UnmarshalJSON(input []byte) error {
if !isString(input) {
return errNonString(uintT)
}
return wrapTypeError(b.UnmarshalText(input[1:len(input)-1]), uintT)
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (b *Uint) UnmarshalText(input []byte) error {
var u64 Uint64
err := u64.UnmarshalText(input)
if u64 > Uint64(^uint(0)) || err == ErrUint64Range {
return ErrUintRange
} else if err != nil {
return err
}
*b = Uint(u64)
return nil
}
// String returns the hex encoding of b.
func (b Uint) String() string {
return EncodeUint64(uint64(b))
}
func isString(input []byte) bool {
return len(input) >= 2 && input[0] == '"' && input[len(input)-1] == '"'
}
func bytesHave0xPrefix(input []byte) bool {
return len(input) >= 2 && input[0] == '0' && (input[1] == 'x' || input[1] == 'X')
}
func checkText(input []byte, wantPrefix bool) ([]byte, error) {
if len(input) == 0 {
return nil, nil // empty strings are allowed
}
if bytesHave0xPrefix(input) {
input = input[2:]
} else if wantPrefix {
return nil, ErrMissingPrefix
}
if len(input)%2 != 0 {
return nil, ErrOddLength
}
return input, nil
}
func checkNumberText(input []byte) (raw []byte, err error) {
if len(input) == 0 {
return nil, nil // empty strings are allowed
}
if !bytesHave0xPrefix(input) {
return nil, ErrMissingPrefix
}
input = input[2:]
if len(input) == 0 {
return nil, ErrEmptyNumber
}
if len(input) > 1 && input[0] == '0' {
return nil, ErrLeadingZero
}
return input, nil
}
func wrapTypeError(err error, typ reflect.Type) error {
if _, ok := err.(*decError); ok {
return &json.UnmarshalTypeError{Value: err.Error(), Type: typ}
}
return err
}
func errNonString(typ reflect.Type) error {
return &json.UnmarshalTypeError{Value: "non-string", Type: typ}
}
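
// Illustrative sketch (not part of the vendored file) of the JSON wrappers
// above, as used by the RPC layer: fields decode from 0x-prefixed strings into
// bytes and quantities. The rpcPayload type is hypothetical.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

type rpcPayload struct {
	Data  hexutil.Bytes  `json:"data"`
	Nonce hexutil.Uint64 `json:"nonce"`
	Value *hexutil.Big   `json:"value"`
}

func main() {
	blob := []byte(`{"data":"0xdeadbeef","nonce":"0x10","value":"0xde0b6b3a7640000"}`)

	var p rpcPayload
	if err := json.Unmarshal(blob, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%x %d %s\n", []byte(p.Data), uint64(p.Nonce), p.Value.ToInt()) // deadbeef 16 1000000000000000000
}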

View File

@@ -0,0 +1,280 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package math provides integer math utilities.
package math
import (
"encoding/json"
"fmt"
"math/big"
)
// Various big integer limit values.
var (
tt255 = BigPow(2, 255)
tt256 = BigPow(2, 256)
tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1))
tt63 = BigPow(2, 63)
MaxBig256 = new(big.Int).Set(tt256m1)
MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1))
)
const (
// number of bits in a big.Word
wordBits = 32 << (uint64(^big.Word(0)) >> 63)
// number of bytes in a big.Word
wordBytes = wordBits / 8
)
// HexOrDecimal256 marshals big.Int as hex or decimal.
type HexOrDecimal256 big.Int
// NewHexOrDecimal256 creates a new HexOrDecimal256
func NewHexOrDecimal256(x int64) *HexOrDecimal256 {
b := big.NewInt(x)
h := HexOrDecimal256(*b)
return &h
}
func (i *HexOrDecimal256) UnmarshalJSON(data []byte) error {
var x int64
var s string
var b *big.Int
var err error
if err = json.Unmarshal(data, &x); err == nil {
b = big.NewInt(x)
} else if err = json.Unmarshal(data, &s); err == nil {
var ok bool
b, ok = ParseBig256(s)
if !ok {
return fmt.Errorf("invalid hex or decimal integer %q", s)
}
} else {
return err
}
*i = HexOrDecimal256(*b)
return nil
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (i *HexOrDecimal256) UnmarshalText(input []byte) error {
bigint, ok := ParseBig256(string(input))
if !ok {
return fmt.Errorf("invalid hex or decimal integer %q", input)
}
*i = HexOrDecimal256(*bigint)
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (i *HexOrDecimal256) MarshalText() ([]byte, error) {
if i == nil {
return []byte("0x0"), nil
}
return []byte(fmt.Sprintf("%#x", (*big.Int)(i))), nil
}
// Decimal256 marshals big.Int as a decimal string. When unmarshalling, however,
// it accepts either "0x"-prefixed (hex encoded) or non-prefixed (decimal) input.
type Decimal256 big.Int
// NewDecimal256 creates a new Decimal256
func NewDecimal256(x int64) *Decimal256 {
b := big.NewInt(x)
d := Decimal256(*b)
return &d
}
// UnmarshalText implements encoding.TextUnmarshaler.
func (i *Decimal256) UnmarshalText(input []byte) error {
bigint, ok := ParseBig256(string(input))
if !ok {
return fmt.Errorf("invalid hex or decimal integer %q", input)
}
*i = Decimal256(*bigint)
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (i *Decimal256) MarshalText() ([]byte, error) {
return []byte(i.String()), nil
}
// String implements Stringer.
func (i *Decimal256) String() string {
if i == nil {
return "0"
}
return fmt.Sprintf("%#d", (*big.Int)(i))
}
// ParseBig256 parses s as a 256 bit integer in decimal or hexadecimal syntax.
// Leading zeros are accepted. The empty string parses as zero.
func ParseBig256(s string) (*big.Int, bool) {
if s == "" {
return new(big.Int), true
}
var bigint *big.Int
var ok bool
if len(s) >= 2 && (s[:2] == "0x" || s[:2] == "0X") {
bigint, ok = new(big.Int).SetString(s[2:], 16)
} else {
bigint, ok = new(big.Int).SetString(s, 10)
}
if ok && bigint.BitLen() > 256 {
bigint, ok = nil, false
}
return bigint, ok
}
// MustParseBig256 parses s as a 256 bit big integer and panics if the string is invalid.
func MustParseBig256(s string) *big.Int {
v, ok := ParseBig256(s)
if !ok {
panic("invalid 256 bit integer: " + s)
}
return v
}
// BigPow returns a ** b as a big integer.
func BigPow(a, b int64) *big.Int {
r := big.NewInt(a)
return r.Exp(r, big.NewInt(b), nil)
}
// BigMax returns the larger of x or y.
func BigMax(x, y *big.Int) *big.Int {
if x.Cmp(y) < 0 {
return y
}
return x
}
// BigMin returns the smaller of x or y.
func BigMin(x, y *big.Int) *big.Int {
if x.Cmp(y) > 0 {
return y
}
return x
}
// FirstBitSet returns the index of the first 1 bit in v, counting from LSB.
func FirstBitSet(v *big.Int) int {
for i := 0; i < v.BitLen(); i++ {
if v.Bit(i) > 0 {
return i
}
}
return v.BitLen()
}
// PaddedBigBytes encodes a big integer as a big-endian byte slice. The length
// of the slice is at least n bytes.
func PaddedBigBytes(bigint *big.Int, n int) []byte {
if bigint.BitLen()/8 >= n {
return bigint.Bytes()
}
ret := make([]byte, n)
ReadBits(bigint, ret)
return ret
}
// bigEndianByteAt returns the byte at position n,
// in Big-Endian encoding
// So n==0 returns the least significant byte
func bigEndianByteAt(bigint *big.Int, n int) byte {
words := bigint.Bits()
// Check word-bucket the byte will reside in
i := n / wordBytes
if i >= len(words) {
return byte(0)
}
word := words[i]
// Offset of the byte
shift := 8 * uint(n%wordBytes)
return byte(word >> shift)
}
// Byte returns the byte at position n,
// with the supplied padlength in Little-Endian encoding.
// n==0 returns the MSB
// Example: bigint '5', padlength 32, n=31 => 5
func Byte(bigint *big.Int, padlength, n int) byte {
if n >= padlength {
return byte(0)
}
return bigEndianByteAt(bigint, padlength-1-n)
}
// ReadBits encodes the absolute value of bigint as big-endian bytes. Callers must ensure
// that buf has enough space. If buf is too short the result will be incomplete.
func ReadBits(bigint *big.Int, buf []byte) {
i := len(buf)
for _, d := range bigint.Bits() {
for j := 0; j < wordBytes && i > 0; j++ {
i--
buf[i] = byte(d)
d >>= 8
}
}
}
// U256 encodes as a 256 bit two's complement number. This operation is destructive.
func U256(x *big.Int) *big.Int {
return x.And(x, tt256m1)
}
// U256Bytes converts a big Int into a 256bit EVM number.
// This operation is destructive.
func U256Bytes(n *big.Int) []byte {
return PaddedBigBytes(U256(n), 32)
}
// S256 interprets x as a two's complement number.
// x must not exceed 256 bits (the result is undefined if it does) and is not modified.
//
// S256(0) = 0
// S256(1) = 1
// S256(2**255) = -2**255
// S256(2**256-1) = -1
func S256(x *big.Int) *big.Int {
if x.Cmp(tt255) < 0 {
return x
}
return new(big.Int).Sub(x, tt256)
}
// Exp implements exponentiation by squaring.
// Exp returns a newly-allocated big integer and does not change
// base or exponent. The result is truncated to 256 bits.
//
// Courtesy @karalabe and @chfast
func Exp(base, exponent *big.Int) *big.Int {
result := big.NewInt(1)
for _, word := range exponent.Bits() {
for i := 0; i < wordBits; i++ {
if word&1 == 1 {
U256(result.Mul(result, base))
}
U256(base.Mul(base, base))
word >>= 1
}
}
return result
}
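
// Illustrative sketch (not part of the vendored file) of the 256-bit helpers
// above: parsing accepts hex or decimal, PaddedBigBytes left-pads the
// big-endian form, and S256 reinterprets a value as two's complement.
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/math"
)

func main() {
	n, ok := math.ParseBig256("0xff")
	fmt.Println(n, ok) // 255 true

	fmt.Printf("%x\n", math.PaddedBigBytes(big.NewInt(5), 4)) // 00000005

	minusOne := new(big.Int).Sub(math.BigPow(2, 256), big.NewInt(1))
	fmt.Println(math.S256(minusOne)) // -1
}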

View File

@@ -0,0 +1,98 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package math
import (
"fmt"
"math/bits"
"strconv"
)
// Integer limit values.
const (
MaxInt8 = 1<<7 - 1
MinInt8 = -1 << 7
MaxInt16 = 1<<15 - 1
MinInt16 = -1 << 15
MaxInt32 = 1<<31 - 1
MinInt32 = -1 << 31
MaxInt64 = 1<<63 - 1
MinInt64 = -1 << 63
MaxUint8 = 1<<8 - 1
MaxUint16 = 1<<16 - 1
MaxUint32 = 1<<32 - 1
MaxUint64 = 1<<64 - 1
)
// HexOrDecimal64 marshals uint64 as hex or decimal.
type HexOrDecimal64 uint64
// UnmarshalText implements encoding.TextUnmarshaler.
func (i *HexOrDecimal64) UnmarshalText(input []byte) error {
n, ok := ParseUint64(string(input))
if !ok {
return fmt.Errorf("invalid hex or decimal integer %q", input)
}
*i = HexOrDecimal64(n)
return nil
}
// MarshalText implements encoding.TextMarshaler.
func (i HexOrDecimal64) MarshalText() ([]byte, error) {
return []byte(fmt.Sprintf("%#x", uint64(i))), nil
}
// ParseUint64 parses s as an integer in decimal or hexadecimal syntax.
// Leading zeros are accepted. The empty string parses as zero.
func ParseUint64(s string) (uint64, bool) {
if s == "" {
return 0, true
}
if len(s) >= 2 && (s[:2] == "0x" || s[:2] == "0X") {
v, err := strconv.ParseUint(s[2:], 16, 64)
return v, err == nil
}
v, err := strconv.ParseUint(s, 10, 64)
return v, err == nil
}
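// Usage sketch (illustrative addition, not part of the upstream file): the same
// value parsed from decimal and hexadecimal syntax. The name exampleParseUint64
// is hypothetical.
func exampleParseUint64() (uint64, uint64, bool) {
    dec, ok1 := ParseUint64("12345")  // decimal
    hex, ok2 := ParseUint64("0x3039") // hexadecimal, same value
    return dec, hex, ok1 && ok2       // 12345, 12345, true
}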
// MustParseUint64 parses s as an integer and panics if the string is invalid.
func MustParseUint64(s string) uint64 {
v, ok := ParseUint64(s)
if !ok {
panic("invalid unsigned 64 bit integer: " + s)
}
return v
}
// SafeSub returns x-y and checks for overflow.
func SafeSub(x, y uint64) (uint64, bool) {
diff, borrowOut := bits.Sub64(x, y, 0)
return diff, borrowOut != 0
}
// SafeAdd returns x+y and checks for overflow.
func SafeAdd(x, y uint64) (uint64, bool) {
sum, carryOut := bits.Add64(x, y, 0)
return sum, carryOut != 0
}
// SafeMul returns x*y and checks for overflow.
func SafeMul(x, y uint64) (uint64, bool) {
hi, lo := bits.Mul64(x, y)
return lo, hi != 0
}
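// Usage sketch (illustrative addition, not part of the upstream file): the Safe*
// helpers report overflow instead of silently wrapping. The name exampleOverflow
// is hypothetical.
func exampleOverflow() bool {
    _, overflow := SafeAdd(MaxUint64, 1) // 2^64 does not fit in a uint64
    return overflow                      // true
}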

View File

@@ -0,0 +1,127 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package mclock is a wrapper for a monotonic clock source
package mclock
import (
"time"
_ "unsafe" // for go:linkname
)
//go:noescape
//go:linkname nanotime runtime.nanotime
func nanotime() int64
// AbsTime represents absolute monotonic time.
type AbsTime int64
// Now returns the current absolute monotonic time.
func Now() AbsTime {
return AbsTime(nanotime())
}
// Add returns t + d as absolute time.
func (t AbsTime) Add(d time.Duration) AbsTime {
return t + AbsTime(d)
}
// Sub returns t - t2 as a duration.
func (t AbsTime) Sub(t2 AbsTime) time.Duration {
return time.Duration(t - t2)
}
// The Clock interface makes it possible to replace the monotonic system clock with
// a simulated clock.
type Clock interface {
Now() AbsTime
Sleep(time.Duration)
NewTimer(time.Duration) ChanTimer
After(time.Duration) <-chan AbsTime
AfterFunc(d time.Duration, f func()) Timer
}
// Timer is a cancellable event created by AfterFunc.
type Timer interface {
// Stop cancels the timer. It returns false if the timer has already
// expired or been stopped.
Stop() bool
}
// ChanTimer is a cancellable event created by NewTimer.
type ChanTimer interface {
Timer
// The channel returned by C receives a value when the timer expires.
C() <-chan AbsTime
// Reset reschedules the timer with a new timeout.
// It should be invoked only on stopped or expired timers with drained channels.
Reset(time.Duration)
}
// System implements Clock using the system clock.
type System struct{}
// Now returns the current monotonic time.
func (c System) Now() AbsTime {
return Now()
}
// Sleep blocks for the given duration.
func (c System) Sleep(d time.Duration) {
time.Sleep(d)
}
// NewTimer creates a timer which can be rescheduled.
func (c System) NewTimer(d time.Duration) ChanTimer {
ch := make(chan AbsTime, 1)
t := time.AfterFunc(d, func() {
// This send is non-blocking because that's how time.Timer
// behaves. It doesn't matter in the happy case, but does
// when Reset is misused.
select {
case ch <- c.Now():
default:
}
})
return &systemTimer{t, ch}
}
// After returns a channel which receives the current time after d has elapsed.
func (c System) After(d time.Duration) <-chan AbsTime {
ch := make(chan AbsTime, 1)
time.AfterFunc(d, func() { ch <- c.Now() })
return ch
}
// AfterFunc runs f on a new goroutine after the duration has elapsed.
func (c System) AfterFunc(d time.Duration, f func()) Timer {
return time.AfterFunc(d, f)
}
type systemTimer struct {
*time.Timer
ch <-chan AbsTime
}
func (st *systemTimer) Reset(d time.Duration) {
st.Timer.Reset(d)
}
func (st *systemTimer) C() <-chan AbsTime {
return st.ch
}
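// Usage sketch (illustrative addition, not part of the upstream file): code
// written against the Clock interface runs unchanged on System here or on the
// Simulated clock in tests. The name exampleWait is hypothetical.
func exampleWait(c Clock) AbsTime {
    t := c.NewTimer(10 * time.Millisecond)
    defer t.Stop()
    return <-t.C() // fires ~10ms later on System, or when a Simulated clock is advanced past it
}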

View File

@@ -0,0 +1 @@
// This file exists in order to be able to use go:linkname.

View File

@@ -0,0 +1,209 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package mclock
import (
"container/heap"
"sync"
"time"
)
// Simulated implements a virtual Clock for reproducible time-sensitive tests. It
// simulates a scheduler on a virtual timescale where actual processing takes zero time.
//
// The virtual clock doesn't advance on its own, call Run to advance it and execute timers.
// Since there is no way to influence the Go scheduler, testing timeout behaviour involving
// goroutines needs special care. A good way to test such timeouts is as follows: First
// perform the action that is supposed to time out. Ensure that the timer you want to test
// is created. Then run the clock until after the timeout. Finally observe the effect of
// the timeout using a channel or semaphore.
type Simulated struct {
now AbsTime
scheduled simTimerHeap
mu sync.RWMutex
cond *sync.Cond
}
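// Test sketch following the pattern described above (illustrative addition, not
// part of the upstream file): start the operation, advance the virtual clock
// past the timeout, then observe the effect. exampleTimeoutTest is hypothetical.
func exampleTimeoutTest() bool {
    var (
        clock    Simulated
        timedOut = make(chan struct{}, 1)
    )
    clock.AfterFunc(time.Second, func() { timedOut <- struct{}{} }) // the timer under test
    clock.Run(2 * time.Second)                                      // advance past the timeout; fires the timer
    select {
    case <-timedOut:
        return true
    default:
        return false
    }
}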
// simTimer implements ChanTimer on the virtual clock.
type simTimer struct {
at AbsTime
index int // position in s.scheduled
s *Simulated
do func()
ch <-chan AbsTime
}
func (s *Simulated) init() {
if s.cond == nil {
s.cond = sync.NewCond(&s.mu)
}
}
// Run moves the clock by the given duration, executing all timers before that duration.
func (s *Simulated) Run(d time.Duration) {
s.mu.Lock()
s.init()
end := s.now.Add(d)
var do []func()
for len(s.scheduled) > 0 && s.scheduled[0].at <= end {
ev := heap.Pop(&s.scheduled).(*simTimer)
do = append(do, ev.do)
}
s.now = end
s.mu.Unlock()
for _, fn := range do {
fn()
}
}
// ActiveTimers returns the number of timers that haven't fired.
func (s *Simulated) ActiveTimers() int {
s.mu.RLock()
defer s.mu.RUnlock()
return len(s.scheduled)
}
// WaitForTimers waits until the clock has at least n scheduled timers.
func (s *Simulated) WaitForTimers(n int) {
s.mu.Lock()
defer s.mu.Unlock()
s.init()
for len(s.scheduled) < n {
s.cond.Wait()
}
}
// Now returns the current virtual time.
func (s *Simulated) Now() AbsTime {
s.mu.RLock()
defer s.mu.RUnlock()
return s.now
}
// Sleep blocks until the clock has advanced by d.
func (s *Simulated) Sleep(d time.Duration) {
<-s.After(d)
}
// NewTimer creates a timer which fires when the clock has advanced by d.
func (s *Simulated) NewTimer(d time.Duration) ChanTimer {
s.mu.Lock()
defer s.mu.Unlock()
ch := make(chan AbsTime, 1)
var timer *simTimer
timer = s.schedule(d, func() { ch <- timer.at })
timer.ch = ch
return timer
}
// After returns a channel which receives the current time after the clock
// has advanced by d.
func (s *Simulated) After(d time.Duration) <-chan AbsTime {
return s.NewTimer(d).C()
}
// AfterFunc runs fn after the clock has advanced by d. Unlike with the system
// clock, fn runs on the goroutine that calls Run.
func (s *Simulated) AfterFunc(d time.Duration, fn func()) Timer {
s.mu.Lock()
defer s.mu.Unlock()
return s.schedule(d, fn)
}
func (s *Simulated) schedule(d time.Duration, fn func()) *simTimer {
s.init()
at := s.now.Add(d)
ev := &simTimer{do: fn, at: at, s: s}
heap.Push(&s.scheduled, ev)
s.cond.Broadcast()
return ev
}
func (ev *simTimer) Stop() bool {
ev.s.mu.Lock()
defer ev.s.mu.Unlock()
if ev.index < 0 {
return false
}
heap.Remove(&ev.s.scheduled, ev.index)
ev.s.cond.Broadcast()
ev.index = -1
return true
}
func (ev *simTimer) Reset(d time.Duration) {
if ev.ch == nil {
panic("mclock: Reset() on timer created by AfterFunc")
}
ev.s.mu.Lock()
defer ev.s.mu.Unlock()
ev.at = ev.s.now.Add(d)
if ev.index < 0 {
heap.Push(&ev.s.scheduled, ev) // already expired
} else {
heap.Fix(&ev.s.scheduled, ev.index) // hasn't fired yet, reschedule
}
ev.s.cond.Broadcast()
}
func (ev *simTimer) C() <-chan AbsTime {
if ev.ch == nil {
panic("mclock: C() on timer created by AfterFunc")
}
return ev.ch
}
type simTimerHeap []*simTimer
func (h *simTimerHeap) Len() int {
return len(*h)
}
func (h *simTimerHeap) Less(i, j int) bool {
return (*h)[i].at < (*h)[j].at
}
func (h *simTimerHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
(*h)[i].index = i
(*h)[j].index = j
}
func (h *simTimerHeap) Push(x interface{}) {
t := x.(*simTimer)
t.index = len(*h)
*h = append(*h, t)
}
func (h *simTimerHeap) Pop() interface{} {
end := len(*h) - 1
t := (*h)[end]
t.index = -1
(*h)[end] = nil
*h = (*h)[:end]
return t
}

49
vendor/github.com/ethereum/go-ethereum/common/path.go generated vendored Normal file
View File

@@ -0,0 +1,49 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"fmt"
"os"
"path/filepath"
"runtime"
)
// MakeName creates a node name that follows the ethereum convention
// for such names. It adds the operating system name and the Go runtime
// version to the name.
func MakeName(name, version string) string {
return fmt.Sprintf("%s/v%s/%s/%s", name, version, runtime.GOOS, runtime.Version())
}
// FileExist checks if a file exists at filePath.
func FileExist(filePath string) bool {
_, err := os.Stat(filePath)
if err != nil && os.IsNotExist(err) {
return false
}
return true
}
// AbsolutePath returns datadir + filename, or filename if it is absolute.
func AbsolutePath(datadir string, filename string) string {
if filepath.IsAbs(filename) {
return filename
}
return filepath.Join(datadir, filename)
}

View File

@@ -0,0 +1,197 @@
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package prque
import (
"container/heap"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
)
// LazyQueue is a priority queue data structure where priorities can change over
// time and are only evaluated on demand.
// Two callbacks are required:
// - priority evaluates the actual priority of an item
// - maxPriority gives an upper estimate for the priority in any moment between
// now and the given absolute time
//
// If the upper estimate is exceeded then Update should be called for that item.
// A global Refresh function should also be called periodically.
type LazyQueue struct {
clock mclock.Clock
// Items are stored in one of two internal queues ordered by estimated max
// priority until the next and the next-after-next refresh. Update and Refresh
// always place items in queue[1].
queue [2]*sstack
popQueue *sstack
period time.Duration
maxUntil mclock.AbsTime
indexOffset int
setIndex SetIndexCallback
priority PriorityCallback
maxPriority MaxPriorityCallback
lastRefresh1, lastRefresh2 mclock.AbsTime
}
type (
PriorityCallback func(data interface{}) int64 // actual priority callback
MaxPriorityCallback func(data interface{}, until mclock.AbsTime) int64 // estimated maximum priority callback
)
// NewLazyQueue creates a new lazy queue
func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPriority MaxPriorityCallback, clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue {
q := &LazyQueue{
popQueue: newSstack(nil, false),
setIndex: setIndex,
priority: priority,
maxPriority: maxPriority,
clock: clock,
period: refreshPeriod,
lastRefresh1: clock.Now(),
lastRefresh2: clock.Now(),
}
q.Reset()
q.refresh(clock.Now())
return q
}
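// Construction sketch (illustrative addition, not part of the upstream file): a
// LazyQueue over plain int64 values whose actual priority is the value itself
// and whose upper estimate never changes; items are then added with Push and
// drained with Pop or MultiPop. exampleNewQueue is hypothetical.
func exampleNewQueue(clock mclock.Clock) *LazyQueue {
    setIndex := func(data interface{}, index int) {} // nothing to track for plain values
    priority := func(data interface{}) int64 { return data.(int64) }
    maxPriority := func(data interface{}, until mclock.AbsTime) int64 { return data.(int64) }
    return NewLazyQueue(setIndex, priority, maxPriority, clock, 10*time.Second)
}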
// Reset clears the contents of the queue
func (q *LazyQueue) Reset() {
q.queue[0] = newSstack(q.setIndex0, false)
q.queue[1] = newSstack(q.setIndex1, false)
}
// Refresh performs queue re-evaluation if necessary
func (q *LazyQueue) Refresh() {
now := q.clock.Now()
for time.Duration(now-q.lastRefresh2) >= q.period*2 {
q.refresh(now)
q.lastRefresh2 = q.lastRefresh1
q.lastRefresh1 = now
}
}
// refresh re-evaluates items in the older queue and swaps the two queues
func (q *LazyQueue) refresh(now mclock.AbsTime) {
q.maxUntil = now.Add(q.period)
for q.queue[0].Len() != 0 {
q.Push(heap.Pop(q.queue[0]).(*item).value)
}
q.queue[0], q.queue[1] = q.queue[1], q.queue[0]
q.indexOffset = 1 - q.indexOffset
q.maxUntil = q.maxUntil.Add(q.period)
}
// Push adds an item to the queue
func (q *LazyQueue) Push(data interface{}) {
heap.Push(q.queue[1], &item{data, q.maxPriority(data, q.maxUntil)})
}
// Update updates the upper priority estimate for the item with the given queue index
func (q *LazyQueue) Update(index int) {
q.Push(q.Remove(index))
}
// Pop removes and returns the item with the greatest actual priority
func (q *LazyQueue) Pop() (interface{}, int64) {
var (
resData interface{}
resPri int64
)
q.MultiPop(func(data interface{}, priority int64) bool {
resData = data
resPri = priority
return false
})
return resData, resPri
}
// peekIndex returns the index of the internal queue where the item with the
// highest estimated priority is or -1 if both are empty
func (q *LazyQueue) peekIndex() int {
if q.queue[0].Len() != 0 {
if q.queue[1].Len() != 0 && q.queue[1].blocks[0][0].priority > q.queue[0].blocks[0][0].priority {
return 1
}
return 0
}
if q.queue[1].Len() != 0 {
return 1
}
return -1
}
// MultiPop pops multiple items from the queue and is more efficient than calling
// Pop multiple times. Popped items are passed to the callback. MultiPop returns
// when the callback returns false or there are no more items to pop.
func (q *LazyQueue) MultiPop(callback func(data interface{}, priority int64) bool) {
nextIndex := q.peekIndex()
for nextIndex != -1 {
data := heap.Pop(q.queue[nextIndex]).(*item).value
heap.Push(q.popQueue, &item{data, q.priority(data)})
nextIndex = q.peekIndex()
for q.popQueue.Len() != 0 && (nextIndex == -1 || q.queue[nextIndex].blocks[0][0].priority < q.popQueue.blocks[0][0].priority) {
i := heap.Pop(q.popQueue).(*item)
if !callback(i.value, i.priority) {
for q.popQueue.Len() != 0 {
q.Push(heap.Pop(q.popQueue).(*item).value)
}
return
}
nextIndex = q.peekIndex() // re-check because callback is allowed to push items back
}
}
}
// PopItem pops the item from the queue only, dropping the associated priority value.
func (q *LazyQueue) PopItem() interface{} {
i, _ := q.Pop()
return i
}
// Remove removes the item with the given index.
func (q *LazyQueue) Remove(index int) interface{} {
if index < 0 {
return nil
}
return heap.Remove(q.queue[index&1^q.indexOffset], index>>1).(*item).value
}
// Empty checks whether the priority queue is empty.
func (q *LazyQueue) Empty() bool {
return q.queue[0].Len() == 0 && q.queue[1].Len() == 0
}
// Size returns the number of items in the priority queue.
func (q *LazyQueue) Size() int {
return q.queue[0].Len() + q.queue[1].Len()
}
// setIndex0 translates internal queue item index to the virtual index space of LazyQueue
func (q *LazyQueue) setIndex0(data interface{}, index int) {
if index == -1 {
q.setIndex(data, -1)
} else {
q.setIndex(data, index+index)
}
}
// setIndex1 translates internal queue item index to the virtual index space of LazyQueue
func (q *LazyQueue) setIndex1(data interface{}, index int) {
q.setIndex(data, index+index+1)
}

View File

@@ -0,0 +1,83 @@
// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: use of this source code is governed by a BSD
// license that can be found in the LICENSE file. Alternatively, the CookieJar
// toolbox may be used in accordance with the terms and conditions contained
// in a signed written agreement between you and the author(s).
// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".
// Package prque implements a priority queue data structure supporting arbitrary
// value types and int64 priorities.
//
// If you would like to use a min-priority queue, simply negate the priorities.
//
// Internally the queue is based on the standard heap package working on a
// sortable version of the block based stack.
package prque
import (
"container/heap"
)
// Priority queue data structure.
type Prque struct {
cont *sstack
}
// New creates a new priority queue.
func New(setIndex SetIndexCallback) *Prque {
return &Prque{newSstack(setIndex, false)}
}
// NewWrapAround creates a new priority queue with wrap-around priority handling.
func NewWrapAround(setIndex SetIndexCallback) *Prque {
return &Prque{newSstack(setIndex, true)}
}
// Pushes a value with a given priority into the queue, expanding if necessary.
func (p *Prque) Push(data interface{}, priority int64) {
heap.Push(p.cont, &item{data, priority})
}
// Peek returns the value with the greatest priority but does not pop it off.
func (p *Prque) Peek() (interface{}, int64) {
item := p.cont.blocks[0][0]
return item.value, item.priority
}
// Pops the value with the greatest priority off the stack and returns it.
// Currently no shrinking is done.
func (p *Prque) Pop() (interface{}, int64) {
item := heap.Pop(p.cont).(*item)
return item.value, item.priority
}
// Pops only the item from the queue, dropping the associated priority value.
func (p *Prque) PopItem() interface{} {
return heap.Pop(p.cont).(*item).value
}
// Remove removes the element with the given index.
func (p *Prque) Remove(i int) interface{} {
if i < 0 {
return nil
}
return heap.Remove(p.cont, i)
}
// Checks whether the priority queue is empty.
func (p *Prque) Empty() bool {
return p.cont.Len() == 0
}
// Returns the number of elements in the priority queue.
func (p *Prque) Size() int {
return p.cont.Len()
}
// Clears the contents of the priority queue.
func (p *Prque) Reset() {
*p = *New(p.cont.setIndex)
}
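// Usage sketch (illustrative addition, not part of the upstream file): the
// highest-priority value comes out first; negate priorities to get min-queue
// behaviour. examplePopOrder is hypothetical.
func examplePopOrder() (interface{}, int64) {
    q := New(nil) // no SetIndexCallback needed when only the top item is removed
    q.Push("low", 1)
    q.Push("high", 5)
    return q.Pop() // "high", 5
}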

View File

@@ -0,0 +1,120 @@
// CookieJar - A contestant's algorithm toolbox
// Copyright (c) 2013 Peter Szilagyi. All rights reserved.
//
// CookieJar is dual licensed: use of this source code is governed by a BSD
// license that can be found in the LICENSE file. Alternatively, the CookieJar
// toolbox may be used in accordance with the terms and conditions contained
// in a signed written agreement between you and the author(s).
// This is a duplicated and slightly modified version of "gopkg.in/karalabe/cookiejar.v2/collections/prque".
package prque
// The size of a block of data
const blockSize = 4096
// A prioritized item in the sorted stack.
//
// Note: priorities can "wrap around" the int64 range: a comes before b if (a.priority - b.priority) > 0.
// The difference between the lowest and highest priorities in the queue at any point should be less than 2^63.
type item struct {
value interface{}
priority int64
}
// SetIndexCallback is called when the element is moved to a new index.
// Providing SetIndexCallback is optional; it is needed only if the application
// needs to delete elements other than the top one.
type SetIndexCallback func(data interface{}, index int)
// Internal sortable stack data structure. Implements the Push and Pop ops for
// the stack (heap) functionality and the Len, Less and Swap methods for the
// sortability requirements of the heap.
type sstack struct {
setIndex SetIndexCallback
size int
capacity int
offset int
wrapAround bool
blocks [][]*item
active []*item
}
// Creates a new, empty stack.
func newSstack(setIndex SetIndexCallback, wrapAround bool) *sstack {
result := new(sstack)
result.setIndex = setIndex
result.active = make([]*item, blockSize)
result.blocks = [][]*item{result.active}
result.capacity = blockSize
result.wrapAround = wrapAround
return result
}
// Pushes a value onto the stack, expanding it if necessary. Required by
// heap.Interface.
func (s *sstack) Push(data interface{}) {
if s.size == s.capacity {
s.active = make([]*item, blockSize)
s.blocks = append(s.blocks, s.active)
s.capacity += blockSize
s.offset = 0
} else if s.offset == blockSize {
s.active = s.blocks[s.size/blockSize]
s.offset = 0
}
if s.setIndex != nil {
s.setIndex(data.(*item).value, s.size)
}
s.active[s.offset] = data.(*item)
s.offset++
s.size++
}
// Pops a value off the stack and returns it. Currently no shrinking is done.
// Required by heap.Interface.
func (s *sstack) Pop() (res interface{}) {
s.size--
s.offset--
if s.offset < 0 {
s.offset = blockSize - 1
s.active = s.blocks[s.size/blockSize]
}
res, s.active[s.offset] = s.active[s.offset], nil
if s.setIndex != nil {
s.setIndex(res.(*item).value, -1)
}
return
}
// Returns the length of the stack. Required by sort.Interface.
func (s *sstack) Len() int {
return s.size
}
// Compares the priority of two elements of the stack (higher is first).
// Required by sort.Interface.
func (s *sstack) Less(i, j int) bool {
a, b := s.blocks[i/blockSize][i%blockSize].priority, s.blocks[j/blockSize][j%blockSize].priority
if s.wrapAround {
return a-b > 0
}
return a > b
}
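// Wrap-around sketch (illustrative addition, not part of the upstream file):
// with wrapAround enabled the ordering follows the signed difference, so a
// priority just past MinInt64 sorts ahead of one at MaxInt64 even though it is
// numerically smaller. exampleWrapAround is hypothetical.
func exampleWrapAround() bool {
    a := int64(-1<<63 + 2) // just above MinInt64
    b := int64(1<<63 - 1)  // MaxInt64
    return a-b > 0         // true: a is served before b under wrap-around ordering
}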
// Swaps two elements in the stack. Required by sort.Interface.
func (s *sstack) Swap(i, j int) {
ib, io, jb, jo := i/blockSize, i%blockSize, j/blockSize, j%blockSize
a, b := s.blocks[jb][jo], s.blocks[ib][io]
if s.setIndex != nil {
s.setIndex(a.value, i)
s.setIndex(b.value, j)
}
s.blocks[ib][io], s.blocks[jb][jo] = a, b
}
// Resets the stack, effectively clearing its contents.
func (s *sstack) Reset() {
*s = *newSstack(s.setIndex, false)
}

56
vendor/github.com/ethereum/go-ethereum/common/size.go generated vendored Normal file
View File

@@ -0,0 +1,56 @@
// Copyright 2014 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"fmt"
)
// StorageSize is a wrapper around a float value that supports user-friendly
// formatting.
type StorageSize float64
// String implements the stringer interface.
func (s StorageSize) String() string {
if s > 1099511627776 {
return fmt.Sprintf("%.2f TiB", s/1099511627776)
} else if s > 1073741824 {
return fmt.Sprintf("%.2f GiB", s/1073741824)
} else if s > 1048576 {
return fmt.Sprintf("%.2f MiB", s/1048576)
} else if s > 1024 {
return fmt.Sprintf("%.2f KiB", s/1024)
} else {
return fmt.Sprintf("%.2f B", s)
}
}
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (s StorageSize) TerminalString() string {
if s > 1099511627776 {
return fmt.Sprintf("%.2fTiB", s/1099511627776)
} else if s > 1073741824 {
return fmt.Sprintf("%.2fGiB", s/1073741824)
} else if s > 1048576 {
return fmt.Sprintf("%.2fMiB", s/1048576)
} else if s > 1024 {
return fmt.Sprintf("%.2fKiB", s/1024)
} else {
return fmt.Sprintf("%.2fB", s)
}
}
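// Usage sketch (illustrative addition, not part of the upstream file): the
// formatter picks a human-readable unit. exampleStorageSize is hypothetical.
func exampleStorageSize() string {
    return StorageSize(2 * 1048576).String() // "2.00 MiB"
}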

View File

@@ -0,0 +1,53 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"encoding/json"
"fmt"
"os"
)
// LoadJSON reads the given file and unmarshals its content.
func LoadJSON(file string, val interface{}) error {
content, err := os.ReadFile(file)
if err != nil {
return err
}
if err := json.Unmarshal(content, val); err != nil {
if syntaxerr, ok := err.(*json.SyntaxError); ok {
line := findLine(content, syntaxerr.Offset)
return fmt.Errorf("JSON syntax error at %v:%v: %v", file, line, err)
}
return fmt.Errorf("JSON unmarshal error in %v: %v", file, err)
}
return nil
}
// findLine returns the line number for the given offset into data.
func findLine(data []byte, offset int64) (line int) {
line = 1
for i, r := range string(data) {
if int64(i) >= offset {
return
}
if r == '\n' {
line++
}
}
return
}
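// Usage sketch (illustrative addition, not part of the upstream file): unmarshal
// a JSON file into a typed value; the file name and struct are hypothetical.
func exampleLoadJSON() error {
    var cfg struct {
        Name  string `json:"name"`
        Limit uint64 `json:"limit"`
    }
    return LoadJSON("config.json", &cfg)
}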

428
vendor/github.com/ethereum/go-ethereum/common/types.go generated vendored Normal file
View File

@@ -0,0 +1,428 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package common
import (
"bytes"
"database/sql/driver"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/big"
"math/rand"
"reflect"
"strings"
"github.com/ethereum/go-ethereum/common/hexutil"
"golang.org/x/crypto/sha3"
)
// Lengths of hashes and addresses in bytes.
const (
// HashLength is the expected length of the hash
HashLength = 32
// AddressLength is the expected length of the address
AddressLength = 20
)
var (
hashT = reflect.TypeOf(Hash{})
addressT = reflect.TypeOf(Address{})
)
// Hash represents the 32 byte Keccak256 hash of arbitrary data.
type Hash [HashLength]byte
// BytesToHash sets b to hash.
// If b is larger than len(h), b will be cropped from the left.
func BytesToHash(b []byte) Hash {
var h Hash
h.SetBytes(b)
return h
}
// BigToHash sets byte representation of b to hash.
// If b is larger than len(h), b will be cropped from the left.
func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) }
// HexToHash sets byte representation of s to hash.
// If b is larger than len(h), b will be cropped from the left.
func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) }
// Bytes gets the byte representation of the underlying hash.
func (h Hash) Bytes() []byte { return h[:] }
// Big converts a hash to a big integer.
func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) }
// Hex converts a hash to a hex string.
func (h Hash) Hex() string { return hexutil.Encode(h[:]) }
// TerminalString implements log.TerminalStringer, formatting a string for console
// output during logging.
func (h Hash) TerminalString() string {
return fmt.Sprintf("%x..%x", h[:3], h[29:])
}
// String implements the stringer interface and is used also by the logger when
// doing full logging into a file.
func (h Hash) String() string {
return h.Hex()
}
// Format implements fmt.Formatter.
// Hash supports the %v, %s, %q, %x, %X and %d format verbs.
func (h Hash) Format(s fmt.State, c rune) {
hexb := make([]byte, 2+len(h)*2)
copy(hexb, "0x")
hex.Encode(hexb[2:], h[:])
switch c {
case 'x', 'X':
if !s.Flag('#') {
hexb = hexb[2:]
}
if c == 'X' {
hexb = bytes.ToUpper(hexb)
}
fallthrough
case 'v', 's':
s.Write(hexb)
case 'q':
q := []byte{'"'}
s.Write(q)
s.Write(hexb)
s.Write(q)
case 'd':
fmt.Fprint(s, ([len(h)]byte)(h))
default:
fmt.Fprintf(s, "%%!%c(hash=%x)", c, h)
}
}
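// Formatting sketch (illustrative addition, not part of the upstream file): the
// verbs handled by Format above. exampleHashFormat is hypothetical.
func exampleHashFormat() []string {
    h := HexToHash("0x01") // left-padded to 32 bytes
    return []string{
        fmt.Sprintf("%v", h),  // full 32-byte value with the 0x prefix
        fmt.Sprintf("%x", h),  // same bytes without the 0x prefix
        fmt.Sprintf("%#x", h), // the # flag restores the 0x prefix
    }
}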
// UnmarshalText parses a hash in hex syntax.
func (h *Hash) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Hash", input, h[:])
}
// UnmarshalJSON parses a hash in hex syntax.
func (h *Hash) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(hashT, input, h[:])
}
// MarshalText returns the hex representation of h.
func (h Hash) MarshalText() ([]byte, error) {
return hexutil.Bytes(h[:]).MarshalText()
}
// SetBytes sets the hash to the value of b.
// If b is larger than len(h), b will be cropped from the left.
func (h *Hash) SetBytes(b []byte) {
if len(b) > len(h) {
b = b[len(b)-HashLength:]
}
copy(h[HashLength-len(b):], b)
}
// Generate implements testing/quick.Generator.
func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value {
m := rand.Intn(len(h))
for i := len(h) - 1; i > m; i-- {
h[i] = byte(rand.Uint32())
}
return reflect.ValueOf(h)
}
// Scan implements Scanner for database/sql.
func (h *Hash) Scan(src interface{}) error {
srcB, ok := src.([]byte)
if !ok {
return fmt.Errorf("can't scan %T into Hash", src)
}
if len(srcB) != HashLength {
return fmt.Errorf("can't scan []byte of len %d into Hash, want %d", len(srcB), HashLength)
}
copy(h[:], srcB)
return nil
}
// Value implements valuer for database/sql.
func (h Hash) Value() (driver.Value, error) {
return h[:], nil
}
// ImplementsGraphQLType returns true if Hash implements the specified GraphQL type.
func (Hash) ImplementsGraphQLType(name string) bool { return name == "Bytes32" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (h *Hash) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
err = h.UnmarshalText([]byte(input))
default:
err = fmt.Errorf("unexpected type %T for Hash", input)
}
return err
}
// UnprefixedHash allows marshaling a Hash without 0x prefix.
type UnprefixedHash Hash
// UnmarshalText decodes the hash from hex. The 0x prefix is optional.
func (h *UnprefixedHash) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedUnprefixedText("UnprefixedHash", input, h[:])
}
// MarshalText encodes the hash as hex.
func (h UnprefixedHash) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(h[:])), nil
}
/////////// Address
// Address represents the 20 byte address of an Ethereum account.
type Address [AddressLength]byte
// BytesToAddress returns Address with value b.
// If b is larger than len(h), b will be cropped from the left.
func BytesToAddress(b []byte) Address {
var a Address
a.SetBytes(b)
return a
}
// BigToAddress returns Address with byte values of b.
// If b is larger than len(h), b will be cropped from the left.
func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) }
// HexToAddress returns Address with byte values of s.
// If s is larger than len(h), s will be cropped from the left.
func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) }
// IsHexAddress verifies whether a string can represent a valid hex-encoded
// Ethereum address or not.
func IsHexAddress(s string) bool {
if has0xPrefix(s) {
s = s[2:]
}
return len(s) == 2*AddressLength && isHex(s)
}
// Bytes gets the byte representation of the underlying address.
func (a Address) Bytes() []byte { return a[:] }
// Hash converts an address to a hash by left-padding it with zeros.
func (a Address) Hash() Hash { return BytesToHash(a[:]) }
// Hex returns an EIP55-compliant hex string representation of the address.
func (a Address) Hex() string {
return string(a.checksumHex())
}
// String implements fmt.Stringer.
func (a Address) String() string {
return a.Hex()
}
func (a *Address) checksumHex() []byte {
buf := a.hex()
// compute checksum
sha := sha3.NewLegacyKeccak256()
sha.Write(buf[2:])
hash := sha.Sum(nil)
for i := 2; i < len(buf); i++ {
hashByte := hash[(i-2)/2]
if i%2 == 0 {
hashByte = hashByte >> 4
} else {
hashByte &= 0xf
}
if buf[i] > '9' && hashByte > 7 {
buf[i] -= 32
}
}
return buf[:]
}
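// Checksum sketch (illustrative addition, not part of the upstream file): Hex
// applies the EIP-55 mixed-case rule computed above, so a lowercase input comes
// back with checksummed casing. exampleChecksum is hypothetical; the expected
// output is EIP-55's reference vector.
func exampleChecksum() string {
    a := HexToAddress("0x5aaeb6053f3e94c9b9a09f33669435e7ef1beaed")
    return a.Hex() // "0x5aAeb6053F3E94C9b9A09f33669435E7Ef1BeAed"
}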
func (a Address) hex() []byte {
var buf [len(a)*2 + 2]byte
copy(buf[:2], "0x")
hex.Encode(buf[2:], a[:])
return buf[:]
}
// Format implements fmt.Formatter.
// Address supports the %v, %s, %q, %x, %X and %d format verbs.
func (a Address) Format(s fmt.State, c rune) {
switch c {
case 'v', 's':
s.Write(a.checksumHex())
case 'q':
q := []byte{'"'}
s.Write(q)
s.Write(a.checksumHex())
s.Write(q)
case 'x', 'X':
// %x disables the checksum.
hex := a.hex()
if !s.Flag('#') {
hex = hex[2:]
}
if c == 'X' {
hex = bytes.ToUpper(hex)
}
s.Write(hex)
case 'd':
fmt.Fprint(s, ([len(a)]byte)(a))
default:
fmt.Fprintf(s, "%%!%c(address=%x)", c, a)
}
}
// SetBytes sets the address to the value of b.
// If b is larger than len(a), b will be cropped from the left.
func (a *Address) SetBytes(b []byte) {
if len(b) > len(a) {
b = b[len(b)-AddressLength:]
}
copy(a[AddressLength-len(b):], b)
}
// MarshalText returns the hex representation of a.
func (a Address) MarshalText() ([]byte, error) {
return hexutil.Bytes(a[:]).MarshalText()
}
// UnmarshalText parses a hash in hex syntax.
func (a *Address) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedText("Address", input, a[:])
}
// UnmarshalJSON parses a hash in hex syntax.
func (a *Address) UnmarshalJSON(input []byte) error {
return hexutil.UnmarshalFixedJSON(addressT, input, a[:])
}
// Scan implements Scanner for database/sql.
func (a *Address) Scan(src interface{}) error {
srcB, ok := src.([]byte)
if !ok {
return fmt.Errorf("can't scan %T into Address", src)
}
if len(srcB) != AddressLength {
return fmt.Errorf("can't scan []byte of len %d into Address, want %d", len(srcB), AddressLength)
}
copy(a[:], srcB)
return nil
}
// Value implements valuer for database/sql.
func (a Address) Value() (driver.Value, error) {
return a[:], nil
}
// ImplementsGraphQLType returns true if Address implements the specified GraphQL type.
func (a Address) ImplementsGraphQLType(name string) bool { return name == "Address" }
// UnmarshalGraphQL unmarshals the provided GraphQL query data.
func (a *Address) UnmarshalGraphQL(input interface{}) error {
var err error
switch input := input.(type) {
case string:
err = a.UnmarshalText([]byte(input))
default:
err = fmt.Errorf("unexpected type %T for Address", input)
}
return err
}
// UnprefixedAddress allows marshaling an Address without 0x prefix.
type UnprefixedAddress Address
// UnmarshalText decodes the address from hex. The 0x prefix is optional.
func (a *UnprefixedAddress) UnmarshalText(input []byte) error {
return hexutil.UnmarshalFixedUnprefixedText("UnprefixedAddress", input, a[:])
}
// MarshalText encodes the address as hex.
func (a UnprefixedAddress) MarshalText() ([]byte, error) {
return []byte(hex.EncodeToString(a[:])), nil
}
// MixedcaseAddress retains the original string, which may or may not be
// correctly checksummed
type MixedcaseAddress struct {
addr Address
original string
}
// NewMixedcaseAddress constructor (mainly for testing)
func NewMixedcaseAddress(addr Address) MixedcaseAddress {
return MixedcaseAddress{addr: addr, original: addr.Hex()}
}
// NewMixedcaseAddressFromString is mainly meant for unit-testing
func NewMixedcaseAddressFromString(hexaddr string) (*MixedcaseAddress, error) {
if !IsHexAddress(hexaddr) {
return nil, errors.New("invalid address")
}
a := FromHex(hexaddr)
return &MixedcaseAddress{addr: BytesToAddress(a), original: hexaddr}, nil
}
// UnmarshalJSON parses MixedcaseAddress
func (ma *MixedcaseAddress) UnmarshalJSON(input []byte) error {
if err := hexutil.UnmarshalFixedJSON(addressT, input, ma.addr[:]); err != nil {
return err
}
return json.Unmarshal(input, &ma.original)
}
// MarshalJSON marshals the original value
func (ma *MixedcaseAddress) MarshalJSON() ([]byte, error) {
if strings.HasPrefix(ma.original, "0x") || strings.HasPrefix(ma.original, "0X") {
return json.Marshal(fmt.Sprintf("0x%s", ma.original[2:]))
}
return json.Marshal(fmt.Sprintf("0x%s", ma.original))
}
// Address returns the address
func (ma *MixedcaseAddress) Address() Address {
return ma.addr
}
// String implements fmt.Stringer
func (ma *MixedcaseAddress) String() string {
if ma.ValidChecksum() {
return fmt.Sprintf("%s [chksum ok]", ma.original)
}
return fmt.Sprintf("%s [chksum INVALID]", ma.original)
}
// ValidChecksum returns true if the address has valid checksum
func (ma *MixedcaseAddress) ValidChecksum() bool {
return ma.original == ma.addr.Hex()
}
// Original returns the mixed-case input string
func (ma *MixedcaseAddress) Original() string {
return ma.original
}

View File

@@ -0,0 +1,130 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package consensus implements different Ethereum consensus engines.
package consensus
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
)
// ChainHeaderReader defines a small collection of methods needed to access the local
// blockchain during header verification.
type ChainHeaderReader interface {
// Config retrieves the blockchain's chain configuration.
Config() *params.ChainConfig
// CurrentHeader retrieves the current header from the local chain.
CurrentHeader() *types.Header
// GetHeader retrieves a block header from the database by hash and number.
GetHeader(hash common.Hash, number uint64) *types.Header
// GetHeaderByNumber retrieves a block header from the database by number.
GetHeaderByNumber(number uint64) *types.Header
// GetHeaderByHash retrieves a block header from the database by its hash.
GetHeaderByHash(hash common.Hash) *types.Header
// GetTd retrieves the total difficulty from the database by hash and number.
GetTd(hash common.Hash, number uint64) *big.Int
}
// ChainReader defines a small collection of methods needed to access the local
// blockchain during header and/or uncle verification.
type ChainReader interface {
ChainHeaderReader
// GetBlock retrieves a block from the database by hash and number.
GetBlock(hash common.Hash, number uint64) *types.Block
}
// Engine is an algorithm agnostic consensus engine.
type Engine interface {
// Author retrieves the Ethereum address of the account that minted the given
// block, which may be different from the header's coinbase if a consensus
// engine is based on signatures.
Author(header *types.Header) (common.Address, error)
// VerifyHeader checks whether a header conforms to the consensus rules of a
// given engine. Verifying the seal may be done optionally here, or explicitly
// via the VerifySeal method.
VerifyHeader(chain ChainHeaderReader, header *types.Header, seal bool) error
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications (the order is that of
// the input slice).
VerifyHeaders(chain ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error)
// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of a given engine.
VerifyUncles(chain ChainReader, block *types.Block) error
// Prepare initializes the consensus fields of a block header according to the
// rules of a particular engine. The changes are executed inline.
Prepare(chain ChainHeaderReader, header *types.Header) error
// Finalize runs any post-transaction state modifications (e.g. block rewards)
// but does not assemble the block.
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
Finalize(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header)
// FinalizeAndAssemble runs any post-transaction state modifications (e.g. block
// rewards) and assembles the final block.
//
// Note: The block header and state database might be updated to reflect any
// consensus rules that happen at finalization (e.g. block rewards).
FinalizeAndAssemble(chain ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction,
uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error)
// Seal generates a new sealing request for the given input block and pushes
// the result into the given channel.
//
// Note, the method returns immediately and will send the result async. More
// than one result may also be returned depending on the consensus algorithm.
Seal(chain ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error
// SealHash returns the hash of a block prior to it being sealed.
SealHash(header *types.Header) common.Hash
// CalcDifficulty is the difficulty adjustment algorithm. It returns the difficulty
// that a new block should have.
CalcDifficulty(chain ChainHeaderReader, time uint64, parent *types.Header) *big.Int
// APIs returns the RPC APIs this consensus engine provides.
APIs(chain ChainHeaderReader) []rpc.API
// Close terminates any background threads maintained by the consensus engine.
Close() error
}
// PoW is a consensus engine based on proof-of-work.
type PoW interface {
Engine
// Hashrate returns the current mining hashrate of a PoW consensus engine.
Hashrate() float64
}

View File

@@ -0,0 +1,41 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package consensus
import "errors"
var (
// ErrUnknownAncestor is returned when validating a block requires an ancestor
// that is unknown.
ErrUnknownAncestor = errors.New("unknown ancestor")
// ErrPrunedAncestor is returned when validating a block requires an ancestor
// that is known, but the state of which is not available.
ErrPrunedAncestor = errors.New("pruned ancestor")
// ErrFutureBlock is returned when a block's timestamp is in the future according
// to the current node.
ErrFutureBlock = errors.New("block in the future")
// ErrInvalidNumber is returned if a block's number doesn't equal its parent's
// plus one.
ErrInvalidNumber = errors.New("invalid block number")
// ErrInvalidTerminalBlock is returned if a block is invalid wrt. the terminal
// total difficulty.
ErrInvalidTerminalBlock = errors.New("invalid terminal block")
)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,113 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethash
import (
"errors"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
)
var errEthashStopped = errors.New("ethash stopped")
// API exposes ethash related methods for the RPC interface.
type API struct {
ethash *Ethash
}
// GetWork returns a work package for external miner.
//
// The work package consists of 4 strings:
//
// result[0] - 32 bytes hex encoded current block header pow-hash
// result[1] - 32 bytes hex encoded seed hash used for DAG
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3] - hex encoded block number
func (api *API) GetWork() ([4]string, error) {
if api.ethash.remote == nil {
return [4]string{}, errors.New("not supported")
}
var (
workCh = make(chan [4]string, 1)
errc = make(chan error, 1)
)
select {
case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
case <-api.ethash.remote.exitCh:
return [4]string{}, errEthashStopped
}
select {
case work := <-workCh:
return work, nil
case err := <-errc:
return [4]string{}, err
}
}
// SubmitWork can be used by external miners to submit their PoW solution.
// It returns an indication whether the work was accepted.
// Note that an invalid solution, stale work, or non-existent work will all return false.
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
if api.ethash.remote == nil {
return false
}
var errc = make(chan error, 1)
select {
case api.ethash.remote.submitWorkCh <- &mineResult{
nonce: nonce,
mixDigest: digest,
hash: hash,
errc: errc,
}:
case <-api.ethash.remote.exitCh:
return false
}
err := <-errc
return err == nil
}
// SubmitHashrate can be used by remote miners to submit their hash rate.
// This enables the node to report the combined hash rate of all miners
// which submit work through this node.
//
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
func (api *API) SubmitHashrate(rate hexutil.Uint64, id common.Hash) bool {
if api.ethash.remote == nil {
return false
}
var done = make(chan struct{}, 1)
select {
case api.ethash.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
case <-api.ethash.remote.exitCh:
return false
}
// Block until hash rate submitted successfully.
<-done
return true
}
// GetHashrate returns the current hashrate for local CPU miner and remote miner.
func (api *API) GetHashrate() uint64 {
return uint64(api.ethash.Hashrate())
}

View File

@@ -0,0 +1,675 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethash
import (
"bytes"
"errors"
"fmt"
"math/big"
"runtime"
"time"
mapset "github.com/deckarep/golang-set"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/consensus/misc"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
"github.com/ethereum/go-ethereum/trie"
"golang.org/x/crypto/sha3"
)
// Ethash proof-of-work protocol constants.
var (
FrontierBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block
ByzantiumBlockReward = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium
ConstantinopleBlockReward = big.NewInt(2e+18) // Block reward in wei for successfully mining a block upward from Constantinople
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks
// calcDifficultyEip5133 is the difficulty adjustment algorithm as specified by EIP 5133.
// It offsets the bomb a total of 11.4M blocks.
// Specification EIP-5133: https://eips.ethereum.org/EIPS/eip-5133
calcDifficultyEip5133 = makeDifficultyCalculator(big.NewInt(11_400_000))
// calcDifficultyEip4345 is the difficulty adjustment algorithm as specified by EIP 4345.
// It offsets the bomb a total of 10.7M blocks.
// Specification EIP-4345: https://eips.ethereum.org/EIPS/eip-4345
calcDifficultyEip4345 = makeDifficultyCalculator(big.NewInt(10_700_000))
// calcDifficultyEip3554 is the difficulty adjustment algorithm as specified by EIP 3554.
// It offsets the bomb a total of 9.7M blocks.
// Specification EIP-3554: https://eips.ethereum.org/EIPS/eip-3554
calcDifficultyEip3554 = makeDifficultyCalculator(big.NewInt(9700000))
// calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384.
// It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks.
// Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384
calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000))
// calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople.
// It returns the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Byzantium rules, but with
// bomb offset 5M.
// Specification EIP-1234: https://eips.ethereum.org/EIPS/eip-1234
calcDifficultyConstantinople = makeDifficultyCalculator(big.NewInt(5000000))
// calcDifficultyByzantium is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Byzantium rules.
// Specification EIP-649: https://eips.ethereum.org/EIPS/eip-649
calcDifficultyByzantium = makeDifficultyCalculator(big.NewInt(3000000))
)
// Various error messages to mark blocks invalid. These should be private to
// prevent engine specific errors from being referenced in the remainder of the
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
errOlderBlockTime = errors.New("timestamp older than parent")
errTooManyUncles = errors.New("too many uncles")
errDuplicateUncle = errors.New("duplicate uncle")
errUncleIsAncestor = errors.New("uncle is ancestor")
errDanglingUncle = errors.New("uncle's parent is not ancestor")
errInvalidDifficulty = errors.New("non-positive difficulty")
errInvalidMixDigest = errors.New("invalid mix digest")
errInvalidPoW = errors.New("invalid proof-of-work")
)
// Author implements consensus.Engine, returning the header's coinbase as the
// proof-of-work verified author of the block.
func (ethash *Ethash) Author(header *types.Header) (common.Address, error) {
return header.Coinbase, nil
}
// VerifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum ethash engine.
func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error {
// If we're running a full engine faking, accept any input as valid
if ethash.config.PowMode == ModeFullFake {
return nil
}
// Short circuit if the header is known, or if its parent is not known
number := header.Number.Uint64()
if chain.GetHeader(header.Hash(), number) != nil {
return nil
}
parent := chain.GetHeader(header.ParentHash, number-1)
if parent == nil {
return consensus.ErrUnknownAncestor
}
// Sanity checks passed, do a proper verification
return ethash.verifyHeader(chain, header, parent, false, seal, time.Now().Unix())
}
// VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers
// concurrently. The method returns a quit channel to abort the operations and
// a results channel to retrieve the async verifications.
func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) {
// If we're running a full engine faking, accept any input as valid
if ethash.config.PowMode == ModeFullFake || len(headers) == 0 {
abort, results := make(chan struct{}), make(chan error, len(headers))
for i := 0; i < len(headers); i++ {
results <- nil
}
return abort, results
}
// Spawn as many workers as allowed threads
workers := runtime.GOMAXPROCS(0)
if len(headers) < workers {
workers = len(headers)
}
// Create a task channel and spawn the verifiers
var (
inputs = make(chan int)
done = make(chan int, workers)
errors = make([]error, len(headers))
abort = make(chan struct{})
unixNow = time.Now().Unix()
)
for i := 0; i < workers; i++ {
go func() {
for index := range inputs {
errors[index] = ethash.verifyHeaderWorker(chain, headers, seals, index, unixNow)
done <- index
}
}()
}
errorsOut := make(chan error, len(headers))
go func() {
defer close(inputs)
var (
in, out = 0, 0
checked = make([]bool, len(headers))
inputs = inputs
)
for {
select {
case inputs <- in:
if in++; in == len(headers) {
// Reached end of headers. Stop sending to workers.
inputs = nil
}
case index := <-done:
for checked[index] = true; checked[out]; out++ {
errorsOut <- errors[out]
if out == len(headers)-1 {
return
}
}
case <-abort:
return
}
}
}()
return abort, errorsOut
}
func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int, unixNow int64) error {
var parent *types.Header
if index == 0 {
parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1)
} else if headers[index-1].Hash() == headers[index].ParentHash {
parent = headers[index-1]
}
if parent == nil {
return consensus.ErrUnknownAncestor
}
return ethash.verifyHeader(chain, headers[index], parent, false, seals[index], unixNow)
}
// VerifyUncles verifies that the given block's uncles conform to the consensus
// rules of the stock Ethereum ethash engine.
func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Block) error {
// If we're running a full engine faking, accept any input as valid
if ethash.config.PowMode == ModeFullFake {
return nil
}
// Verify that there are at most 2 uncles included in this block
if len(block.Uncles()) > maxUncles {
return errTooManyUncles
}
if len(block.Uncles()) == 0 {
return nil
}
// Gather the set of past uncles and ancestors
uncles, ancestors := mapset.NewSet(), make(map[common.Hash]*types.Header)
number, parent := block.NumberU64()-1, block.ParentHash()
for i := 0; i < 7; i++ {
ancestorHeader := chain.GetHeader(parent, number)
if ancestorHeader == nil {
break
}
ancestors[parent] = ancestorHeader
// If the ancestor doesn't have any uncles, we don't have to iterate them
if ancestorHeader.UncleHash != types.EmptyUncleHash {
// Need to add those uncles to the banned list too
ancestor := chain.GetBlock(parent, number)
if ancestor == nil {
break
}
for _, uncle := range ancestor.Uncles() {
uncles.Add(uncle.Hash())
}
}
parent, number = ancestorHeader.ParentHash, number-1
}
ancestors[block.Hash()] = block.Header()
uncles.Add(block.Hash())
// Verify that each of the uncles is recent, but not an ancestor
for _, uncle := range block.Uncles() {
// Make sure every uncle is rewarded only once
hash := uncle.Hash()
if uncles.Contains(hash) {
return errDuplicateUncle
}
uncles.Add(hash)
// Make sure the uncle has a valid ancestry
if ancestors[hash] != nil {
return errUncleIsAncestor
}
if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == block.ParentHash() {
return errDanglingUncle
}
if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, true, time.Now().Unix()); err != nil {
return err
}
}
return nil
}
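// In short, per the checks above: a valid uncle must be the direct child of one
// of the block's recent ancestors (but not of the block's own parent), must not
// itself be an ancestor, must not already have been included, and must pass full
// header verification with the future-timestamp check relaxed (uncle == true).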
// verifyHeader checks whether a header conforms to the consensus rules of the
// stock Ethereum ethash engine.
// See YP section 4.3.4. "Block Header Validity"
func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool, unixNow int64) error {
// Ensure that the header's extra-data section is of a reasonable size
if uint64(len(header.Extra)) > params.MaximumExtraDataSize {
return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize)
}
// Verify the header's timestamp
if !uncle {
if header.Time > uint64(unixNow+allowedFutureBlockTimeSeconds) {
return consensus.ErrFutureBlock
}
}
if header.Time <= parent.Time {
return errOlderBlockTime
}
// Verify the block's difficulty based on its timestamp and parent's difficulty
expected := ethash.CalcDifficulty(chain, header.Time, parent)
if expected.Cmp(header.Difficulty) != 0 {
return fmt.Errorf("invalid difficulty: have %v, want %v", header.Difficulty, expected)
}
// Verify that the gas limit is <= 2^63-1
if header.GasLimit > params.MaxGasLimit {
return fmt.Errorf("invalid gasLimit: have %v, max %v", header.GasLimit, params.MaxGasLimit)
}
// Verify that the gasUsed is <= gasLimit
if header.GasUsed > header.GasLimit {
return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit)
}
// Verify the block's gas usage and (if applicable) verify the base fee.
if !chain.Config().IsLondon(header.Number) {
// Verify BaseFee not present before EIP-1559 fork.
if header.BaseFee != nil {
return fmt.Errorf("invalid baseFee before fork: have %d, expected 'nil'", header.BaseFee)
}
if err := misc.VerifyGaslimit(parent.GasLimit, header.GasLimit); err != nil {
return err
}
} else if err := misc.VerifyEip1559Header(chain.Config(), parent, header); err != nil {
// Verify the header's EIP-1559 attributes.
return err
}
// Verify that the block number is parent's +1
if diff := new(big.Int).Sub(header.Number, parent.Number); diff.Cmp(big.NewInt(1)) != 0 {
return consensus.ErrInvalidNumber
}
// Verify the engine specific seal securing the block
if seal {
if err := ethash.verifySeal(chain, header, false); err != nil {
return err
}
}
// If all checks passed, validate any special fields for hard forks
if err := misc.VerifyDAOHeaderExtraData(chain.Config(), header); err != nil {
return err
}
if err := misc.VerifyForkHashes(chain.Config(), header, uncle); err != nil {
return err
}
return nil
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int {
return CalcDifficulty(chain.Config(), time, parent)
}
// CalcDifficulty is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time
// given the parent block's time and difficulty.
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, big1)
switch {
case config.IsGrayGlacier(next):
return calcDifficultyEip5133(time, parent)
case config.IsArrowGlacier(next):
return calcDifficultyEip4345(time, parent)
case config.IsLondon(next):
return calcDifficultyEip3554(time, parent)
case config.IsMuirGlacier(next):
return calcDifficultyEip2384(time, parent)
case config.IsConstantinople(next):
return calcDifficultyConstantinople(time, parent)
case config.IsByzantium(next):
return calcDifficultyByzantium(time, parent)
case config.IsHomestead(next):
return calcDifficultyHomestead(time, parent)
default:
return calcDifficultyFrontier(time, parent)
}
}
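// Usage sketch (mirrors verifyHeader above): the chain config selects the fork
// rules for the child's number (parent.Number + 1), so Gray Glacier chains get
// the EIP-5133 bomb delay and pre-Homestead chains fall through to Frontier.
//
//	want := CalcDifficulty(chain.Config(), header.Time, parent)
//	if want.Cmp(header.Difficulty) != 0 {
//		// reject the header, as verifyHeader does
//	}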
// Some weird constants to avoid constant memory allocs for them.
var (
expDiffPeriod = big.NewInt(100000)
big1 = big.NewInt(1)
big2 = big.NewInt(2)
big9 = big.NewInt(9)
big10 = big.NewInt(10)
bigMinus99 = big.NewInt(-99)
)
// makeDifficultyCalculator creates a difficultyCalculator with the given bomb-delay.
// The difficulty is calculated with Byzantium rules, which differ from Homestead
// in how uncles affect the calculation.
func makeDifficultyCalculator(bombDelay *big.Int) func(time uint64, parent *types.Header) *big.Int {
// Note, the calculations below look at the parent number, which is 1 below
// the block number. Thus we remove one from the delay given.
bombDelayFromParent := new(big.Int).Sub(bombDelay, big1)
return func(time uint64, parent *types.Header) *big.Int {
// https://github.com/ethereum/EIPs/issues/100.
// algorithm:
// diff = (parent_diff +
// (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
// ) + 2^(periodCount - 2)
bigTime := new(big.Int).SetUint64(time)
bigParentTime := new(big.Int).SetUint64(parent.Time)
// holds intermediate values to make the algo easier to read & audit
x := new(big.Int)
y := new(big.Int)
// (2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9
x.Sub(bigTime, bigParentTime)
x.Div(x, big9)
if parent.UncleHash == types.EmptyUncleHash {
x.Sub(big1, x)
} else {
x.Sub(big2, x)
}
// max((2 if len(parent_uncles) else 1) - (block_timestamp - parent_timestamp) // 9, -99)
if x.Cmp(bigMinus99) < 0 {
x.Set(bigMinus99)
}
// parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
x.Mul(y, x)
x.Add(parent.Difficulty, x)
// minimum difficulty can ever be (before exponential factor)
if x.Cmp(params.MinimumDifficulty) < 0 {
x.Set(params.MinimumDifficulty)
}
// calculate a fake block number for the ice-age delay
// Specification: https://eips.ethereum.org/EIPS/eip-1234
fakeBlockNumber := new(big.Int)
if parent.Number.Cmp(bombDelayFromParent) >= 0 {
fakeBlockNumber = fakeBlockNumber.Sub(parent.Number, bombDelayFromParent)
}
// for the exponential factor
periodCount := fakeBlockNumber
periodCount.Div(periodCount, expDiffPeriod)
// the exponential factor, commonly referred to as "the bomb"
// diff = diff + 2^(periodCount - 2)
if periodCount.Cmp(big1) > 0 {
y.Sub(periodCount, big2)
y.Exp(big2, y, nil)
x.Add(x, y)
}
return x
}
}
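// Worked example (illustrative figures, bomb term omitted): with
// parent.Difficulty = 2,048,000, no uncles in the parent, and a child sealed
// 20 seconds later:
//
//	(timestamp - parent.timestamp) // 9 = 2, so the adjustment factor is 1 - 2 = -1
//	parent_diff / 2048 = 1,000
//	child difficulty = 2,048,000 + 1,000 * (-1) = 2,047,000
//
// The exponential term only kicks in once the bomb-delayed block number reaches
// 200,000 or more, i.e. periodCount > 1.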
// calcDifficultyHomestead is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Homestead rules.
func calcDifficultyHomestead(time uint64, parent *types.Header) *big.Int {
// https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md
// algorithm:
// diff = (parent_diff +
// (parent_diff / 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
// ) + 2^(periodCount - 2)
bigTime := new(big.Int).SetUint64(time)
bigParentTime := new(big.Int).SetUint64(parent.Time)
// holds intermediate values to make the algo easier to read & audit
x := new(big.Int)
y := new(big.Int)
// 1 - (block_timestamp - parent_timestamp) // 10
x.Sub(bigTime, bigParentTime)
x.Div(x, big10)
x.Sub(big1, x)
// max(1 - (block_timestamp - parent_timestamp) // 10, -99)
if x.Cmp(bigMinus99) < 0 {
x.Set(bigMinus99)
}
// (parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99))
y.Div(parent.Difficulty, params.DifficultyBoundDivisor)
x.Mul(y, x)
x.Add(parent.Difficulty, x)
// minimum difficulty can ever be (before exponential factor)
if x.Cmp(params.MinimumDifficulty) < 0 {
x.Set(params.MinimumDifficulty)
}
// for the exponential factor
periodCount := new(big.Int).Add(parent.Number, big1)
periodCount.Div(periodCount, expDiffPeriod)
// the exponential factor, commonly referred to as "the bomb"
// diff = diff + 2^(periodCount - 2)
if periodCount.Cmp(big1) > 0 {
y.Sub(periodCount, big2)
y.Exp(big2, y, nil)
x.Add(x, y)
}
return x
}
// calcDifficultyFrontier is the difficulty adjustment algorithm. It returns the
// difficulty that a new block should have when created at time given the parent
// block's time and difficulty. The calculation uses the Frontier rules.
func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int {
diff := new(big.Int)
adjust := new(big.Int).Div(parent.Difficulty, params.DifficultyBoundDivisor)
bigTime := new(big.Int)
bigParentTime := new(big.Int)
bigTime.SetUint64(time)
bigParentTime.SetUint64(parent.Time)
if bigTime.Sub(bigTime, bigParentTime).Cmp(params.DurationLimit) < 0 {
diff.Add(parent.Difficulty, adjust)
} else {
diff.Sub(parent.Difficulty, adjust)
}
if diff.Cmp(params.MinimumDifficulty) < 0 {
diff.Set(params.MinimumDifficulty)
}
periodCount := new(big.Int).Add(parent.Number, big1)
periodCount.Div(periodCount, expDiffPeriod)
if periodCount.Cmp(big1) > 0 {
// diff = diff + 2^(periodCount - 2)
expDiff := periodCount.Sub(periodCount, big2)
expDiff.Exp(big2, expDiff, nil)
diff.Add(diff, expDiff)
diff = math.BigMax(diff, params.MinimumDifficulty)
}
return diff
}
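// Worked example (illustrative figures): with parent.Difficulty = 1,024,000 the
// adjustment step is 1,024,000 / 2048 = 500. A child sealed 10s after its parent
// (under the 13s DurationLimit) gets 1,024,500; one sealed 20s later gets
// 1,023,500, floored at params.MinimumDifficulty (131072) and bumped by the
// ice-age term once (parent.Number + 1) / 100000 exceeds 1.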
// Exported for fuzzing
var FrontierDifficultyCalculator = calcDifficultyFrontier
var HomesteadDifficultyCalculator = calcDifficultyHomestead
var DynamicDifficultyCalculator = makeDifficultyCalculator
// verifySeal checks whether a block satisfies the PoW difficulty requirements,
// either using the usual ethash cache for it, or alternatively using a full DAG
// to make remote mining fast.
func (ethash *Ethash) verifySeal(chain consensus.ChainHeaderReader, header *types.Header, fulldag bool) error {
// If we're running a fake PoW, accept any seal as valid
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
time.Sleep(ethash.fakeDelay)
if ethash.fakeFail == header.Number.Uint64() {
return errInvalidPoW
}
return nil
}
// If we're running a shared PoW, delegate verification to it
if ethash.shared != nil {
return ethash.shared.verifySeal(chain, header, fulldag)
}
// Ensure that we have a valid difficulty for the block
if header.Difficulty.Sign() <= 0 {
return errInvalidDifficulty
}
// Recompute the digest and PoW values
number := header.Number.Uint64()
var (
digest []byte
result []byte
)
// If fast-but-heavy PoW verification was requested, use an ethash dataset
if fulldag {
dataset := ethash.dataset(number, true)
if dataset.generated() {
digest, result = hashimotoFull(dataset.dataset, ethash.SealHash(header).Bytes(), header.Nonce.Uint64())
// Datasets are unmapped in a finalizer. Ensure that the dataset stays alive
// until after the call to hashimotoFull so it's not unmapped while being used.
runtime.KeepAlive(dataset)
} else {
// Dataset not yet generated, don't hang, use a cache instead
fulldag = false
}
}
// If slow-but-light PoW verification was requested (or DAG not yet ready), use an ethash cache
if !fulldag {
cache := ethash.cache(number)
size := datasetSize(number)
if ethash.config.PowMode == ModeTest {
size = 32 * 1024
}
digest, result = hashimotoLight(size, cache.cache, ethash.SealHash(header).Bytes(), header.Nonce.Uint64())
// Caches are unmapped in a finalizer. Ensure that the cache stays alive
// until after the call to hashimotoLight so it's not unmapped while being used.
runtime.KeepAlive(cache)
}
// Verify the calculated values against the ones provided in the header
if !bytes.Equal(header.MixDigest[:], digest) {
return errInvalidMixDigest
}
target := new(big.Int).Div(two256, header.Difficulty)
if new(big.Int).SetBytes(result).Cmp(target) > 0 {
return errInvalidPoW
}
return nil
}
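// For intuition on the final check: the seal is valid when the 256-bit hashimoto
// result, read as a big-endian integer, does not exceed 2^256 / difficulty. A
// difficulty of 2^32, for example, leaves a target of 2^224, so on average one
// nonce in roughly four billion satisfies the condition.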
// Prepare implements consensus.Engine, initializing the difficulty field of a
// header to conform to the ethash protocol. The changes are done inline.
func (ethash *Ethash) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error {
parent := chain.GetHeader(header.ParentHash, header.Number.Uint64()-1)
if parent == nil {
return consensus.ErrUnknownAncestor
}
header.Difficulty = ethash.CalcDifficulty(chain, header.Time, parent)
return nil
}
// Finalize implements consensus.Engine, accumulating the block and uncle rewards,
// setting the final state on the header
func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header) {
// Accumulate any block and uncle rewards and commit the final state root
accumulateRewards(chain.Config(), state, header, uncles)
header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number))
}
// FinalizeAndAssemble implements consensus.Engine, accumulating the block and
// uncle rewards, setting the final state and assembling the block.
func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) {
// Finalize block
ethash.Finalize(chain, header, state, txs, uncles)
// Header seems complete, assemble into a block and return
return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), nil
}
// SealHash returns the hash of a block prior to it being sealed.
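// The Nonce and MixDigest fields are deliberately left out of the encoding
// below, since they are only filled in by the sealer; this is what lets miners
// grind nonces against a fixed sealing hash.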
func (ethash *Ethash) SealHash(header *types.Header) (hash common.Hash) {
hasher := sha3.NewLegacyKeccak256()
enc := []interface{}{
header.ParentHash,
header.UncleHash,
header.Coinbase,
header.Root,
header.TxHash,
header.ReceiptHash,
header.Bloom,
header.Difficulty,
header.Number,
header.GasLimit,
header.GasUsed,
header.Time,
header.Extra,
}
if header.BaseFee != nil {
enc = append(enc, header.BaseFee)
}
rlp.Encode(hasher, enc)
hasher.Sum(hash[:0])
return hash
}
// Some weird constants to avoid constant memory allocs for them.
var (
big8 = big.NewInt(8)
big32 = big.NewInt(32)
)
// accumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) {
// Select the correct block reward based on chain progression
blockReward := FrontierBlockReward
if config.IsByzantium(header.Number) {
blockReward = ByzantiumBlockReward
}
if config.IsConstantinople(header.Number) {
blockReward = ConstantinopleBlockReward
}
// Accumulate the rewards for the miner and any included uncles
reward := new(big.Int).Set(blockReward)
r := new(big.Int)
for _, uncle := range uncles {
r.Add(uncle.Number, big8)
r.Sub(r, header.Number)
r.Mul(r, blockReward)
r.Div(r, big8)
state.AddBalance(uncle.Coinbase, r)
r.Div(blockReward, big32)
reward.Add(reward, r)
}
state.AddBalance(header.Coinbase, reward)
}
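// Worked example (Byzantium-era figures): with a 3 ETH block reward, a block at
// height N that includes one uncle mined at height N-1 pays out
//
//	uncle miner:  (N-1 + 8 - N) * 3 ETH / 8 = 7/8 * 3 ETH = 2.625 ETH
//	block miner:  3 ETH + 3 ETH / 32        = 3.09375 ETH
//
// The uncle reward shrinks by one eighth of the block reward per generation of
// depth, down to 2/8 for the oldest uncles that VerifyUncles still accepts.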

View File

@@ -0,0 +1,191 @@
// Copyright 2020 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethash
import (
"math/big"
"github.com/ethereum/go-ethereum/core/types"
"github.com/holiman/uint256"
)
const (
// frontierDurationLimit is for Frontier:
// The decision boundary on the blocktime duration used to determine
// whether difficulty should go up or down.
frontierDurationLimit = 13
// minimumDifficulty is the minimum that the difficulty may ever be.
minimumDifficulty = 131072
// expDiffPeriodUint is the exponential difficulty period.
expDiffPeriodUint = 100000
// difficultyBoundDivisor is the bound divisor of the difficulty (2048),
// expressed as the number of right-shifts to use for the division.
difficultyBoundDivisor = 11
)
// CalcDifficultyFrontierU256 is the difficulty adjustment algorithm. It returns the
// difficulty that a new block should have when created at time given the parent
// block's time and difficulty. The calculation uses the Frontier rules.
func CalcDifficultyFrontierU256(time uint64, parent *types.Header) *big.Int {
/*
Algorithm
block_diff = pdiff + pdiff / 2048 * (1 if time - ptime < 13 else -1) + int(2^((num // 100000) - 2))
Where:
- pdiff = parent.difficulty
- ptime = parent.time
- time = block.timestamp
- num = block.number
*/
pDiff, _ := uint256.FromBig(parent.Difficulty) // pDiff: pdiff
adjust := pDiff.Clone()
adjust.Rsh(adjust, difficultyBoundDivisor) // adjust: pDiff / 2048
if time-parent.Time < frontierDurationLimit {
pDiff.Add(pDiff, adjust)
} else {
pDiff.Sub(pDiff, adjust)
}
if pDiff.LtUint64(minimumDifficulty) {
pDiff.SetUint64(minimumDifficulty)
}
// 'pdiff' now contains:
// pdiff + pdiff / 2048 * (1 if time - ptime < 13 else -1)
if periodCount := (parent.Number.Uint64() + 1) / expDiffPeriodUint; periodCount > 1 {
// diff = diff + 2^(periodCount - 2)
expDiff := adjust.SetOne()
expDiff.Lsh(expDiff, uint(periodCount-2)) // expdiff: 2 ^ (periodCount -2)
pDiff.Add(pDiff, expDiff)
}
return pDiff.ToBig()
}
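// Consistency sketch (hypothetical differential check): this function is meant
// to agree bit-for-bit with the big.Int implementation in consensus.go, which is
// why FrontierDifficultyCalculator is exported there for fuzzing:
//
//	if CalcDifficultyFrontierU256(t, parent).Cmp(FrontierDifficultyCalculator(t, parent)) != 0 {
//		panic("frontier difficulty mismatch") // hypothetical assertion
//	}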
// CalcDifficultyHomesteadU256 is the difficulty adjustment algorithm. It returns
// the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Homestead rules.
func CalcDifficultyHomesteadU256(time uint64, parent *types.Header) *big.Int {
/*
https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md
Algorithm:
block_diff = pdiff + pdiff / 2048 * max(1 - (time - ptime) / 10, -99) + 2 ^ int((num / 100000) - 2)
Our modification, to use unsigned ints:
block_diff = pdiff - pdiff / 2048 * max((time - ptime) / 10 - 1, 99) + 2 ^ int((num / 100000) - 2)
Where:
- pdiff = parent.difficulty
- ptime = parent.time
- time = block.timestamp
- num = block.number
*/
pDiff, _ := uint256.FromBig(parent.Difficulty) // pDiff: pdiff
adjust := pDiff.Clone()
adjust.Rsh(adjust, difficultyBoundDivisor) // adjust: pDiff / 2048
x := (time - parent.Time) / 10 // (time - ptime) / 10
var neg = true
if x == 0 {
x = 1
neg = false
} else if x >= 100 {
x = 99
} else {
x = x - 1
}
z := new(uint256.Int).SetUint64(x)
adjust.Mul(adjust, z) // adjust: (pdiff / 2048) * max((time - ptime) / 10 - 1, 99)
if neg {
pDiff.Sub(pDiff, adjust) // pdiff - pdiff / 2048 * max((time - ptime) / 10 - 1, 99)
} else {
pDiff.Add(pDiff, adjust) // pdiff + pdiff / 2048 * max((time - ptime) / 10 - 1, 99)
}
if pDiff.LtUint64(minimumDifficulty) {
pDiff.SetUint64(minimumDifficulty)
}
// for the exponential factor, a.k.a "the bomb"
// diff = diff + 2^(periodCount - 2)
if periodCount := (1 + parent.Number.Uint64()) / expDiffPeriodUint; periodCount > 1 {
expFactor := adjust.Lsh(adjust.SetOne(), uint(periodCount-2))
pDiff.Add(pDiff, expFactor)
}
return pDiff.ToBig()
}
// MakeDifficultyCalculatorU256 creates a difficultyCalculator with the given bomb-delay.
// The difficulty is calculated with Byzantium rules, which differ from Homestead
// in how uncles affect the calculation.
func MakeDifficultyCalculatorU256(bombDelay *big.Int) func(time uint64, parent *types.Header) *big.Int {
// Note, the calculations below look at the parent number, which is 1 below
// the block number. Thus we remove one from the delay given.
bombDelayFromParent := bombDelay.Uint64() - 1
return func(time uint64, parent *types.Header) *big.Int {
/*
https://github.com/ethereum/EIPs/issues/100
pDiff = parent.difficulty
adj_factor = max((2 if parent has uncles else 1) - (time - ptime) // 9, -99)
a = pDiff + (pDiff // 2048) * adj_factor
child_diff = max(a, MIN_DIFF)
*/
x := (time - parent.Time) / 9 // (block_timestamp - parent_timestamp) // 9
c := uint64(1) // if parent.UncleHash == types.EmptyUncleHash
if parent.UncleHash != types.EmptyUncleHash {
c = 2
}
xNeg := x >= c
if xNeg {
// x is now _negative_ adjustment factor
x = x - c // -((t-p)/9 - (2 or 1))
} else {
x = c - x // (2 or 1) - (t-p)/9
}
if x > 99 {
x = 99 // max(x, 99)
}
// parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99))
y := new(uint256.Int)
y.SetFromBig(parent.Difficulty) // y: p_diff
pDiff := y.Clone() // pdiff: p_diff
z := new(uint256.Int).SetUint64(x) //z : +-adj_factor (either pos or negative)
y.Rsh(y, difficultyBoundDivisor) // y: p_diff / 2048
z.Mul(y, z) // z: (p_diff / 2048 ) * (+- adj_factor)
if xNeg {
y.Sub(pDiff, z) // y: parent_diff + parent_diff/2048 * adjustment_factor
} else {
y.Add(pDiff, z) // y: parent_diff + parent_diff/2048 * adjustment_factor
}
// minimum difficulty can ever be (before exponential factor)
if y.LtUint64(minimumDifficulty) {
y.SetUint64(minimumDifficulty)
}
// calculate a fake block number for the ice-age delay
// Specification: https://eips.ethereum.org/EIPS/eip-1234
var pNum = parent.Number.Uint64()
if pNum >= bombDelayFromParent {
if fakeBlockNumber := pNum - bombDelayFromParent; fakeBlockNumber >= 2*expDiffPeriodUint {
z.SetOne()
z.Lsh(z, uint(fakeBlockNumber/expDiffPeriodUint-2))
y.Add(z, y)
}
}
return y.ToBig()
}
}

View File

@@ -0,0 +1,697 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// Package ethash implements the ethash proof-of-work consensus engine.
package ethash
import (
"errors"
"fmt"
"math"
"math/big"
"math/rand"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"sync"
"sync/atomic"
"time"
"unsafe"
"github.com/edsrzf/mmap-go"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rpc"
"github.com/hashicorp/golang-lru/simplelru"
)
var ErrInvalidDumpMagic = errors.New("invalid dump magic")
var (
// two256 is a big integer representing 2^256
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// sharedEthash is a full instance that can be shared between multiple users.
sharedEthash *Ethash
// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
// dumpMagic is a dataset dump header to sanity check a data dump.
dumpMagic = []uint32{0xbaddcafe, 0xfee1dead}
)
func init() {
sharedConfig := Config{
PowMode: ModeNormal,
CachesInMem: 3,
DatasetsInMem: 1,
}
sharedEthash = New(sharedConfig, nil, false)
}
// isLittleEndian returns whether the local system is running in little or big
// endian byte order.
func isLittleEndian() bool {
n := uint32(0x01020304)
return *(*byte)(unsafe.Pointer(&n)) == 0x04
}
// memoryMap tries to memory map a file of uint32s for read only access.
func memoryMap(path string, lock bool) (*os.File, mmap.MMap, []uint32, error) {
file, err := os.OpenFile(path, os.O_RDONLY, 0644)
if err != nil {
return nil, nil, nil, err
}
mem, buffer, err := memoryMapFile(file, false)
if err != nil {
file.Close()
return nil, nil, nil, err
}
for i, magic := range dumpMagic {
if buffer[i] != magic {
mem.Unmap()
file.Close()
return nil, nil, nil, ErrInvalidDumpMagic
}
}
if lock {
if err := mem.Lock(); err != nil {
mem.Unmap()
file.Close()
return nil, nil, nil, err
}
}
return file, mem, buffer[len(dumpMagic):], err
}
// memoryMapFile tries to memory map an already opened file descriptor.
func memoryMapFile(file *os.File, write bool) (mmap.MMap, []uint32, error) {
// Try to memory map the file
flag := mmap.RDONLY
if write {
flag = mmap.RDWR
}
mem, err := mmap.Map(file, flag, 0)
if err != nil {
return nil, nil, err
}
// The file is now memory-mapped. Create a []uint32 view of the file.
var view []uint32
header := (*reflect.SliceHeader)(unsafe.Pointer(&view))
header.Data = (*reflect.SliceHeader)(unsafe.Pointer(&mem)).Data
header.Cap = len(mem) / 4
header.Len = header.Cap
return mem, view, nil
}
// memoryMapAndGenerate tries to memory map a temporary file of uint32s for write
// access, fill it with the data from a generator and then move it into the final
// path requested.
func memoryMapAndGenerate(path string, size uint64, lock bool, generator func(buffer []uint32)) (*os.File, mmap.MMap, []uint32, error) {
// Ensure the data folder exists
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return nil, nil, nil, err
}
// Create a huge temporary empty file to fill with data
temp := path + "." + strconv.Itoa(rand.Int())
dump, err := os.Create(temp)
if err != nil {
return nil, nil, nil, err
}
if err = ensureSize(dump, int64(len(dumpMagic))*4+int64(size)); err != nil {
dump.Close()
os.Remove(temp)
return nil, nil, nil, err
}
// Memory map the file for writing and fill it with the generator
mem, buffer, err := memoryMapFile(dump, true)
if err != nil {
dump.Close()
os.Remove(temp)
return nil, nil, nil, err
}
copy(buffer, dumpMagic)
data := buffer[len(dumpMagic):]
generator(data)
if err := mem.Unmap(); err != nil {
return nil, nil, nil, err
}
if err := dump.Close(); err != nil {
return nil, nil, nil, err
}
if err := os.Rename(temp, path); err != nil {
return nil, nil, nil, err
}
return memoryMap(path, lock)
}
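// Note: the generate-into-temp-then-rename dance above keeps a concurrently
// reading process from ever observing a half-written cache/DAG file; the final
// memoryMap call re-opens the renamed file read-only and re-checks dumpMagic.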
// lru tracks caches or datasets by their last use time, keeping at most N of them.
type lru struct {
what string
new func(epoch uint64) interface{}
mu sync.Mutex
// Items are kept in a LRU cache, but there is a special case:
// We always keep an item for (highest seen epoch) + 1 as the 'future item'.
cache *simplelru.LRU
future uint64
futureItem interface{}
}
// newlru creates a new least-recently-used cache for either the verification caches
// or the mining datasets.
func newlru(what string, maxItems int, new func(epoch uint64) interface{}) *lru {
if maxItems <= 0 {
maxItems = 1
}
cache, _ := simplelru.NewLRU(maxItems, func(key, value interface{}) {
log.Trace("Evicted ethash "+what, "epoch", key)
})
return &lru{what: what, new: new, cache: cache}
}
// get retrieves or creates an item for the given epoch. The first return value is always
// non-nil. The second return value is non-nil if lru thinks that an item will be useful in
// the near future.
func (lru *lru) get(epoch uint64) (item, future interface{}) {
lru.mu.Lock()
defer lru.mu.Unlock()
// Get or create the item for the requested epoch.
item, ok := lru.cache.Get(epoch)
if !ok {
if lru.future > 0 && lru.future == epoch {
item = lru.futureItem
} else {
log.Trace("Requiring new ethash "+lru.what, "epoch", epoch)
item = lru.new(epoch)
}
lru.cache.Add(epoch, item)
}
// Update the 'future item' if epoch is larger than previously seen.
if epoch < maxEpoch-1 && lru.future < epoch+1 {
log.Trace("Requiring new future ethash "+lru.what, "epoch", epoch+1)
future = lru.new(epoch + 1)
lru.future = epoch + 1
lru.futureItem = future
}
return item, future
}
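// Usage sketch (mirrors how Ethash.cache below consumes this; dir/limit/lock/test
// are placeholders for the engine's config): a lookup for epoch E returns the item
// for E and, when E advances past anything seen before, a freshly created item for
// E+1 that the caller can warm up in the background:
//
//	caches := newlru("cache", 3, newCache)
//	currentI, futureI := caches.get(epoch)
//	current := currentI.(*cache)
//	current.generate(dir, limit, lock, test)
//	if futureI != nil {
//		go futureI.(*cache).generate(dir, limit, lock, test)
//	}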
// cache wraps an ethash cache with some metadata to allow easier concurrent use.
type cache struct {
epoch uint64 // Epoch for which this cache is relevant
dump *os.File // File descriptor of the memory mapped cache
mmap mmap.MMap // Memory map itself to unmap before releasing
cache []uint32 // The actual cache data content (may be memory mapped)
once sync.Once // Ensures the cache is generated only once
}
// newCache creates a new ethash verification cache and returns it as a plain Go
// interface to be usable in an LRU cache.
func newCache(epoch uint64) interface{} {
return &cache{epoch: epoch}
}
// generate ensures that the cache content is generated before use.
func (c *cache) generate(dir string, limit int, lock bool, test bool) {
c.once.Do(func() {
size := cacheSize(c.epoch*epochLength + 1)
seed := seedHash(c.epoch*epochLength + 1)
if test {
size = 1024
}
// If we don't store anything on disk, generate and return.
if dir == "" {
c.cache = make([]uint32, size/4)
generateCache(c.cache, c.epoch, seed)
return
}
// Disk storage is needed, this will get fancy
var endian string
if !isLittleEndian() {
endian = ".be"
}
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s", algorithmRevision, seed[:8], endian))
logger := log.New("epoch", c.epoch)
// We're about to mmap the file, ensure that the mapping is cleaned up when the
// cache becomes unused.
runtime.SetFinalizer(c, (*cache).finalizer)
// Try to load the file from disk and memory map it
var err error
c.dump, c.mmap, c.cache, err = memoryMap(path, lock)
if err == nil {
logger.Debug("Loaded old ethash cache from disk")
return
}
logger.Debug("Failed to load old ethash cache", "err", err)
// No previous cache available, create a new cache file to fill
c.dump, c.mmap, c.cache, err = memoryMapAndGenerate(path, size, lock, func(buffer []uint32) { generateCache(buffer, c.epoch, seed) })
if err != nil {
logger.Error("Failed to generate mapped ethash cache", "err", err)
c.cache = make([]uint32, size/4)
generateCache(c.cache, c.epoch, seed)
}
// Iterate over all previous instances and delete old ones
for ep := int(c.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("cache-R%d-%x%s*", algorithmRevision, seed[:8], endian))
files, _ := filepath.Glob(path) // find also the temp files that are generated.
for _, file := range files {
os.Remove(file)
}
}
})
}
// finalizer unmaps the memory and closes the file.
func (c *cache) finalizer() {
if c.mmap != nil {
c.mmap.Unmap()
c.dump.Close()
c.mmap, c.dump = nil, nil
}
}
// dataset wraps an ethash dataset with some metadata to allow easier concurrent use.
type dataset struct {
epoch uint64 // Epoch for which this dataset is relevant
dump *os.File // File descriptor of the memory mapped dataset
mmap mmap.MMap // Memory map itself to unmap before releasing
dataset []uint32 // The actual dataset content (may be memory mapped)
once sync.Once // Ensures the dataset is generated only once
done uint32 // Atomic flag to determine generation status
}
// newDataset creates a new ethash mining dataset and returns it as a plain Go
// interface to be usable in an LRU cache.
func newDataset(epoch uint64) interface{} {
return &dataset{epoch: epoch}
}
// generate ensures that the dataset content is generated before use.
func (d *dataset) generate(dir string, limit int, lock bool, test bool) {
d.once.Do(func() {
// Mark the dataset generated after we're done. This is needed for remote
// mining to tell whether the DAG is ready (see generated below).
defer atomic.StoreUint32(&d.done, 1)
csize := cacheSize(d.epoch*epochLength + 1)
dsize := datasetSize(d.epoch*epochLength + 1)
seed := seedHash(d.epoch*epochLength + 1)
if test {
csize = 1024
dsize = 32 * 1024
}
// If we don't store anything on disk, generate and return
if dir == "" {
cache := make([]uint32, csize/4)
generateCache(cache, d.epoch, seed)
d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, cache)
return
}
// Disk storage is needed, this will get fancy
var endian string
if !isLittleEndian() {
endian = ".be"
}
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
logger := log.New("epoch", d.epoch)
// We're about to mmap the file, ensure that the mapping is cleaned up when the
// cache becomes unused.
runtime.SetFinalizer(d, (*dataset).finalizer)
// Try to load the file from disk and memory map it
var err error
d.dump, d.mmap, d.dataset, err = memoryMap(path, lock)
if err == nil {
logger.Debug("Loaded old ethash dataset from disk")
return
}
logger.Debug("Failed to load old ethash dataset", "err", err)
// No previous dataset available, create a new dataset file to fill
cache := make([]uint32, csize/4)
generateCache(cache, d.epoch, seed)
d.dump, d.mmap, d.dataset, err = memoryMapAndGenerate(path, dsize, lock, func(buffer []uint32) { generateDataset(buffer, d.epoch, cache) })
if err != nil {
logger.Error("Failed to generate mapped ethash dataset", "err", err)
d.dataset = make([]uint32, dsize/4)
generateDataset(d.dataset, d.epoch, cache)
}
// Iterate over all previous instances and delete old ones
for ep := int(d.epoch) - limit; ep >= 0; ep-- {
seed := seedHash(uint64(ep)*epochLength + 1)
path := filepath.Join(dir, fmt.Sprintf("full-R%d-%x%s", algorithmRevision, seed[:8], endian))
os.Remove(path)
}
})
}
// generated returns whether this particular dataset finished generating already
// or not (it may not have been started at all). This is useful for remote miners
// to default to verification caches instead of blocking on DAG generations.
func (d *dataset) generated() bool {
return atomic.LoadUint32(&d.done) == 1
}
// finalizer closes any file handlers and memory maps open.
func (d *dataset) finalizer() {
if d.mmap != nil {
d.mmap.Unmap()
d.dump.Close()
d.mmap, d.dump = nil, nil
}
}
// MakeCache generates a new ethash cache and optionally stores it to disk.
func MakeCache(block uint64, dir string) {
c := cache{epoch: block / epochLength}
c.generate(dir, math.MaxInt32, false, false)
}
// MakeDataset generates a new ethash dataset and optionally stores it to disk.
func MakeDataset(block uint64, dir string) {
d := dataset{epoch: block / epochLength}
d.generate(dir, math.MaxInt32, false, false)
}
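// Usage sketch (hypothetical path): pre-generating the verification cache and
// the full DAG for the epoch containing block 15,000,000 into a custom directory:
//
//	MakeCache(15_000_000, "/var/ethash")
//	MakeDataset(15_000_000, "/var/ethash")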
// Mode defines the type and amount of PoW verification an ethash engine makes.
type Mode uint
const (
ModeNormal Mode = iota
ModeShared
ModeTest
ModeFake
ModeFullFake
)
// Config are the configuration parameters of the ethash.
type Config struct {
CacheDir string
CachesInMem int
CachesOnDisk int
CachesLockMmap bool
DatasetDir string
DatasetsInMem int
DatasetsOnDisk int
DatasetsLockMmap bool
PowMode Mode
// When set, notifications sent by the remote sealer will
// be block header JSON objects instead of work package arrays.
NotifyFull bool
Log log.Logger `toml:"-"`
}
// Ethash is a consensus engine based on proof-of-work implementing the ethash
// algorithm.
type Ethash struct {
config Config
caches *lru // In memory caches to avoid regenerating too often
datasets *lru // In memory datasets to avoid regenerating too often
// Mining related fields
rand *rand.Rand // Properly seeded random source for nonces
threads int // Number of threads to mine on if mining
update chan struct{} // Notification channel to update mining parameters
hashrate metrics.Meter // Meter tracking the average hashrate
remote *remoteSealer
// The fields below are hooks for testing
shared *Ethash // Shared PoW verifier to avoid cache regeneration
fakeFail uint64 // Block number which fails PoW check even in fake mode
fakeDelay time.Duration // Time delay to sleep for before returning from verify
lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
closeOnce sync.Once // Ensures exit channel will not be closed twice.
}
// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string, noverify bool) *Ethash {
if config.Log == nil {
config.Log = log.Root()
}
if config.CachesInMem <= 0 {
config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
config.CachesInMem = 1
}
if config.CacheDir != "" && config.CachesOnDisk > 0 {
config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
}
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
}
ethash := &Ethash{
config: config,
caches: newlru("cache", config.CachesInMem, newCache),
datasets: newlru("dataset", config.DatasetsInMem, newDataset),
update: make(chan struct{}),
hashrate: metrics.NewMeterForced(),
}
if config.PowMode == ModeShared {
ethash.shared = sharedEthash
}
ethash.remote = startRemoteSealer(ethash, notify, noverify)
return ethash
}
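// Construction sketch (hypothetical values and paths): a mainnet-style engine
// that keeps three verification caches in memory and stores DAGs on disk might
// be built as
//
//	engine := New(Config{
//		PowMode:        ModeNormal,
//		CacheDir:       "/tmp/ethash",     // hypothetical path
//		CachesInMem:    3,
//		CachesOnDisk:   2,
//		DatasetDir:     "/tmp/ethash-dag", // hypothetical path
//		DatasetsInMem:  1,
//		DatasetsOnDisk: 2,
//	}, nil, false)
//	defer engine.Close()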
// NewTester creates a small sized ethash PoW scheme useful only for testing
// purposes.
func NewTester(notify []string, noverify bool) *Ethash {
return New(Config{PowMode: ModeTest}, notify, noverify)
}
// NewFaker creates an ethash consensus engine with a fake PoW scheme that accepts
// all blocks' seal as valid, though they still have to conform to the Ethereum
// consensus rules.
func NewFaker() *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
Log: log.Root(),
},
}
}
// NewFakeFailer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid apart from the single one specified, though they
// still have to conform to the Ethereum consensus rules.
func NewFakeFailer(fail uint64) *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
Log: log.Root(),
},
fakeFail: fail,
}
}
// NewFakeDelayer creates an ethash consensus engine with a fake PoW scheme that
// accepts all blocks as valid, but delays verifications by some time, though
// they still have to conform to the Ethereum consensus rules.
func NewFakeDelayer(delay time.Duration) *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
Log: log.Root(),
},
fakeDelay: delay,
}
}
// NewFullFaker creates an ethash consensus engine with a full fake scheme that
// accepts all blocks as valid, without checking any consensus rules whatsoever.
func NewFullFaker() *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFullFake,
Log: log.Root(),
},
}
}
// NewShared creates a full sized ethash PoW shared between all requesters running
// in the same process.
func NewShared() *Ethash {
return &Ethash{shared: sharedEthash}
}
// Close closes the exit channel to notify all backend threads exiting.
func (ethash *Ethash) Close() error {
return ethash.StopRemoteSealer()
}
// StopRemoteSealer stops the remote sealer
func (ethash *Ethash) StopRemoteSealer() error {
ethash.closeOnce.Do(func() {
// Short circuit if the exit channel is not allocated.
if ethash.remote == nil {
return
}
close(ethash.remote.requestExit)
<-ethash.remote.exitCh
})
return nil
}
// cache tries to retrieve a verification cache for the specified block number
// by first checking against a list of in-memory caches, then against caches
// stored on disk, and finally generating one if none can be found.
func (ethash *Ethash) cache(block uint64) *cache {
epoch := block / epochLength
currentI, futureI := ethash.caches.get(epoch)
current := currentI.(*cache)
// Wait for generation to finish.
current.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
// If we need a new future cache, now's a good time to regenerate it.
if futureI != nil {
future := futureI.(*cache)
go future.generate(ethash.config.CacheDir, ethash.config.CachesOnDisk, ethash.config.CachesLockMmap, ethash.config.PowMode == ModeTest)
}
return current
}
// dataset tries to retrieve a mining dataset for the specified block number
// by first checking against a list of in-memory datasets, then against DAGs
// stored on disk, and finally generating one if none can be found.
//
// If async is specified, not only the future but the current DAG is also
// generated on a background thread.
func (ethash *Ethash) dataset(block uint64, async bool) *dataset {
// Retrieve the requested ethash dataset
epoch := block / epochLength
currentI, futureI := ethash.datasets.get(epoch)
current := currentI.(*dataset)
// If async is specified, generate everything in a background thread
if async && !current.generated() {
go func() {
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
if futureI != nil {
future := futureI.(*dataset)
future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
}
}()
} else {
// Either blocking generation was requested, or already done
current.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
if futureI != nil {
future := futureI.(*dataset)
go future.generate(ethash.config.DatasetDir, ethash.config.DatasetsOnDisk, ethash.config.DatasetsLockMmap, ethash.config.PowMode == ModeTest)
}
}
return current
}
// Threads returns the number of mining threads currently enabled. This doesn't
// necessarily mean that mining is running!
func (ethash *Ethash) Threads() int {
ethash.lock.Lock()
defer ethash.lock.Unlock()
return ethash.threads
}
// SetThreads updates the number of mining threads currently enabled. Calling
// this method does not start mining, only sets the thread count. If zero is
// specified, the miner will use all cores of the machine. Setting a thread
// count below zero is allowed and will cause the miner to idle, without any
// work being done.
func (ethash *Ethash) SetThreads(threads int) {
ethash.lock.Lock()
defer ethash.lock.Unlock()
// If we're running a shared PoW, set the thread count on that instead
if ethash.shared != nil {
ethash.shared.SetThreads(threads)
return
}
// Update the threads and ping any running seal to pull in any changes
ethash.threads = threads
select {
case ethash.update <- struct{}{}:
default:
}
}
// Hashrate implements PoW, returning the measured rate of the search invocations
// per second over the last minute.
// Note the returned hashrate includes the local hashrate, but also the total
// hashrate of all remote miners.
func (ethash *Ethash) Hashrate() float64 {
// Short circuit if we are not running ethash in normal/test mode.
if ethash.config.PowMode != ModeNormal && ethash.config.PowMode != ModeTest {
return ethash.hashrate.Rate1()
}
var res = make(chan uint64, 1)
select {
case ethash.remote.fetchRateCh <- res:
case <-ethash.remote.exitCh:
// Return local hashrate only if ethash is stopped.
return ethash.hashrate.Rate1()
}
// Gather total submitted hash rate of remote sealers.
return ethash.hashrate.Rate1() + float64(<-res)
}
// APIs implements consensus.Engine, returning the user facing RPC APIs.
func (ethash *Ethash) APIs(chain consensus.ChainHeaderReader) []rpc.API {
// In order to ensure backward compatibility, we expose ethash RPC APIs
// to both eth and ethash namespaces.
return []rpc.API{
{
Namespace: "eth",
Service: &API{ethash},
},
{
Namespace: "ethash",
Service: &API{ethash},
},
}
}
// SeedHash is the seed to use for generating a verification cache and the mining
// dataset.
func SeedHash(block uint64) []byte {
return seedHash(block)
}
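// For reference (per the ethash spec): the seed for epoch 0 is 32 zero bytes and
// each subsequent epoch's seed is the keccak-256 hash of the previous one, so
// SeedHash(block) effectively applies keccak-256 (block / epochLength) times.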

View File

@@ -0,0 +1,35 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build linux
// +build linux
package ethash
import (
"os"
"golang.org/x/sys/unix"
)
// ensureSize expands the file to the given size. This is to prevent runtime
// errors later on, if the underlying file expands beyond the disk capacity,
// even though it ostensibly is already expanded, but due to being sparse
// does not actually occupy the full declared size on disk.
func ensureSize(f *os.File, size int64) error {
// Docs: https://www.man7.org/linux/man-pages/man2/fallocate.2.html
return unix.Fallocate(int(f.Fd()), 0, 0, size)
}

View File

@@ -0,0 +1,36 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//go:build !linux
// +build !linux
package ethash
import (
"os"
)
// ensureSize expands the file to the given size. This is to prevent runtime
// errors later on, if the underlying file expands beyond the disk capacity,
// even though it ostensibly is already expanded, but due to being sparse
// does not actually occupy the full declared size on disk.
func ensureSize(f *os.File, size int64) error {
// On systems which do not support fallocate, we merely truncate it.
// More robust alternatives would be to
// - Use posix_fallocate, or
// - explicitly fill the file with zeroes.
return f.Truncate(size)
}

View File

@@ -0,0 +1,451 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package ethash
import (
"bytes"
"context"
crand "crypto/rand"
"encoding/json"
"errors"
"math"
"math/big"
"math/rand"
"net/http"
"runtime"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
)
const (
// staleThreshold is the maximum depth of the acceptable stale but valid ethash solution.
staleThreshold = 7
)
var (
errNoMiningWork = errors.New("no mining work available yet")
errInvalidSealResult = errors.New("invalid or stale proof-of-work solution")
)
// Seal implements consensus.Engine, attempting to find a nonce that satisfies
// the block's difficulty requirements.
func (ethash *Ethash) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error {
// If we're running a fake PoW, simply return a 0 nonce immediately
if ethash.config.PowMode == ModeFake || ethash.config.PowMode == ModeFullFake {
header := block.Header()
header.Nonce, header.MixDigest = types.BlockNonce{}, common.Hash{}
select {
case results <- block.WithSeal(header):
default:
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
}
return nil
}
// If we're running a shared PoW, delegate sealing to it
if ethash.shared != nil {
return ethash.shared.Seal(chain, block, results, stop)
}
// Create a runner and the multiple search threads it directs
abort := make(chan struct{})
ethash.lock.Lock()
threads := ethash.threads
if ethash.rand == nil {
seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
ethash.lock.Unlock()
return err
}
ethash.rand = rand.New(rand.NewSource(seed.Int64()))
}
ethash.lock.Unlock()
if threads == 0 {
threads = runtime.NumCPU()
}
if threads < 0 {
threads = 0 // Allows disabling local mining without extra logic around local/remote
}
// Push new work to remote sealer
if ethash.remote != nil {
ethash.remote.workCh <- &sealTask{block: block, results: results}
}
var (
pend sync.WaitGroup
locals = make(chan *types.Block)
)
for i := 0; i < threads; i++ {
pend.Add(1)
go func(id int, nonce uint64) {
defer pend.Done()
ethash.mine(block, id, nonce, abort, locals)
}(i, uint64(ethash.rand.Int63()))
}
// Wait until sealing is terminated or a nonce is found
go func() {
var result *types.Block
select {
case <-stop:
// Outside abort, stop all miner threads
close(abort)
case result = <-locals:
// One of the threads found a block, abort all others
select {
case results <- result:
default:
ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
}
close(abort)
case <-ethash.update:
// Thread count was changed on user request, restart
close(abort)
if err := ethash.Seal(chain, block, results, stop); err != nil {
ethash.config.Log.Error("Failed to restart sealing after update", "err", err)
}
}
// Wait for all miners to terminate and return the block
pend.Wait()
}()
return nil
}
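// Usage sketch (hypothetical timeout): Seal is asynchronous; the sealed block
// arrives on the results channel and sealing is cancelled by closing stop:
//
//	results := make(chan *types.Block, 1)
//	stop := make(chan struct{})
//	if err := engine.Seal(chain, block, results, stop); err != nil {
//		return err
//	}
//	select {
//	case sealed := <-results:
//		_ = sealed // hand off to the chain inserter
//	case <-time.After(timeout): // hypothetical deadline
//		close(stop)
//	}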
// mine is the actual proof-of-work miner that searches for a nonce starting from
// seed that results in correct final block difficulty.
func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan struct{}, found chan *types.Block) {
// Extract some data from the header
var (
header = block.Header()
hash = ethash.SealHash(header).Bytes()
target = new(big.Int).Div(two256, header.Difficulty)
number = header.Number.Uint64()
dataset = ethash.dataset(number, false)
)
// Start generating random nonces until we abort or find a good one
var (
attempts = int64(0)
nonce = seed
powBuffer = new(big.Int)
)
logger := ethash.config.Log.New("miner", id)
logger.Trace("Started ethash search for new nonces", "seed", seed)
search:
for {
select {
case <-abort:
// Mining terminated, update stats and abort
logger.Trace("Ethash nonce search aborted", "attempts", nonce-seed)
ethash.hashrate.Mark(attempts)
break search
default:
// We don't have to update hash rate on every nonce, so update after 2^X nonces
attempts++
if (attempts % (1 << 15)) == 0 {
ethash.hashrate.Mark(attempts)
attempts = 0
}
// Compute the PoW value of this nonce
digest, result := hashimotoFull(dataset.dataset, hash, nonce)
if powBuffer.SetBytes(result).Cmp(target) <= 0 {
// Correct nonce found, create a new header with it
header = types.CopyHeader(header)
header.Nonce = types.EncodeNonce(nonce)
header.MixDigest = common.BytesToHash(digest)
// Seal and return a block (if still needed)
select {
case found <- block.WithSeal(header):
logger.Trace("Ethash nonce found and reported", "attempts", nonce-seed, "nonce", nonce)
case <-abort:
logger.Trace("Ethash nonce found but discarded", "attempts", nonce-seed, "nonce", nonce)
}
break search
}
nonce++
}
}
// Datasets are unmapped in a finalizer. Ensure that the dataset stays live
// during sealing so it's not unmapped while being read.
runtime.KeepAlive(dataset)
}
// This is the timeout for HTTP requests to notify external miners.
const remoteSealerTimeout = 1 * time.Second
type remoteSealer struct {
works map[common.Hash]*types.Block
rates map[common.Hash]hashrate
currentBlock *types.Block
currentWork [4]string
notifyCtx context.Context
cancelNotify context.CancelFunc // cancels all notification requests
reqWG sync.WaitGroup // tracks notification request goroutines
ethash *Ethash
noverify bool
notifyURLs []string
results chan<- *types.Block
workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
requestExit chan struct{}
exitCh chan struct{}
}
// sealTask wraps a seal block with its result channel for the remote sealer thread.
type sealTask struct {
block *types.Block
results chan<- *types.Block
}
// mineResult wraps the pow solution parameters for the specified block.
type mineResult struct {
nonce types.BlockNonce
mixDigest common.Hash
hash common.Hash
errc chan error
}
// hashrate wraps the hash rate submitted by the remote sealer.
type hashrate struct {
id common.Hash
ping time.Time
rate uint64
done chan struct{}
}
// sealWork wraps a seal work package for remote sealer.
type sealWork struct {
errc chan error
res chan [4]string
}
func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer {
ctx, cancel := context.WithCancel(context.Background())
s := &remoteSealer{
ethash: ethash,
noverify: noverify,
notifyURLs: urls,
notifyCtx: ctx,
cancelNotify: cancel,
works: make(map[common.Hash]*types.Block),
rates: make(map[common.Hash]hashrate),
workCh: make(chan *sealTask),
fetchWorkCh: make(chan *sealWork),
submitWorkCh: make(chan *mineResult),
fetchRateCh: make(chan chan uint64),
submitRateCh: make(chan *hashrate),
requestExit: make(chan struct{}),
exitCh: make(chan struct{}),
}
go s.loop()
return s
}
func (s *remoteSealer) loop() {
defer func() {
s.ethash.config.Log.Trace("Ethash remote sealer is exiting")
s.cancelNotify()
s.reqWG.Wait()
close(s.exitCh)
}()
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case work := <-s.workCh:
// Update current work with new received block.
// Note the same work can be posted twice, which happens when changing CPU threads.
s.results = work.results
s.makeWork(work.block)
s.notifyWork()
case work := <-s.fetchWorkCh:
// Return current mining work to remote miner.
if s.currentBlock == nil {
work.errc <- errNoMiningWork
} else {
work.res <- s.currentWork
}
case result := <-s.submitWorkCh:
// Verify submitted PoW solution based on maintained mining blocks.
if s.submitWork(result.nonce, result.mixDigest, result.hash) {
result.errc <- nil
} else {
result.errc <- errInvalidSealResult
}
case result := <-s.submitRateCh:
// Trace remote sealer's hash rate by submitted value.
s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
close(result.done)
case req := <-s.fetchRateCh:
// Gather all hash rate submitted by remote sealer.
var total uint64
for _, rate := range s.rates {
// this could overflow
total += rate.rate
}
req <- total
case <-ticker.C:
// Clear stale submitted hash rate.
for id, rate := range s.rates {
if time.Since(rate.ping) > 10*time.Second {
delete(s.rates, id)
}
}
// Clear stale pending blocks
if s.currentBlock != nil {
for hash, block := range s.works {
if block.NumberU64()+staleThreshold <= s.currentBlock.NumberU64() {
delete(s.works, hash)
}
}
}
case <-s.requestExit:
return
}
}
}
// makeWork creates a work package for external miner.
//
// The work package consists of 3 strings:
//
// result[0], 32 bytes hex encoded current block header pow-hash
// result[1], 32 bytes hex encoded seed hash used for DAG
// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3], hex encoded block number
func (s *remoteSealer) makeWork(block *types.Block) {
hash := s.ethash.SealHash(block.Header())
s.currentWork[0] = hash.Hex()
s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
s.currentWork[3] = hexutil.EncodeBig(block.Number())
// Trace the seal work fetched by remote sealer.
s.currentBlock = block
s.works[hash] = block
}
// notifyWork notifies all the specified mining endpoints of the availability of
// new work to be processed.
func (s *remoteSealer) notifyWork() {
work := s.currentWork
// Encode the JSON payload of the notification. When NotifyFull is set,
// this is the complete block header, otherwise it is a JSON array.
var blob []byte
if s.ethash.config.NotifyFull {
blob, _ = json.Marshal(s.currentBlock.Header())
} else {
blob, _ = json.Marshal(work)
}
s.reqWG.Add(len(s.notifyURLs))
for _, url := range s.notifyURLs {
go s.sendNotification(s.notifyCtx, url, blob, work)
}
}
func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) {
defer s.reqWG.Done()
req, err := http.NewRequest("POST", url, bytes.NewReader(json))
if err != nil {
s.ethash.config.Log.Warn("Can't create remote miner notification", "err", err)
return
}
ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout)
defer cancel()
req = req.WithContext(ctx)
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
if err != nil {
s.ethash.config.Log.Warn("Failed to notify remote miner", "err", err)
} else {
s.ethash.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2])
resp.Body.Close()
}
}
// submitWork verifies the submitted pow solution, returning whether the
// solution was accepted or not (a rejection can be caused by a bad pow as well as
// any other error, like no pending work or a stale mining result).
func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
if s.currentBlock == nil {
s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash)
return false
}
// Make sure the work submitted is present
block := s.works[sealhash]
if block == nil {
s.ethash.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentBlock.NumberU64())
return false
}
// Verify the correctness of submitted result.
header := block.Header()
header.Nonce = nonce
header.MixDigest = mixDigest
start := time.Now()
if !s.noverify {
if err := s.ethash.verifySeal(nil, header, true); err != nil {
s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
return false
}
}
// Make sure the result channel is assigned.
if s.results == nil {
s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected")
return false
}
s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))
// Solution seems to be valid, return to the miner and notify acceptance.
solution := block.WithSeal(header)
// The submitted solution is within the scope of acceptance.
if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() {
select {
case s.results <- solution:
s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return true
default:
s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
return false
}
}
// The submitted block is too old to accept, drop it.
s.ethash.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
return false
}
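
For orientation, here is a small hedged sketch (not part of the vendored file) of the boundary encoded into result[2] of the work package above: it is 2^256 divided by the block difficulty, so an external miner accepts a nonce whose PoW digest, read as a 256-bit integer, is at most this value. The difficulty below is made up for illustration.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// Illustrative difficulty only; real values come from the sealed header.
	difficulty := big.NewInt(2_000_000)

	// makeWork encodes result[2] as 2^256/difficulty; a solution is acceptable
	// when its PoW digest interpreted as a big-endian integer is <= this bound.
	two256 := new(big.Int).Exp(big.NewInt(2), big.NewInt(256), nil)
	target := new(big.Int).Div(two256, difficulty)

	fmt.Println("boundary:", common.BytesToHash(target.Bytes()).Hex())
}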

View File

@@ -0,0 +1,110 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package consensus
import (
"fmt"
"sync"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/rlp"
)
// transitionStatus describes the status of the eth1/2 transition. This switch
// between modes is a one-way action which is triggered by the corresponding
// consensus-layer message.
type transitionStatus struct {
LeftPoW bool // The flag is set when the first NewHead message is received
EnteredPoS bool // The flag is set when the first FinalisedBlock message is received
}
// Merger is an internal helper structure used to track the eth1/2 transition status.
// It's a common structure that can be used by both full nodes and light clients.
type Merger struct {
db ethdb.KeyValueStore
status transitionStatus
mu sync.RWMutex
}
// NewMerger creates a new Merger which stores its transition status in the provided db.
func NewMerger(db ethdb.KeyValueStore) *Merger {
var status transitionStatus
blob := rawdb.ReadTransitionStatus(db)
if len(blob) != 0 {
if err := rlp.DecodeBytes(blob, &status); err != nil {
log.Crit("Failed to decode the transition status", "err", err)
}
}
return &Merger{
db: db,
status: status,
}
}
// ReachTTD is called whenever the first NewHead message is received
// from the consensus layer.
func (m *Merger) ReachTTD() {
m.mu.Lock()
defer m.mu.Unlock()
if m.status.LeftPoW {
return
}
m.status = transitionStatus{LeftPoW: true}
blob, err := rlp.EncodeToBytes(m.status)
if err != nil {
panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
}
rawdb.WriteTransitionStatus(m.db, blob)
log.Info("Left PoW stage")
}
// FinalizePoS is called whenever the first FinalisedBlock message is received
// from the consensus layer.
func (m *Merger) FinalizePoS() {
m.mu.Lock()
defer m.mu.Unlock()
if m.status.EnteredPoS {
return
}
m.status = transitionStatus{LeftPoW: true, EnteredPoS: true}
blob, err := rlp.EncodeToBytes(m.status)
if err != nil {
panic(fmt.Sprintf("Failed to encode the transition status: %v", err))
}
rawdb.WriteTransitionStatus(m.db, blob)
log.Info("Entered PoS stage")
}
// TDDReached reports whether the chain has left the PoW stage.
func (m *Merger) TDDReached() bool {
m.mu.RLock()
defer m.mu.RUnlock()
return m.status.LeftPoW
}
// PoSFinalized reports whether the chain has entered the PoS stage.
func (m *Merger) PoSFinalized() bool {
m.mu.RLock()
defer m.mu.RUnlock()
return m.status.EnteredPoS
}
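
A minimal hedged usage sketch for the Merger above, using an in-memory database from rawdb (a real node would pass its chain database); the two calls mirror the one-way transitions tracked by transitionStatus.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	// In-memory key-value store, so the persisted transition status is discarded on exit.
	merger := consensus.NewMerger(rawdb.NewMemoryDatabase())
	fmt.Println("left PoW:", merger.TDDReached(), "entered PoS:", merger.PoSFinalized())

	merger.ReachTTD()    // first NewHead message from the consensus layer
	merger.FinalizePoS() // first FinalisedBlock message from the consensus layer
	fmt.Println("left PoW:", merger.TDDReached(), "entered PoS:", merger.PoSFinalized())
}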

View File

@@ -0,0 +1,86 @@
// Copyright 2016 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package misc
import (
"bytes"
"errors"
"math/big"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
var (
// ErrBadProDAOExtra is returned if a header doesn't support the DAO fork on a
// pro-fork client.
ErrBadProDAOExtra = errors.New("bad DAO pro-fork extra-data")
// ErrBadNoDAOExtra is returned if a header does support the DAO fork on a no-
// fork client.
ErrBadNoDAOExtra = errors.New("bad DAO no-fork extra-data")
)
// VerifyDAOHeaderExtraData validates the extra-data field of a block header to
// ensure it conforms to DAO hard-fork rules.
//
// DAO hard-fork extension to the header validity:
//
// - if the node is no-fork, do not accept blocks in the [fork, fork+10) range
// with the fork specific extra-data set.
// - if the node is pro-fork, require blocks in the specific range to have the
// unique extra-data set.
func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error {
// Short circuit validation if the node doesn't care about the DAO fork
if config.DAOForkBlock == nil {
return nil
}
// Make sure the block is within the fork's modified extra-data range
limit := new(big.Int).Add(config.DAOForkBlock, params.DAOForkExtraRange)
if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 {
return nil
}
// Depending on whether we support or oppose the fork, validate the extra-data contents
if config.DAOForkSupport {
if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
return ErrBadProDAOExtra
}
} else {
if bytes.Equal(header.Extra, params.DAOForkBlockExtra) {
return ErrBadNoDAOExtra
}
}
// All ok, header has the same extra-data we expect
return nil
}
// ApplyDAOHardFork modifies the state database according to the DAO hard-fork
// rules, transferring all balances of a set of DAO accounts to a single refund
// contract.
func ApplyDAOHardFork(statedb *state.StateDB) {
// Retrieve the contract to refund balances into
if !statedb.Exist(params.DAORefundContract) {
statedb.CreateAccount(params.DAORefundContract)
}
// Move every DAO account and extra-balance account funds into the refund contract
for _, addr := range params.DAODrainList() {
statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr))
statedb.SetBalance(addr, new(big.Int))
}
}
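
A hedged sketch of how the two helpers above are commonly wired together: the extra-data check runs during header verification for every block in the fork window, while the balance move happens once, at the fork block itself, on pro-fork chains. verifyAndApplyDAO is an illustrative wrapper, not an API of this package, and main only demonstrates ApplyDAOHardFork against an empty in-memory state.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// verifyAndApplyDAO is an illustrative wrapper (not part of go-ethereum): it
// enforces the extra-data rule for the header and, exactly at the fork block
// on a pro-fork chain, drains the DAO accounts into the refund contract.
func verifyAndApplyDAO(config *params.ChainConfig, header *types.Header, statedb *state.StateDB) error {
	if err := misc.VerifyDAOHeaderExtraData(config, header); err != nil {
		return err
	}
	if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(header.Number) == 0 {
		misc.ApplyDAOHardFork(statedb)
	}
	return nil
}

func main() {
	// Empty in-memory state, purely to show ApplyDAOHardFork running end to end.
	statedb, err := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	if err != nil {
		panic(err)
	}
	misc.ApplyDAOHardFork(statedb)
	fmt.Println("refund contract exists:", statedb.Exist(params.DAORefundContract))
}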

View File

@@ -0,0 +1,93 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package misc
import (
"fmt"
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/math"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
// VerifyEip1559Header verifies some header attributes which were changed in EIP-1559,
// - gas limit check
// - basefee check
func VerifyEip1559Header(config *params.ChainConfig, parent, header *types.Header) error {
// Verify that the gas limit remains within allowed bounds
parentGasLimit := parent.GasLimit
if !config.IsLondon(parent.Number) {
parentGasLimit = parent.GasLimit * params.ElasticityMultiplier
}
if err := VerifyGaslimit(parentGasLimit, header.GasLimit); err != nil {
return err
}
// Verify the header is not malformed
if header.BaseFee == nil {
return fmt.Errorf("header is missing baseFee")
}
// Verify the baseFee is correct based on the parent header.
expectedBaseFee := CalcBaseFee(config, parent)
if header.BaseFee.Cmp(expectedBaseFee) != 0 {
return fmt.Errorf("invalid baseFee: have %s, want %s, parentBaseFee %s, parentGasUsed %d",
header.BaseFee, expectedBaseFee, parent.BaseFee, parent.GasUsed)
}
return nil
}
// CalcBaseFee calculates the basefee of the header.
func CalcBaseFee(config *params.ChainConfig, parent *types.Header) *big.Int {
// If the current block is the first EIP-1559 block, return the InitialBaseFee.
if !config.IsLondon(parent.Number) {
return new(big.Int).SetUint64(params.InitialBaseFee)
}
parentGasTarget := parent.GasLimit / params.ElasticityMultiplier
// If the parent gasUsed is the same as the target, the baseFee remains unchanged.
if parent.GasUsed == parentGasTarget {
return new(big.Int).Set(parent.BaseFee)
}
var (
num = new(big.Int)
denom = new(big.Int)
)
if parent.GasUsed > parentGasTarget {
// If the parent block used more gas than its target, the baseFee should increase.
// max(1, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
num.SetUint64(parent.GasUsed - parentGasTarget)
num.Mul(num, parent.BaseFee)
num.Div(num, denom.SetUint64(parentGasTarget))
num.Div(num, denom.SetUint64(params.BaseFeeChangeDenominator))
baseFeeDelta := math.BigMax(num, common.Big1)
return num.Add(parent.BaseFee, baseFeeDelta)
} else {
// Otherwise if the parent block used less gas than its target, the baseFee should decrease.
// max(0, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator)
num.SetUint64(parentGasTarget - parent.GasUsed)
num.Mul(num, parent.BaseFee)
num.Div(num, denom.SetUint64(parentGasTarget))
num.Div(num, denom.SetUint64(params.BaseFeeChangeDenominator))
baseFee := num.Sub(parent.BaseFee, num)
return math.BigMax(baseFee, common.Big0)
}
}
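
A small hedged sketch of CalcBaseFee in use: with the standard parameters, a parent block that used its entire 30M gas limit (twice the 15M target) pushes the base fee up by one eighth, from 1 gwei to 1.125 gwei. The chain config and header values are made up for illustration.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// London active from genesis, so the parent is already an EIP-1559 block.
	config := &params.ChainConfig{LondonBlock: big.NewInt(0)}

	parent := &types.Header{
		Number:   big.NewInt(1),
		GasLimit: 30_000_000, // gas target = GasLimit / ElasticityMultiplier = 15M
		GasUsed:  30_000_000, // a completely full block, twice the target
		BaseFee:  big.NewInt(params.InitialBaseFee),
	}

	// Expected: 1 gwei + (1 gwei * 15M/15M) / 8 = 1.125 gwei.
	fmt.Println("next base fee:", misc.CalcBaseFee(config, parent))
}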

View File

@@ -0,0 +1,43 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package misc
import (
"fmt"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
)
// VerifyForkHashes verifies that blocks conforming to network hard-forks do have
// the correct hashes, to avoid clients going off on different chains. This is an
// optional feature.
func VerifyForkHashes(config *params.ChainConfig, header *types.Header, uncle bool) error {
// We don't care about uncles
if uncle {
return nil
}
// If the homestead reprice hash is set, validate it
if config.EIP150Block != nil && config.EIP150Block.Cmp(header.Number) == 0 {
if config.EIP150Hash != (common.Hash{}) && config.EIP150Hash != header.Hash() {
return fmt.Errorf("homestead gas reprice fork: have %#x, want %#x", header.Hash(), config.EIP150Hash)
}
}
// All ok, return
return nil
}
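
A hedged sketch of the check above against the mainnet configuration: a synthetic header placed at the EIP-150 fork height will not hash to the pinned fork hash, so VerifyForkHashes rejects it. The header is deliberately minimal and illustrative.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/consensus/misc"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// A bare header at mainnet's EIP-150 fork block.
	header := &types.Header{Number: new(big.Int).Set(params.MainnetChainConfig.EIP150Block)}

	// Its hash cannot match the canonical fork hash, so an error is expected.
	fmt.Println(misc.VerifyForkHashes(params.MainnetChainConfig, header, false))
}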

View File

@@ -0,0 +1,42 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package misc
import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/params"
)
// VerifyGaslimit verifies the header gas limit according to the allowed
// increase/decrease relative to the parent gas limit.
func VerifyGaslimit(parentGasLimit, headerGasLimit uint64) error {
// Verify that the gas limit remains within allowed bounds
diff := int64(parentGasLimit) - int64(headerGasLimit)
if diff < 0 {
diff *= -1
}
limit := parentGasLimit / params.GasLimitBoundDivisor
if uint64(diff) >= limit {
return fmt.Errorf("invalid gas limit: have %d, want %d +-= %d", headerGasLimit, parentGasLimit, limit-1)
}
if headerGasLimit < params.MinGasLimit {
return errors.New("invalid gas limit below 5000")
}
return nil
}
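
A hedged sketch of the bound enforced above: with a 30M parent gas limit the next header may drift by strictly less than 30,000,000/1024 = 29,296 gas, and a limit under params.MinGasLimit (5000) is rejected outright. The numbers are examples.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/consensus/misc"
)

func main() {
	fmt.Println(misc.VerifyGaslimit(30_000_000, 30_029_295)) // <nil>, drift stays under 30M/1024
	fmt.Println(misc.VerifyGaslimit(30_000_000, 30_029_296)) // error, drift reached 30M/1024
	fmt.Println(misc.VerifyGaslimit(5_000, 4_999))           // error, below the 5000 minimum
}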

12
vendor/github.com/ethereum/go-ethereum/core/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,12 @@
# See http://help.github.com/ignore-files/ for more about ignoring files.
#
# If you find yourself ignoring temporary files generated by your text editor
# or operating system, you probably want to add a global ignore instead:
# git config --global core.excludesfile ~/.gitignore_global
/tmp
*/**/*un~
*un~
.DS_Store
*/**/.DS_Store

View File

@@ -0,0 +1,129 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"fmt"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/trie"
)
// BlockValidator is responsible for validating block headers, uncles and
// processed state.
//
// BlockValidator implements Validator.
type BlockValidator struct {
config *params.ChainConfig // Chain configuration options
bc *BlockChain // Canonical block chain
engine consensus.Engine // Consensus engine used for validating
}
// NewBlockValidator returns a new block validator which is safe for re-use
func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engine consensus.Engine) *BlockValidator {
validator := &BlockValidator{
config: config,
engine: engine,
bc: blockchain,
}
return validator
}
// ValidateBody validates the given block's uncles and verifies the block
// header's transaction and uncle roots. The headers are assumed to be already
// validated at this point.
func (v *BlockValidator) ValidateBody(block *types.Block) error {
// Check whether the block's known, and if not, that it's linkable
if v.bc.HasBlockAndState(block.Hash(), block.NumberU64()) {
return ErrKnownBlock
}
// Header validity is known at this point, check the uncles and transactions
header := block.Header()
if err := v.engine.VerifyUncles(v.bc, block); err != nil {
return err
}
if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash {
return fmt.Errorf("uncle root hash mismatch: have %x, want %x", hash, header.UncleHash)
}
if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash {
return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash)
}
if !v.bc.HasBlockAndState(block.ParentHash(), block.NumberU64()-1) {
if !v.bc.HasBlock(block.ParentHash(), block.NumberU64()-1) {
return consensus.ErrUnknownAncestor
}
return consensus.ErrPrunedAncestor
}
return nil
}
// ValidateState validates the various changes that happen after a state
// transition, such as the amount of gas used, the receipt roots and the state root
// itself. ValidateState returns nil if the validation succeeds, otherwise an
// error describing the failed check is returned.
func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateDB, receipts types.Receipts, usedGas uint64) error {
header := block.Header()
if block.GasUsed() != usedGas {
return fmt.Errorf("invalid gas used (remote: %d local: %d)", block.GasUsed(), usedGas)
}
// Validate the received block's bloom with the one derived from the generated receipts.
// For valid blocks this should always validate to true.
rbloom := types.CreateBloom(receipts)
if rbloom != header.Bloom {
return fmt.Errorf("invalid bloom (remote: %x local: %x)", header.Bloom, rbloom)
}
// The receipt Trie's root (R = (Tr [[H1, R1], ... [Hn, Rn]]))
receiptSha := types.DeriveSha(receipts, trie.NewStackTrie(nil))
if receiptSha != header.ReceiptHash {
return fmt.Errorf("invalid receipt root hash (remote: %x local: %x)", header.ReceiptHash, receiptSha)
}
// Validate the state root against the received state root and throw
// an error if they don't match.
if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root {
return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root)
}
return nil
}
// CalcGasLimit computes the gas limit of the next block after parent. It aims
// to keep the baseline gas close to the provided target, and increase it towards
// the target if the baseline gas is lower.
func CalcGasLimit(parentGasLimit, desiredLimit uint64) uint64 {
delta := parentGasLimit/params.GasLimitBoundDivisor - 1
limit := parentGasLimit
if desiredLimit < params.MinGasLimit {
desiredLimit = params.MinGasLimit
}
// If we're outside the allowed gas range, we try to converge towards the desired limit
if limit < desiredLimit {
limit = parentGasLimit + delta
if limit > desiredLimit {
limit = desiredLimit
}
return limit
}
if limit > desiredLimit {
limit = parentGasLimit - delta
if limit < desiredLimit {
limit = desiredLimit
}
}
return limit
}
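
A hedged sketch of CalcGasLimit converging on a miner's desired limit: each block may only move by parentGasLimit/1024 - 1, so raising the limit from 15M towards 30M takes many blocks. The values are illustrative.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

func main() {
	limit, desired := uint64(15_000_000), uint64(30_000_000)
	for block := 1; block <= 5; block++ {
		limit = core.CalcGasLimit(limit, desired)
		fmt.Printf("block %d: gas limit %d\n", block, limit)
	}
}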

2437
vendor/github.com/ethereum/go-ethereum/core/blockchain.go generated vendored Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,177 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
)
// insertStats tracks and reports on block insertion.
type insertStats struct {
queued, processed, ignored int
usedGas uint64
lastIndex int
startTime mclock.AbsTime
}
// statsReportLimit is the time limit during import and export after which we
// always print out progress. This avoids the user wondering what's going on.
const statsReportLimit = 8 * time.Second
// report prints statistics if some number of blocks have been processed
// or more than a few seconds have passed since the last message.
func (st *insertStats) report(chain []*types.Block, index int, dirty common.StorageSize, setHead bool) {
// Fetch the timings for the batch
var (
now = mclock.Now()
elapsed = now.Sub(st.startTime)
)
// If we're at the last block of the batch or report period reached, log
if index == len(chain)-1 || elapsed >= statsReportLimit {
// Count the number of transactions in this segment
var txs int
for _, block := range chain[st.lastIndex : index+1] {
txs += len(block.Transactions())
}
end := chain[index]
// Assemble the log context and send it to the logger
context := []interface{}{
"number", end.Number(), "hash", end.Hash(),
"blocks", st.processed, "txs", txs, "mgas", float64(st.usedGas) / 1000000,
"elapsed", common.PrettyDuration(elapsed), "mgasps", float64(st.usedGas) * 1000 / float64(elapsed),
}
if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute {
context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
}
context = append(context, []interface{}{"dirty", dirty}...)
if st.queued > 0 {
context = append(context, []interface{}{"queued", st.queued}...)
}
if st.ignored > 0 {
context = append(context, []interface{}{"ignored", st.ignored}...)
}
if setHead {
log.Info("Imported new chain segment", context...)
} else {
log.Info("Imported new potential chain segment", context...)
}
// Bump the stats reported to the next section
*st = insertStats{startTime: now, lastIndex: index + 1}
}
}
// insertIterator is a helper to assist during chain import.
type insertIterator struct {
chain types.Blocks // Chain of blocks being iterated over
results <-chan error // Verification result sink from the consensus engine
errors []error // Header verification errors for the blocks
index int // Current offset of the iterator
validator Validator // Validator to run if verification succeeds
}
// newInsertIterator creates a new iterator based on the given blocks, which are
// assumed to be a contiguous chain.
func newInsertIterator(chain types.Blocks, results <-chan error, validator Validator) *insertIterator {
return &insertIterator{
chain: chain,
results: results,
errors: make([]error, 0, len(chain)),
index: -1,
validator: validator,
}
}
// next returns the next block in the iterator, along with any potential validation
// error for that block. When the end is reached, it will return (nil, nil).
func (it *insertIterator) next() (*types.Block, error) {
// If we reached the end of the chain, abort
if it.index+1 >= len(it.chain) {
it.index = len(it.chain)
return nil, nil
}
// Advance the iterator and wait for verification result if not yet done
it.index++
if len(it.errors) <= it.index {
it.errors = append(it.errors, <-it.results)
}
if it.errors[it.index] != nil {
return it.chain[it.index], it.errors[it.index]
}
// Block header valid, run body validation and return
return it.chain[it.index], it.validator.ValidateBody(it.chain[it.index])
}
// peek returns the next block in the iterator, along with any potential validation
// error for that block, but does **not** advance the iterator.
//
// Both header and body validation errors (nil too) are cached into the iterator
// to avoid duplicating work on the following next() call.
func (it *insertIterator) peek() (*types.Block, error) {
// If we reached the end of the chain, abort
if it.index+1 >= len(it.chain) {
return nil, nil
}
// Wait for verification result if not yet done
if len(it.errors) <= it.index+1 {
it.errors = append(it.errors, <-it.results)
}
if it.errors[it.index+1] != nil {
return it.chain[it.index+1], it.errors[it.index+1]
}
// Block header valid, ignore body validation since we don't have a parent anyway
return it.chain[it.index+1], nil
}
// previous returns the previous header that was being processed, or nil.
func (it *insertIterator) previous() *types.Header {
if it.index < 1 {
return nil
}
return it.chain[it.index-1].Header()
}
// current returns the current header that is being processed, or nil.
func (it *insertIterator) current() *types.Header {
if it.index == -1 || it.index >= len(it.chain) {
return nil
}
return it.chain[it.index].Header()
}
// first returns the first block in the iterator.
func (it *insertIterator) first() *types.Block {
return it.chain[0]
}
// remaining returns the number of remaining blocks.
func (it *insertIterator) remaining() int {
return len(it.chain) - it.index
}
// processed returns the number of processed blocks.
func (it *insertIterator) processed() int {
return it.index + 1
}

View File

@@ -0,0 +1,408 @@
// Copyright 2021 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import (
"math/big"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
)
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
return bc.hc.CurrentHeader()
}
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
return bc.currentBlock.Load().(*types.Block)
}
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
return bc.currentFastBlock.Load().(*types.Block)
}
// CurrentFinalizedBlock retrieves the current finalized block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFinalizedBlock() *types.Block {
return bc.currentFinalizedBlock.Load().(*types.Block)
}
// CurrentSafeBlock retrieves the current safe block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentSafeBlock() *types.Block {
return bc.currentSafeBlock.Load().(*types.Block)
}
// HasHeader checks if a block header is present in the database or not, caching
// it if present.
func (bc *BlockChain) HasHeader(hash common.Hash, number uint64) bool {
return bc.hc.HasHeader(hash, number)
}
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetHeader(hash common.Hash, number uint64) *types.Header {
return bc.hc.GetHeader(hash, number)
}
// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (bc *BlockChain) GetHeaderByHash(hash common.Hash) *types.Header {
return bc.hc.GetHeaderByHash(hash)
}
// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header {
return bc.hc.GetHeaderByNumber(number)
}
// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
// backwards from the given number.
func (bc *BlockChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
return bc.hc.GetHeadersFrom(number, count)
}
// GetBody retrieves a block body (transactions and uncles) from the database by
// hash, caching it if found.
func (bc *BlockChain) GetBody(hash common.Hash) *types.Body {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := bc.bodyCache.Get(hash); ok {
body := cached.(*types.Body)
return body
}
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
body := rawdb.ReadBody(bc.db, hash, *number)
if body == nil {
return nil
}
// Cache the found body for next time and return
bc.bodyCache.Add(hash, body)
return body
}
// GetBodyRLP retrieves a block body in RLP encoding from the database by hash,
// caching it if found.
func (bc *BlockChain) GetBodyRLP(hash common.Hash) rlp.RawValue {
// Short circuit if the body's already in the cache, retrieve otherwise
if cached, ok := bc.bodyRLPCache.Get(hash); ok {
return cached.(rlp.RawValue)
}
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
body := rawdb.ReadBodyRLP(bc.db, hash, *number)
if len(body) == 0 {
return nil
}
// Cache the found body for next time and return
bc.bodyRLPCache.Add(hash, body)
return body
}
// HasBlock checks if a block is fully present in the database or not.
func (bc *BlockChain) HasBlock(hash common.Hash, number uint64) bool {
if bc.blockCache.Contains(hash) {
return true
}
if !bc.HasHeader(hash, number) {
return false
}
return rawdb.HasBody(bc.db, hash, number)
}
// HasFastBlock checks if a fast block is fully present in the database or not.
func (bc *BlockChain) HasFastBlock(hash common.Hash, number uint64) bool {
if !bc.HasBlock(hash, number) {
return false
}
if bc.receiptsCache.Contains(hash) {
return true
}
return rawdb.HasReceipts(bc.db, hash, number)
}
// GetBlock retrieves a block from the database by hash and number,
// caching it if found.
func (bc *BlockChain) GetBlock(hash common.Hash, number uint64) *types.Block {
// Short circuit if the block's already in the cache, retrieve otherwise
if block, ok := bc.blockCache.Get(hash); ok {
return block.(*types.Block)
}
block := rawdb.ReadBlock(bc.db, hash, number)
if block == nil {
return nil
}
// Cache the found block for next time and return
bc.blockCache.Add(block.Hash(), block)
return block
}
// GetBlockByHash retrieves a block from the database by hash, caching it if found.
func (bc *BlockChain) GetBlockByHash(hash common.Hash) *types.Block {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
return bc.GetBlock(hash, *number)
}
// GetBlockByNumber retrieves a block from the database by number, caching it
// (associated with its hash) if found.
func (bc *BlockChain) GetBlockByNumber(number uint64) *types.Block {
hash := rawdb.ReadCanonicalHash(bc.db, number)
if hash == (common.Hash{}) {
return nil
}
return bc.GetBlock(hash, number)
}
// GetBlocksFromHash returns the block corresponding to hash and up to n-1 ancestors.
// [deprecated by eth/62]
func (bc *BlockChain) GetBlocksFromHash(hash common.Hash, n int) (blocks []*types.Block) {
number := bc.hc.GetBlockNumber(hash)
if number == nil {
return nil
}
for i := 0; i < n; i++ {
block := bc.GetBlock(hash, *number)
if block == nil {
break
}
blocks = append(blocks, block)
hash = block.ParentHash()
*number--
}
return
}
// GetReceiptsByHash retrieves the receipts for all transactions in a given block.
func (bc *BlockChain) GetReceiptsByHash(hash common.Hash) types.Receipts {
if receipts, ok := bc.receiptsCache.Get(hash); ok {
return receipts.(types.Receipts)
}
number := rawdb.ReadHeaderNumber(bc.db, hash)
if number == nil {
return nil
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
if receipts == nil {
return nil
}
bc.receiptsCache.Add(hash, receipts)
return receipts
}
// GetUnclesInChain retrieves all the uncles from a given block backwards until
// a specific distance is reached.
func (bc *BlockChain) GetUnclesInChain(block *types.Block, length int) []*types.Header {
uncles := []*types.Header{}
for i := 0; block != nil && i < length; i++ {
uncles = append(uncles, block.Uncles()...)
block = bc.GetBlock(block.ParentHash(), block.NumberU64()-1)
}
return uncles
}
// GetCanonicalHash returns the canonical hash for a given block number
func (bc *BlockChain) GetCanonicalHash(number uint64) common.Hash {
return bc.hc.GetCanonicalHash(number)
}
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical)
}
// GetTransactionLookup retrieves the lookup associate with the given transaction
// hash from the cache or database.
func (bc *BlockChain) GetTransactionLookup(hash common.Hash) *rawdb.LegacyTxLookupEntry {
// Short circuit if the txlookup already in the cache, retrieve otherwise
if lookup, exist := bc.txLookupCache.Get(hash); exist {
return lookup.(*rawdb.LegacyTxLookupEntry)
}
tx, blockHash, blockNumber, txIndex := rawdb.ReadTransaction(bc.db, hash)
if tx == nil {
return nil
}
lookup := &rawdb.LegacyTxLookupEntry{BlockHash: blockHash, BlockIndex: blockNumber, Index: txIndex}
bc.txLookupCache.Add(hash, lookup)
return lookup
}
// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (bc *BlockChain) GetTd(hash common.Hash, number uint64) *big.Int {
return bc.hc.GetTd(hash, number)
}
// HasState checks if state trie is fully present in the database or not.
func (bc *BlockChain) HasState(hash common.Hash) bool {
_, err := bc.stateCache.OpenTrie(hash)
return err == nil
}
// HasBlockAndState checks if a block and associated state trie is fully present
// in the database or not, caching it if present.
func (bc *BlockChain) HasBlockAndState(hash common.Hash, number uint64) bool {
// Check first that the block itself is known
block := bc.GetBlock(hash, number)
if block == nil {
return false
}
return bc.HasState(block.Root())
}
// TrieNode retrieves a blob of data associated with a trie node
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) TrieNode(hash common.Hash) ([]byte, error) {
return bc.stateCache.TrieDB().Node(hash)
}
// ContractCode retrieves a blob of data associated with a contract hash
// either from ephemeral in-memory cache, or from persistent storage.
func (bc *BlockChain) ContractCode(hash common.Hash) ([]byte, error) {
return bc.stateCache.ContractCode(common.Hash{}, hash)
}
// ContractCodeWithPrefix retrieves a blob of data associated with a contract
// hash either from ephemeral in-memory cache, or from persistent storage.
//
// If the code doesn't exist in the in-memory cache, check the storage with
// new code scheme.
func (bc *BlockChain) ContractCodeWithPrefix(hash common.Hash) ([]byte, error) {
type codeReader interface {
ContractCodeWithPrefix(addrHash, codeHash common.Hash) ([]byte, error)
}
return bc.stateCache.(codeReader).ContractCodeWithPrefix(common.Hash{}, hash)
}
// State returns a new mutable state based on the current HEAD block.
func (bc *BlockChain) State() (*state.StateDB, error) {
return bc.StateAt(bc.CurrentBlock().Root())
}
// StateAt returns a new mutable state based on a particular point in time.
func (bc *BlockChain) StateAt(root common.Hash) (*state.StateDB, error) {
return state.New(root, bc.stateCache, bc.snaps)
}
// Config retrieves the chain's fork configuration.
func (bc *BlockChain) Config() *params.ChainConfig { return bc.chainConfig }
// Engine retrieves the blockchain's consensus engine.
func (bc *BlockChain) Engine() consensus.Engine { return bc.engine }
// Snapshots returns the blockchain snapshot tree.
func (bc *BlockChain) Snapshots() *snapshot.Tree {
return bc.snaps
}
// Validator returns the current validator.
func (bc *BlockChain) Validator() Validator {
return bc.validator
}
// Processor returns the current processor.
func (bc *BlockChain) Processor() Processor {
return bc.processor
}
// StateCache returns the caching database underpinning the blockchain instance.
func (bc *BlockChain) StateCache() state.Database {
return bc.stateCache
}
// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
return bc.CurrentBlock().GasLimit()
}
// Genesis retrieves the chain's genesis block.
func (bc *BlockChain) Genesis() *types.Block {
return bc.genesisBlock
}
// GetVMConfig returns the block chain VM config.
func (bc *BlockChain) GetVMConfig() *vm.Config {
return &bc.vmConfig
}
// SetTxLookupLimit is responsible for updating the txlookup limit to the
// original one stored in the db if the new one mismatches with the old one.
func (bc *BlockChain) SetTxLookupLimit(limit uint64) {
bc.txLookupLimit = limit
}
// TxLookupLimit retrieves the txlookup limit used by blockchain to prune
// stale transaction indices.
func (bc *BlockChain) TxLookupLimit() uint64 {
return bc.txLookupLimit
}
// SubscribeRemovedLogsEvent registers a subscription of RemovedLogsEvent.
func (bc *BlockChain) SubscribeRemovedLogsEvent(ch chan<- RemovedLogsEvent) event.Subscription {
return bc.scope.Track(bc.rmLogsFeed.Subscribe(ch))
}
// SubscribeChainEvent registers a subscription of ChainEvent.
func (bc *BlockChain) SubscribeChainEvent(ch chan<- ChainEvent) event.Subscription {
return bc.scope.Track(bc.chainFeed.Subscribe(ch))
}
// SubscribeChainHeadEvent registers a subscription of ChainHeadEvent.
func (bc *BlockChain) SubscribeChainHeadEvent(ch chan<- ChainHeadEvent) event.Subscription {
return bc.scope.Track(bc.chainHeadFeed.Subscribe(ch))
}
// SubscribeChainSideEvent registers a subscription of ChainSideEvent.
func (bc *BlockChain) SubscribeChainSideEvent(ch chan<- ChainSideEvent) event.Subscription {
return bc.scope.Track(bc.chainSideFeed.Subscribe(ch))
}
// SubscribeLogsEvent registers a subscription of []*types.Log.
func (bc *BlockChain) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return bc.scope.Track(bc.logsFeed.Subscribe(ch))
}
// SubscribeBlockProcessingEvent registers a subscription of bool where true means
// block processing has started while false means it has stopped.
func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscription {
return bc.scope.Track(bc.blockProcFeed.Subscribe(ch))
}
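
A hedged usage sketch for the subscription helpers above. watchHeads is an illustrative function, not part of go-ethereum; it assumes a *core.BlockChain built elsewhere (e.g. via core.NewBlockChain), which is why main is only a placeholder.

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

// watchHeads subscribes to chain-head events and prints each new head until
// the subscription is torn down or fails.
func watchHeads(bc *core.BlockChain) error {
	ch := make(chan core.ChainHeadEvent, 16)
	sub := bc.SubscribeChainHeadEvent(ch)
	defer sub.Unsubscribe()

	for {
		select {
		case ev := <-ch:
			fmt.Println("new head", ev.Block.NumberU64(), ev.Block.Hash())
		case err := <-sub.Err():
			return err
		}
	}
}

func main() {
	// Constructing a full BlockChain needs a database, genesis and consensus
	// engine, which is beyond this sketch; watchHeads is shown for reference.
	_ = watchHeads
}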

25
vendor/github.com/ethereum/go-ethereum/core/blocks.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
import "github.com/ethereum/go-ethereum/common"
// BadHashes represent a set of manually tracked bad hashes (usually hard forks)
var BadHashes = map[common.Hash]bool{
common.HexToHash("05bef30ef572270f654746da22639a7a0c97dd97a7050b9e252391996aaeb689"): true,
common.HexToHash("7d05d08cbc596a2e5e4f13b80a743e53e09221b5323c3a61946b20873e58583f"): true,
}

Some files were not shown because too many files have changed in this diff