diff --git a/.golangci.yml b/.golangci.yml
index 44fce8bea3..42f4f59aac 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -6,6 +6,8 @@ run:
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
skip-dirs-use-default: true
+ skip-files:
+ - core/genesis_alloc.go
linters:
disable-all: true
@@ -30,6 +32,10 @@ linters-settings:
min-occurrences: 6 # minimum number of occurrences
issues:
+ # Quorum - Disabling check for "S1039: unnecessary use of fmt.Sprintf (gosimple)" until we upgrade to geth 1.9.20 and have the lint fixes that also were applied in go-ethereum, as it was creating many necessary changes in geth codebase
+ exclude:
+ - S1039
+ # End Quorum
exclude-rules:
- path: crypto/blake2b/
linters:
@@ -43,3 +49,6 @@ issues:
- path: core/vm/instructions_test.go
linters:
- goconst
+ - path: cmd/faucet/
+ linters:
+ - deadcode
diff --git a/.travis.yml b/.travis.yml
index 87fb4de166..7c2cd8bdd2 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -46,6 +46,7 @@ jobs:
- stage: build
os: osx
+ osx_image: xcode11.3
go: 1.13.x
script:
- echo "Increase the maximum number of open file descriptors on macOS"
@@ -174,7 +175,7 @@ jobs:
git:
submodules: false # avoid cloning ethereum/tests
before_install:
- - curl https://dl.google.com/go/go1.13.linux-amd64.tar.gz | tar -xz
+ - curl https://dl.google.com/go/go1.13.8.linux-amd64.tar.gz | tar -xz
- export PATH=`pwd`/go/bin:$PATH
- export GOROOT=`pwd`/go
- export GOPATH=$HOME/go
diff --git a/Makefile b/Makefile
index 620799ff97..0cac496449 100644
--- a/Makefile
+++ b/Makefile
@@ -10,39 +10,39 @@
GOBIN = ./build/bin
GO ?= latest
+GORUN = env GO111MODULE=on go run
geth:
- build/env.sh go run build/ci.go install ./cmd/geth
+ $(GORUN) build/ci.go install ./cmd/geth
@echo "Done building."
@echo "Run \"$(GOBIN)/geth\" to launch geth."
bootnode:
- build/env.sh go run build/ci.go install ./cmd/bootnode
+ $(GORUN) build/ci.go install ./cmd/bootnode
@echo "Done building."
@echo "Run \"$(GOBIN)/bootnode\" to launch bootnode."
-
all:
- build/env.sh go run build/ci.go install
+ $(GORUN) build/ci.go install
android:
- build/env.sh go run build/ci.go aar --local
+ $(GORUN) build/ci.go aar --local
@echo "Done building."
@echo "Import \"$(GOBIN)/geth.aar\" to use the library."
ios:
- build/env.sh go run build/ci.go xcode --local
+ $(GORUN) build/ci.go xcode --local
@echo "Done building."
@echo "Import \"$(GOBIN)/Geth.framework\" to use the library."
test: all
- build/env.sh go run build/ci.go test
+ $(GORUN) build/ci.go test
lint: ## Run linters.
- build/env.sh go run build/ci.go lint
+ $(GORUN) build/ci.go lint
clean:
- go clean -cache
+ env GO111MODULE=on go clean -cache
rm -fr build/_workspace/pkg/ $(GOBIN)/*
# The devtools target installs tools required for 'go generate'.
@@ -69,12 +69,12 @@ geth-linux: geth-linux-386 geth-linux-amd64 geth-linux-arm geth-linux-mips64 get
@ls -ld $(GOBIN)/geth-linux-*
geth-linux-386:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/386 -v ./cmd/geth
@echo "Linux 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep 386
geth-linux-amd64:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/amd64 -v ./cmd/geth
@echo "Linux amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep amd64
@@ -83,42 +83,42 @@ geth-linux-arm: geth-linux-arm-5 geth-linux-arm-6 geth-linux-arm-7 geth-linux-ar
@ls -ld $(GOBIN)/geth-linux-* | grep arm
geth-linux-arm-5:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-5 -v ./cmd/geth
@echo "Linux ARMv5 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-5
geth-linux-arm-6:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-6 -v ./cmd/geth
@echo "Linux ARMv6 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-6
geth-linux-arm-7:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm-7 -v ./cmd/geth
@echo "Linux ARMv7 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm-7
geth-linux-arm64:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/arm64 -v ./cmd/geth
@echo "Linux ARM64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep arm64
geth-linux-mips:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips
geth-linux-mipsle:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mipsle --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPSle cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mipsle
geth-linux-mips64:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64 --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64 cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64
geth-linux-mips64le:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=linux/mips64le --ldflags '-extldflags "-static"' -v ./cmd/geth
@echo "Linux MIPS64le cross compilation done:"
@ls -ld $(GOBIN)/geth-linux-* | grep mips64le
@@ -127,12 +127,12 @@ geth-darwin: geth-darwin-386 geth-darwin-amd64
@ls -ld $(GOBIN)/geth-darwin-*
geth-darwin-386:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/386 -v ./cmd/geth
@echo "Darwin 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep 386
geth-darwin-amd64:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=darwin/amd64 -v ./cmd/geth
@echo "Darwin amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-darwin-* | grep amd64
@@ -141,11 +141,11 @@ geth-windows: geth-windows-386 geth-windows-amd64
@ls -ld $(GOBIN)/geth-windows-*
geth-windows-386:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/386 -v ./cmd/geth
@echo "Windows 386 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep 386
geth-windows-amd64:
- build/env.sh go run build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
+ $(GORUN) build/ci.go xgo -- --go=$(GO) --targets=windows/amd64 -v ./cmd/geth
@echo "Windows amd64 cross compilation done:"
@ls -ld $(GOBIN)/geth-windows-* | grep amd64
diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go
index 603e956b9d..fdb4c48b39 100644
--- a/accounts/abi/abi.go
+++ b/accounts/abi/abi.go
@@ -108,12 +108,13 @@ func (abi ABI) UnpackIntoMap(v map[string]interface{}, name string, data []byte)
// UnmarshalJSON implements json.Unmarshaler interface
func (abi *ABI) UnmarshalJSON(data []byte) error {
var fields []struct {
- Type string
- Name string
- Constant bool
- Anonymous bool
- Inputs []Argument
- Outputs []Argument
+ Type string
+ Name string
+ Constant bool
+ StateMutability string
+ Anonymous bool
+ Inputs []Argument
+ Outputs []Argument
}
if err := json.Unmarshal(data, &fields); err != nil {
return err
@@ -134,10 +135,11 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
name = fmt.Sprintf("%s%d", field.Name, idx)
_, ok = abi.Methods[name]
}
+ isConst := field.Constant || field.StateMutability == "pure" || field.StateMutability == "view"
abi.Methods[name] = Method{
Name: name,
RawName: field.Name,
- Const: field.Constant,
+ Const: isConst,
Inputs: field.Inputs,
Outputs: field.Outputs,
}
diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go
index 7f27fe57c3..c9b2053883 100644
--- a/accounts/abi/bind/backends/simulated.go
+++ b/accounts/abi/bind/backends/simulated.go
@@ -49,12 +49,17 @@ import (
var _ bind.ContractBackend = (*SimulatedBackend)(nil)
var (
- errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
- errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")
+ errBlockNumberUnsupported = errors.New("simulatedBackend cannot access blocks other than the latest block")
+ errBlockDoesNotExist = errors.New("block does not exist in blockchain")
+ errTransactionDoesNotExist = errors.New("transaction does not exist")
+ errGasEstimationFailed = errors.New("gas required exceeds allowance or always failing transaction")
)
// SimulatedBackend implements bind.ContractBackend, simulating a blockchain in
// the background. Its main purpose is to allow easily testing contract bindings.
+// SimulatedBackend implements the following interfaces:
+// ChainReader, ChainStateReader, ContractBackend, ContractCaller, ContractFilterer, ContractTransactor,
+// DeployBackend, GasEstimator, GasPricer, LogFilterer, PendingContractCaller, TransactionReader, and TransactionSender
type SimulatedBackend struct {
database ethdb.Database // In memory database to store our testing data
blockchain *core.BlockChain // Ethereum blockchain to handle the consensus
@@ -79,7 +84,7 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis
database: database,
blockchain: blockchain,
config: genesis.Config,
- events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{database, blockchain}, false),
+ events: filters.NewEventSystem(&filterBackend{database, blockchain}, false),
}
backend.rollback()
return backend
@@ -93,7 +98,7 @@ func NewSimulatedBackendFrom(ethereum *eth.Ethereum) *SimulatedBackend {
database: ethereum.ChainDb(),
blockchain: ethereum.BlockChain(),
config: ethereum.BlockChain().Config(),
- events: filters.NewEventSystem(new(event.TypeMux), &filterBackend{ethereum.ChainDb(), ethereum.BlockChain()}, false),
+ events: filters.NewEventSystem(&filterBackend{ethereum.ChainDb(), ethereum.BlockChain()}, false),
}
backend.rollback()
return backend
@@ -139,15 +144,30 @@ func (b *SimulatedBackend) rollback() {
b.pendingState, _ = state.New(b.pendingBlock.Root(), statedb.Database())
}
+// stateByBlockNumber retrieves a state by a given blocknumber.
+func (b *SimulatedBackend) stateByBlockNumber(ctx context.Context, blockNumber *big.Int) (*state.StateDB, error) {
+ if blockNumber == nil || blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) == 0 {
+ statedb, _, err := b.blockchain.State()
+ return statedb, err
+ }
+ block, err := b.BlockByNumber(ctx, blockNumber)
+ if err != nil {
+ return nil, err
+ }
+ statedb, _, err := b.blockchain.StateAt(block.Hash())
+ return statedb, err
+}
+
// CodeAt returns the code associated with a certain account in the blockchain.
func (b *SimulatedBackend) CodeAt(ctx context.Context, contract common.Address, blockNumber *big.Int) ([]byte, error) {
b.mu.Lock()
defer b.mu.Unlock()
- if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
- return nil, errBlockNumberUnsupported
+ statedb, err := b.stateByBlockNumber(ctx, blockNumber)
+ if err != nil {
+ return nil, err
}
- statedb, _, _ := b.blockchain.State()
+ statedb, _, _ = b.blockchain.State()
return statedb.GetCode(contract), nil
}
@@ -156,10 +176,11 @@ func (b *SimulatedBackend) BalanceAt(ctx context.Context, contract common.Addres
b.mu.Lock()
defer b.mu.Unlock()
- if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
- return nil, errBlockNumberUnsupported
+ statedb, err := b.stateByBlockNumber(ctx, blockNumber)
+ if err != nil {
+ return nil, err
}
- statedb, _, _ := b.blockchain.State()
+ statedb, _, _ = b.blockchain.State()
return statedb.GetBalance(contract), nil
}
@@ -168,10 +189,11 @@ func (b *SimulatedBackend) NonceAt(ctx context.Context, contract common.Address,
b.mu.Lock()
defer b.mu.Unlock()
- if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
- return 0, errBlockNumberUnsupported
+ statedb, err := b.stateByBlockNumber(ctx, blockNumber)
+ if err != nil {
+ return 0, err
}
- statedb, _, _ := b.blockchain.State()
+ statedb, _, _ = b.blockchain.State()
return statedb.GetNonce(contract), nil
}
@@ -180,16 +202,20 @@ func (b *SimulatedBackend) StorageAt(ctx context.Context, contract common.Addres
b.mu.Lock()
defer b.mu.Unlock()
- if blockNumber != nil && blockNumber.Cmp(b.blockchain.CurrentBlock().Number()) != 0 {
- return nil, errBlockNumberUnsupported
+ statedb, err := b.stateByBlockNumber(ctx, blockNumber)
+ if err != nil {
+ return nil, err
}
- statedb, _, _ := b.blockchain.State()
+ statedb, _, _ = b.blockchain.State()
val := statedb.GetState(contract, key)
return val[:], nil
}
// TransactionReceipt returns the receipt of a transaction.
func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common.Hash) (*types.Receipt, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
receipt, _, _, _ := rawdb.ReadReceipt(b.database, txHash, b.config)
return receipt, nil
}
@@ -213,6 +239,115 @@ func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.
return nil, false, ethereum.NotFound
}
+// BlockByHash retrieves a block based on the block hash
+func (b *SimulatedBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ if hash == b.pendingBlock.Hash() {
+ return b.pendingBlock, nil
+ }
+
+ block := b.blockchain.GetBlockByHash(hash)
+ if block != nil {
+ return block, nil
+ }
+
+ return nil, errBlockDoesNotExist
+}
+
+// BlockByNumber retrieves a block from the database by number, caching it
+// (associated with its hash) if found.
+func (b *SimulatedBackend) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ if number == nil || number.Cmp(b.pendingBlock.Number()) == 0 {
+ return b.blockchain.CurrentBlock(), nil
+ }
+
+ block := b.blockchain.GetBlockByNumber(uint64(number.Int64()))
+ if block == nil {
+ return nil, errBlockDoesNotExist
+ }
+
+ return block, nil
+}
+
+// HeaderByHash returns a block header from the current canonical chain.
+func (b *SimulatedBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ if hash == b.pendingBlock.Hash() {
+ return b.pendingBlock.Header(), nil
+ }
+
+ header := b.blockchain.GetHeaderByHash(hash)
+ if header == nil {
+ return nil, errBlockDoesNotExist
+ }
+
+ return header, nil
+}
+
+// HeaderByNumber returns a block header from the current canonical chain. If block is
+// nil, the latest known header is returned.
+func (b *SimulatedBackend) HeaderByNumber(ctx context.Context, block *big.Int) (*types.Header, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ if block == nil || block.Cmp(b.pendingBlock.Number()) == 0 {
+ return b.blockchain.CurrentHeader(), nil
+ }
+
+ return b.blockchain.GetHeaderByNumber(uint64(block.Int64())), nil
+}
+
+// TransactionCount returns the number of transactions in a given block
+func (b *SimulatedBackend) TransactionCount(ctx context.Context, blockHash common.Hash) (uint, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ if blockHash == b.pendingBlock.Hash() {
+ return uint(b.pendingBlock.Transactions().Len()), nil
+ }
+
+ block := b.blockchain.GetBlockByHash(blockHash)
+ if block == nil {
+ return uint(0), errBlockDoesNotExist
+ }
+
+ return uint(block.Transactions().Len()), nil
+}
+
+// TransactionInBlock returns the transaction for a specific block at a specific index
+func (b *SimulatedBackend) TransactionInBlock(ctx context.Context, blockHash common.Hash, index uint) (*types.Transaction, error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ if blockHash == b.pendingBlock.Hash() {
+ transactions := b.pendingBlock.Transactions()
+ if uint(len(transactions)) < index+1 {
+ return nil, errTransactionDoesNotExist
+ }
+
+ return transactions[index], nil
+ }
+
+ block := b.blockchain.GetBlockByHash(blockHash)
+ if block == nil {
+ return nil, errBlockDoesNotExist
+ }
+
+ transactions := block.Transactions()
+ if uint(len(transactions)) < index+1 {
+ return nil, errTransactionDoesNotExist
+ }
+
+ return transactions[index], nil
+}
+
// PendingCodeAt returns the code associated with an account in the pending state.
func (b *SimulatedBackend) PendingCodeAt(ctx context.Context, contract common.Address) ([]byte, error) {
b.mu.Lock()
@@ -441,10 +576,38 @@ func (b *SimulatedBackend) SubscribeFilterLogs(ctx context.Context, query ethere
}), nil
}
+// SubscribeNewHead returns an event subscription for a new header
+func (b *SimulatedBackend) SubscribeNewHead(ctx context.Context, ch chan<- *types.Header) (ethereum.Subscription, error) {
+ // subscribe to a new head
+ sink := make(chan *types.Header)
+ sub := b.events.SubscribeNewHeads(sink)
+
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case head := <-sink:
+ select {
+ case ch <- head:
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ }
+ }), nil
+}
+
// AdjustTime adds a time shift to the simulated clock.
func (b *SimulatedBackend) AdjustTime(adjustment time.Duration) error {
b.mu.Lock()
defer b.mu.Unlock()
+
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTx(tx)
@@ -524,22 +687,27 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty
}
func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
- return event.NewSubscription(func(quit <-chan struct{}) error {
- <-quit
- return nil
- })
+ return nullSubscription()
}
+
func (fb *filterBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return fb.bc.SubscribeChainEvent(ch)
}
+
func (fb *filterBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
return fb.bc.SubscribeRemovedLogsEvent(ch)
}
+
func (fb *filterBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription {
return fb.bc.SubscribeLogsEvent(ch)
}
+func (fb *filterBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ return nullSubscription()
+}
+
func (fb *filterBackend) BloomStatus() (uint64, uint64) { return 4096, 0 }
+
func (fb *filterBackend) ServiceFilter(ctx context.Context, ms *bloombits.MatcherSession) {
panic("not supported")
}
@@ -555,3 +723,10 @@ func (fb *filterBackend) IsAuthorized(ctx context.Context, authToken *proto.PreA
func (fb *filterBackend) SupportsMultitenancy(context.Context) (*proto.PreAuthenticatedAuthenticationToken, bool) {
panic("not supported")
}
+
+func nullSubscription() event.Subscription {
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ <-quit
+ return nil
+ })
+}
diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go
index 7421cc67bd..8f6c1b686e 100644
--- a/accounts/abi/bind/backends/simulated_test.go
+++ b/accounts/abi/bind/backends/simulated_test.go
@@ -14,20 +14,24 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package backends_test
+package backends
import (
+ "bytes"
"context"
"math/big"
+ "strings"
"testing"
+ "time"
- ethereum "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum"
+ "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/params"
)
func TestSimulatedBackend(t *testing.T) {
@@ -37,7 +41,7 @@ func TestSimulatedBackend(t *testing.T) {
genAlloc := make(core.GenesisAlloc)
genAlloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(9223372036854775807)}
- sim := backends.NewSimulatedBackend(genAlloc, gasLimit)
+ sim := NewSimulatedBackend(genAlloc, gasLimit)
defer sim.Close()
// should return an error if the tx is not found
@@ -79,5 +83,760 @@ func TestSimulatedBackend(t *testing.T) {
if isPending {
t.Fatal("transaction should not have pending status")
}
+}
+
+var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+
+// the following is based on this contract:
+// contract T {
+// event received(address sender, uint amount, bytes memo);
+// event receivedAddr(address sender);
+//
+// function receive(bytes calldata memo) external payable returns (string memory res) {
+// emit received(msg.sender, msg.value, memo);
+// emit receivedAddr(msg.sender);
+// return "hello world";
+// }
+// }
+const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]`
+const abiBin = `0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
+const deployedCode = `60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029`
+
+// expected return value contains "hello world"
+var expectedReturn = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 104, 101, 108, 108, 111, 32, 119, 111, 114, 108, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+func TestNewSimulatedBackend(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+ expectedBal := big.NewInt(10000000000)
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: expectedBal},
+ }, 10000000,
+ )
+ defer sim.Close()
+
+ if sim.config != params.AllEthashProtocolChanges {
+ t.Errorf("expected sim config to equal params.AllEthashProtocolChanges, got %v", sim.config)
+ }
+
+ if sim.blockchain.Config() != params.AllEthashProtocolChanges {
+ t.Errorf("expected sim blockchain config to equal params.AllEthashProtocolChanges, got %v", sim.config)
+ }
+
+ statedb, _, _ := sim.blockchain.State()
+ bal := statedb.GetBalance(testAddr)
+ if bal.Cmp(expectedBal) != 0 {
+ t.Errorf("expected balance for test address not received. expected: %v actual: %v", expectedBal, bal)
+ }
+}
+
+func TestSimulatedBackend_AdjustTime(t *testing.T) {
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{}, 10000000,
+ )
+ defer sim.Close()
+
+ prevTime := sim.pendingBlock.Time()
+ err := sim.AdjustTime(time.Second)
+ if err != nil {
+ t.Error(err)
+ }
+ newTime := sim.pendingBlock.Time()
+
+ if newTime-prevTime != uint64(time.Second.Seconds()) {
+ t.Errorf("adjusted time not equal to a second. prev: %v, new: %v", prevTime, newTime)
+ }
+}
+
+func TestSimulatedBackend_BalanceAt(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+ expectedBal := big.NewInt(10000000000)
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: expectedBal},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ bal, err := sim.BalanceAt(bgCtx, testAddr, nil)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if bal.Cmp(expectedBal) != 0 {
+ t.Errorf("expected balance for test address not received. expected: %v actual: %v", expectedBal, bal)
+ }
+}
+
+func TestSimulatedBackend_BlockByHash(t *testing.T) {
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{}, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ block, err := sim.BlockByNumber(bgCtx, nil)
+ if err != nil {
+ t.Errorf("could not get recent block: %v", err)
+ }
+ blockByHash, err := sim.BlockByHash(bgCtx, block.Hash())
+ if err != nil {
+ t.Errorf("could not get recent block: %v", err)
+ }
+
+ if block.Hash() != blockByHash.Hash() {
+ t.Errorf("did not get expected block")
+ }
+}
+
+func TestSimulatedBackend_BlockByNumber(t *testing.T) {
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{}, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ block, err := sim.BlockByNumber(bgCtx, nil)
+ if err != nil {
+ t.Errorf("could not get recent block: %v", err)
+ }
+ if block.NumberU64() != 0 {
+ t.Errorf("did not get most recent block, instead got block number %v", block.NumberU64())
+ }
+
+ // create one block
+ sim.Commit()
+
+ block, err = sim.BlockByNumber(bgCtx, nil)
+ if err != nil {
+ t.Errorf("could not get recent block: %v", err)
+ }
+ if block.NumberU64() != 1 {
+ t.Errorf("did not get most recent block, instead got block number %v", block.NumberU64())
+ }
+ blockByNumber, err := sim.BlockByNumber(bgCtx, big.NewInt(1))
+ if err != nil {
+ t.Errorf("could not get block by number: %v", err)
+ }
+ if blockByNumber.Hash() != block.Hash() {
+ t.Errorf("did not get the same block with height of 1 as before")
+ }
+}
+
+func TestSimulatedBackend_NonceAt(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ nonce, err := sim.NonceAt(bgCtx, testAddr, big.NewInt(0))
+ if err != nil {
+ t.Errorf("could not get nonce for test addr: %v", err)
+ }
+
+ if nonce != uint64(0) {
+ t.Errorf("received incorrect nonce. expected 0, got %v", nonce)
+ }
+
+ // create a signed transaction to send
+ tx := types.NewTransaction(nonce, testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
+ signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
+ if err != nil {
+ t.Errorf("could not sign tx: %v", err)
+ }
+
+ // send tx to simulated backend
+ err = sim.SendTransaction(bgCtx, signedTx, bind.PrivateTxArgs{})
+ if err != nil {
+ t.Errorf("could not add tx to pending block: %v", err)
+ }
+ sim.Commit()
+
+ newNonce, err := sim.NonceAt(bgCtx, testAddr, big.NewInt(1))
+ if err != nil {
+ t.Errorf("could not get nonce for test addr: %v", err)
+ }
+
+ if newNonce != nonce+uint64(1) {
+ t.Errorf("received incorrect nonce. expected 1, got %v", nonce)
+ }
+}
+
+func TestSimulatedBackend_SendTransaction(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ // create a signed transaction to send
+ tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
+ signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
+ if err != nil {
+ t.Errorf("could not sign tx: %v", err)
+ }
+
+ // send tx to simulated backend
+ err = sim.SendTransaction(bgCtx, signedTx, bind.PrivateTxArgs{})
+ if err != nil {
+ t.Errorf("could not add tx to pending block: %v", err)
+ }
+ sim.Commit()
+
+ block, err := sim.BlockByNumber(bgCtx, big.NewInt(1))
+ if err != nil {
+ t.Errorf("could not get block at height 1: %v", err)
+ }
+
+ if signedTx.Hash() != block.Transactions()[0].Hash() {
+ t.Errorf("did not commit sent transaction. expected hash %v got hash %v", block.Transactions()[0].Hash(), signedTx.Hash())
+ }
+}
+
+func TestSimulatedBackend_TransactionByHash(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ // create a signed transaction to send
+ tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
+ signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
+ if err != nil {
+ t.Errorf("could not sign tx: %v", err)
+ }
+
+ // send tx to simulated backend
+ err = sim.SendTransaction(bgCtx, signedTx, bind.PrivateTxArgs{})
+ if err != nil {
+ t.Errorf("could not add tx to pending block: %v", err)
+ }
+
+ // ensure tx is committed pending
+ receivedTx, pending, err := sim.TransactionByHash(bgCtx, signedTx.Hash())
+ if err != nil {
+ t.Errorf("could not get transaction by hash %v: %v", signedTx.Hash(), err)
+ }
+ if !pending {
+ t.Errorf("expected transaction to be in pending state")
+ }
+ if receivedTx.Hash() != signedTx.Hash() {
+ t.Errorf("did not received committed transaction. expected hash %v got hash %v", signedTx.Hash(), receivedTx.Hash())
+ }
+
+ sim.Commit()
+
+ // ensure tx is committed and no longer pending
+ receivedTx, pending, err = sim.TransactionByHash(bgCtx, signedTx.Hash())
+ if err != nil {
+ t.Errorf("could not get transaction by hash %v: %v", signedTx.Hash(), err)
+ }
+ if pending {
+ t.Errorf("expected transaction to not be in pending state")
+ }
+ if receivedTx.Hash() != signedTx.Hash() {
+ t.Errorf("did not received committed transaction. expected hash %v got hash %v", signedTx.Hash(), receivedTx.Hash())
+ }
+}
+
+func TestSimulatedBackend_EstimateGas(t *testing.T) {
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{}, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ gas, err := sim.EstimateGas(bgCtx, ethereum.CallMsg{
+ From: testAddr,
+ To: &testAddr,
+ Value: big.NewInt(1000),
+ Data: []byte{},
+ })
+ if err != nil {
+ t.Errorf("could not estimate gas: %v", err)
+ }
+
+ if gas != params.TxGas {
+ t.Errorf("expected 21000 gas cost for a transaction got %v", gas)
+ }
+}
+
+func TestSimulatedBackend_HeaderByHash(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ header, err := sim.HeaderByNumber(bgCtx, nil)
+ if err != nil {
+ t.Errorf("could not get recent block: %v", err)
+ }
+ headerByHash, err := sim.HeaderByHash(bgCtx, header.Hash())
+ if err != nil {
+ t.Errorf("could not get recent block: %v", err)
+ }
+
+ if header.Hash() != headerByHash.Hash() {
+ t.Errorf("did not get expected block")
+ }
+}
+
+func TestSimulatedBackend_HeaderByNumber(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ latestBlockHeader, err := sim.HeaderByNumber(bgCtx, nil)
+ if err != nil {
+ t.Errorf("could not get header for tip of chain: %v", err)
+ }
+ if latestBlockHeader == nil {
+ t.Errorf("received a nil block header")
+ }
+ if latestBlockHeader.Number.Uint64() != uint64(0) {
+ t.Errorf("expected block header number 0, instead got %v", latestBlockHeader.Number.Uint64())
+ }
+
+ sim.Commit()
+
+ latestBlockHeader, err = sim.HeaderByNumber(bgCtx, nil)
+ if err != nil {
+ t.Errorf("could not get header for blockheight of 1: %v", err)
+ }
+
+ blockHeader, err := sim.HeaderByNumber(bgCtx, big.NewInt(1))
+ if err != nil {
+ t.Errorf("could not get header for blockheight of 1: %v", err)
+ }
+
+ if blockHeader.Hash() != latestBlockHeader.Hash() {
+ t.Errorf("block header and latest block header are not the same")
+ }
+ if blockHeader.Number.Int64() != int64(1) {
+ t.Errorf("did not get blockheader for block 1. instead got block %v", blockHeader.Number.Int64())
+ }
+
+ block, err := sim.BlockByNumber(bgCtx, big.NewInt(1))
+ if err != nil {
+ t.Errorf("could not get block for blockheight of 1: %v", err)
+ }
+
+ if block.Hash() != blockHeader.Hash() {
+ t.Errorf("block hash and block header hash do not match. expected %v, got %v", block.Hash(), blockHeader.Hash())
+ }
+}
+
+func TestSimulatedBackend_TransactionCount(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+ currentBlock, err := sim.BlockByNumber(bgCtx, nil)
+ if err != nil || currentBlock == nil {
+ t.Error("could not get current block")
+ }
+
+ count, err := sim.TransactionCount(bgCtx, currentBlock.Hash())
+ if err != nil {
+ t.Error("could not get current block's transaction count")
+ }
+
+ if count != 0 {
+ t.Errorf("expected transaction count of %v does not match actual count of %v", 0, count)
+ }
+
+ // create a signed transaction to send
+ tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
+ signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
+ if err != nil {
+ t.Errorf("could not sign tx: %v", err)
+ }
+
+ // send tx to simulated backend
+ err = sim.SendTransaction(bgCtx, signedTx, bind.PrivateTxArgs{})
+ if err != nil {
+ t.Errorf("could not add tx to pending block: %v", err)
+ }
+
+ sim.Commit()
+
+ lastBlock, err := sim.BlockByNumber(bgCtx, nil)
+ if err != nil {
+ t.Errorf("could not get header for tip of chain: %v", err)
+ }
+
+ count, err = sim.TransactionCount(bgCtx, lastBlock.Hash())
+ if err != nil {
+ t.Error("could not get current block's transaction count")
+ }
+
+ if count != 1 {
+ t.Errorf("expected transaction count of %v does not match actual count of %v", 1, count)
+ }
+}
+
+func TestSimulatedBackend_TransactionInBlock(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ transaction, err := sim.TransactionInBlock(bgCtx, sim.pendingBlock.Hash(), uint(0))
+ if err == nil && err != errTransactionDoesNotExist {
+ t.Errorf("expected a transaction does not exist error to be received but received %v", err)
+ }
+ if transaction != nil {
+ t.Errorf("expected transaction to be nil but received %v", transaction)
+ }
+
+ // expect pending nonce to be 0 since account has not been used
+ pendingNonce, err := sim.PendingNonceAt(bgCtx, testAddr)
+ if err != nil {
+ t.Errorf("did not get the pending nonce: %v", err)
+ }
+
+ if pendingNonce != uint64(0) {
+ t.Errorf("expected pending nonce of 0 got %v", pendingNonce)
+ }
+
+ // create a signed transaction to send
+ tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
+ signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
+ if err != nil {
+ t.Errorf("could not sign tx: %v", err)
+ }
+
+ // send tx to simulated backend
+ err = sim.SendTransaction(bgCtx, signedTx, bind.PrivateTxArgs{})
+ if err != nil {
+ t.Errorf("could not add tx to pending block: %v", err)
+ }
+
+ sim.Commit()
+
+ lastBlock, err := sim.BlockByNumber(bgCtx, nil)
+ if err != nil {
+ t.Errorf("could not get header for tip of chain: %v", err)
+ }
+
+ transaction, err = sim.TransactionInBlock(bgCtx, lastBlock.Hash(), uint(1))
+ if err == nil && err != errTransactionDoesNotExist {
+ t.Errorf("expected a transaction does not exist error to be received but received %v", err)
+ }
+ if transaction != nil {
+ t.Errorf("expected transaction to be nil but received %v", transaction)
+ }
+
+ transaction, err = sim.TransactionInBlock(bgCtx, lastBlock.Hash(), uint(0))
+ if err != nil {
+ t.Errorf("could not get transaction in the lastest block with hash %v: %v", lastBlock.Hash().String(), err)
+ }
+
+ if signedTx.Hash().String() != transaction.Hash().String() {
+ t.Errorf("received transaction that did not match the sent transaction. expected hash %v, got hash %v", signedTx.Hash().String(), transaction.Hash().String())
+ }
+}
+
+func TestSimulatedBackend_PendingNonceAt(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ // expect pending nonce to be 0 since account has not been used
+ pendingNonce, err := sim.PendingNonceAt(bgCtx, testAddr)
+ if err != nil {
+ t.Errorf("did not get the pending nonce: %v", err)
+ }
+
+ if pendingNonce != uint64(0) {
+ t.Errorf("expected pending nonce of 0 got %v", pendingNonce)
+ }
+
+ // create a signed transaction to send
+ tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
+ signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
+ if err != nil {
+ t.Errorf("could not sign tx: %v", err)
+ }
+
+ // send tx to simulated backend
+ err = sim.SendTransaction(bgCtx, signedTx, bind.PrivateTxArgs{})
+ if err != nil {
+ t.Errorf("could not add tx to pending block: %v", err)
+ }
+
+ // expect pending nonce to be 1 since account has submitted one transaction
+ pendingNonce, err = sim.PendingNonceAt(bgCtx, testAddr)
+ if err != nil {
+ t.Errorf("did not get the pending nonce: %v", err)
+ }
+
+ if pendingNonce != uint64(1) {
+ t.Errorf("expected pending nonce of 1 got %v", pendingNonce)
+ }
+
+ // make a new transaction with a nonce of 1
+ tx = types.NewTransaction(uint64(1), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
+ signedTx, err = types.SignTx(tx, types.HomesteadSigner{}, testKey)
+ if err != nil {
+ t.Errorf("could not sign tx: %v", err)
+ }
+ err = sim.SendTransaction(bgCtx, signedTx, bind.PrivateTxArgs{})
+ if err != nil {
+ t.Errorf("could not send tx: %v", err)
+ }
+
+ // expect pending nonce to be 2 since account now has two transactions
+ pendingNonce, err = sim.PendingNonceAt(bgCtx, testAddr)
+ if err != nil {
+ t.Errorf("did not get the pending nonce: %v", err)
+ }
+
+ if pendingNonce != uint64(2) {
+ t.Errorf("expected pending nonce of 2 got %v", pendingNonce)
+ }
+}
+
+func TestSimulatedBackend_TransactionReceipt(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ }, 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ // create a signed transaction to send
+ tx := types.NewTransaction(uint64(0), testAddr, big.NewInt(1000), params.TxGas, big.NewInt(1), nil)
+ signedTx, err := types.SignTx(tx, types.HomesteadSigner{}, testKey)
+ if err != nil {
+ t.Errorf("could not sign tx: %v", err)
+ }
+
+ // send tx to simulated backend
+ err = sim.SendTransaction(bgCtx, signedTx, bind.PrivateTxArgs{})
+ if err != nil {
+ t.Errorf("could not add tx to pending block: %v", err)
+ }
+ sim.Commit()
+
+ receipt, err := sim.TransactionReceipt(bgCtx, signedTx.Hash())
+ if err != nil {
+ t.Errorf("could not get transaction receipt: %v", err)
+ }
+
+ if receipt.ContractAddress != testAddr && receipt.TxHash != signedTx.Hash() {
+ t.Errorf("received receipt is not correct: %v", receipt)
+ }
+}
+
+func TestSimulatedBackend_SuggestGasPrice(t *testing.T) {
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{},
+ 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+ gasPrice, err := sim.SuggestGasPrice(bgCtx)
+ if err != nil {
+ t.Errorf("could not get gas price: %v", err)
+ }
+ if gasPrice.Uint64() != uint64(1) {
+ t.Errorf("gas price was not expected value of 1. actual: %v", gasPrice.Uint64())
+ }
+}
+
+func TestSimulatedBackend_PendingCodeAt(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ },
+ 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+ code, err := sim.CodeAt(bgCtx, testAddr, nil)
+ if err != nil {
+ t.Errorf("could not get code at test addr: %v", err)
+ }
+ if len(code) != 0 {
+ t.Errorf("got code for account that does not have contract code")
+ }
+
+ parsed, err := abi.JSON(strings.NewReader(abiJSON))
+ if err != nil {
+ t.Errorf("could not get code at test addr: %v", err)
+ }
+ auth := bind.NewKeyedTransactor(testKey)
+ contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim)
+ if err != nil {
+ t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract)
+ }
+
+ code, err = sim.PendingCodeAt(bgCtx, contractAddr)
+ if err != nil {
+ t.Errorf("could not get code at test addr: %v", err)
+ }
+ if len(code) == 0 {
+ t.Errorf("did not get code for account that has contract code")
+ }
+ // ensure code received equals code deployed
+ if !bytes.Equal(code, common.FromHex(deployedCode)) {
+ t.Errorf("code received did not match expected deployed code:\n expected %v\n actual %v", common.FromHex(deployedCode), code)
+ }
+}
+
+func TestSimulatedBackend_CodeAt(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ },
+ 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+ code, err := sim.CodeAt(bgCtx, testAddr, nil)
+ if err != nil {
+ t.Errorf("could not get code at test addr: %v", err)
+ }
+ if len(code) != 0 {
+ t.Errorf("got code for account that does not have contract code")
+ }
+
+ parsed, err := abi.JSON(strings.NewReader(abiJSON))
+ if err != nil {
+ t.Errorf("could not get code at test addr: %v", err)
+ }
+ auth := bind.NewKeyedTransactor(testKey)
+ contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim)
+ if err != nil {
+ t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract)
+ }
+
+ sim.Commit()
+ code, err = sim.CodeAt(bgCtx, contractAddr, nil)
+ if err != nil {
+ t.Errorf("could not get code at test addr: %v", err)
+ }
+ if len(code) == 0 {
+ t.Errorf("did not get code for account that has contract code")
+ }
+ // ensure code received equals code deployed
+ if !bytes.Equal(code, common.FromHex(deployedCode)) {
+ t.Errorf("code received did not match expected deployed code:\n expected %v\n actual %v", common.FromHex(deployedCode), code)
+ }
+}
+
+// When receive("X") is called with sender 0x00... and value 1, it produces this tx receipt:
+// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]}
+func TestSimulatedBackend_PendingAndCallContract(t *testing.T) {
+ testAddr := crypto.PubkeyToAddress(testKey.PublicKey)
+ sim := NewSimulatedBackend(
+ core.GenesisAlloc{
+ testAddr: {Balance: big.NewInt(10000000000)},
+ },
+ 10000000,
+ )
+ defer sim.Close()
+ bgCtx := context.Background()
+
+ parsed, err := abi.JSON(strings.NewReader(abiJSON))
+ if err != nil {
+ t.Errorf("could not get code at test addr: %v", err)
+ }
+ contractAuth := bind.NewKeyedTransactor(testKey)
+ addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(abiBin), sim)
+ if err != nil {
+ t.Errorf("could not deploy contract: %v", err)
+ }
+
+ input, err := parsed.Pack("receive", []byte("X"))
+ if err != nil {
+ t.Errorf("could pack receive function on contract: %v", err)
+ }
+
+ // make sure you can call the contract in pending state
+ res, err := sim.PendingCallContract(bgCtx, ethereum.CallMsg{
+ From: testAddr,
+ To: &addr,
+ Data: input,
+ })
+ if err != nil {
+ t.Errorf("could not call receive method on contract: %v", err)
+ }
+ if len(res) == 0 {
+ t.Errorf("result of contract call was empty: %v", res)
+ }
+
+ // while comparing against the byte array is more exact, also compare against the human readable string for readability
+ if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") {
+ t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res))
+ }
+
+ sim.Commit()
+
+ // make sure you can call the contract
+ res, err = sim.CallContract(bgCtx, ethereum.CallMsg{
+ From: testAddr,
+ To: &addr,
+ Data: input,
+ }, nil)
+ if err != nil {
+ t.Errorf("could not call receive method on contract: %v", err)
+ }
+ if len(res) == 0 {
+ t.Errorf("result of contract call was empty: %v", res)
+ }
+
+ if !bytes.Equal(res, expectedReturn) || !strings.Contains(string(res), "hello world") {
+ t.Errorf("response from calling contract was expected to be 'hello world' instead received %v", string(res))
+ }
}
diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go
index c2934d77ed..3f3712d67f 100644
--- a/accounts/abi/bind/bind_test.go
+++ b/accounts/abi/bind/bind_test.go
@@ -1530,6 +1530,61 @@ var bindTests = []struct {
nil,
[]string{"ContractOne", "ContractTwo", "ExternalLib"},
},
+ // Test the existence of the free retrieval calls
+ {
+ `PureAndView`,
+ `pragma solidity >=0.6.0;
+ contract PureAndView {
+ function PureFunc() public pure returns (uint) {
+ return 42;
+ }
+ function ViewFunc() public view returns (uint) {
+ return block.number;
+ }
+ }
+ `,
+ []string{`608060405234801561001057600080fd5b5060b68061001f6000396000f3fe6080604052348015600f57600080fd5b506004361060325760003560e01c806376b5686a146037578063bb38c66c146053575b600080fd5b603d606f565b6040518082815260200191505060405180910390f35b60596077565b6040518082815260200191505060405180910390f35b600043905090565b6000602a90509056fea2646970667358221220d158c2ab7fdfce366a7998ec79ab84edd43b9815630bbaede2c760ea77f29f7f64736f6c63430006000033`},
+ []string{`[{"inputs": [],"name": "PureFunc","outputs": [{"internalType": "uint256","name": "","type": "uint256"}],"stateMutability": "pure","type": "function"},{"inputs": [],"name": "ViewFunc","outputs": [{"internalType": "uint256","name": "","type": "uint256"}],"stateMutability": "view","type": "function"}]`},
+ `
+ "math/big"
+
+ "github.com/ethereum/go-ethereum/accounts/abi/bind"
+ "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/crypto"
+ `,
+ `
+ // Generate a new random account and a funded simulator
+ key, _ := crypto.GenerateKey()
+ auth := bind.NewKeyedTransactor(key)
+
+ sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000)
+ defer sim.Close()
+
+ // Deploy a tester contract and execute a structured call on it
+ _, _, pav, err := DeployPureAndView(auth, sim)
+ if err != nil {
+ t.Fatalf("Failed to deploy PureAndView contract: %v", err)
+ }
+ sim.Commit()
+
+ // This test the existence of the free retreiver call for view and pure functions
+ if num, err := pav.PureFunc(nil); err != nil {
+ t.Fatalf("Failed to call anonymous field retriever: %v", err)
+ } else if num.Cmp(big.NewInt(42)) != 0 {
+ t.Fatalf("Retrieved value mismatch: have %v, want %v", num, 42)
+ }
+ if num, err := pav.ViewFunc(nil); err != nil {
+ t.Fatalf("Failed to call anonymous field retriever: %v", err)
+ } else if num.Cmp(big.NewInt(1)) != 0 {
+ t.Fatalf("Retrieved value mismatch: have %v, want %v", num, 1)
+ }
+ `,
+ nil,
+ nil,
+ nil,
+ nil,
+ },
}
// Tests that packages generated by the binder can be successfully compiled and
diff --git a/accounts/abi/bind/topics.go b/accounts/abi/bind/topics.go
index e27fa54842..c908c92582 100644
--- a/accounts/abi/bind/topics.go
+++ b/accounts/abi/bind/topics.go
@@ -178,6 +178,13 @@ func parseTopics(out interface{}, fields abi.Arguments, topics []common.Hash) er
case reflectBigInt:
num := new(big.Int).SetBytes(topics[0][:])
+ if arg.Type.T == abi.IntTy {
+ if num.Cmp(abi.MaxInt256) > 0 {
+ num.Add(abi.MaxUint256, big.NewInt(0).Neg(num))
+ num.Add(num, big.NewInt(1))
+ num.Neg(num)
+ }
+ }
field.Set(reflect.ValueOf(num))
default:
@@ -212,8 +219,7 @@ func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics
case abi.BoolTy:
out[arg.Name] = topics[0][common.HashLength-1] == 1
case abi.IntTy, abi.UintTy:
- num := new(big.Int).SetBytes(topics[0][:])
- out[arg.Name] = num
+ out[arg.Name] = abi.ReadInteger(arg.Type.T, arg.Type.Kind, topics[0].Bytes())
case abi.AddressTy:
var addr common.Address
copy(addr[:], topics[0][common.HashLength-common.AddressLength:])
@@ -221,7 +227,11 @@ func parseTopicsIntoMap(out map[string]interface{}, fields abi.Arguments, topics
case abi.HashTy:
out[arg.Name] = topics[0]
case abi.FixedBytesTy:
- out[arg.Name] = topics[0][:]
+ array, err := abi.ReadFixedBytes(arg.Type, topics[0].Bytes())
+ if err != nil {
+ return err
+ }
+ out[arg.Name] = array
case abi.StringTy, abi.BytesTy, abi.SliceTy, abi.ArrayTy:
// Array types (including strings and bytes) have their keccak256 hashes stored in the topic- not a hash
// whose bytes can be decoded to the actual value- so the best we can do is retrieve that hash
diff --git a/accounts/abi/bind/topics_test.go b/accounts/abi/bind/topics_test.go
index f18e2d1bd2..c62f5bab32 100644
--- a/accounts/abi/bind/topics_test.go
+++ b/accounts/abi/bind/topics_test.go
@@ -17,6 +17,7 @@
package bind
import (
+ "math/big"
"reflect"
"testing"
@@ -55,27 +56,44 @@ func TestMakeTopics(t *testing.T) {
}
}
-func TestParseTopics(t *testing.T) {
- type bytesStruct struct {
- StaticBytes [5]byte
- }
+type args struct {
+ createObj func() interface{}
+ resultObj func() interface{}
+ resultMap func() map[string]interface{}
+ fields abi.Arguments
+ topics []common.Hash
+}
+
+type bytesStruct struct {
+ StaticBytes [5]byte
+}
+type int8Struct struct {
+ Int8Value int8
+}
+type int256Struct struct {
+ Int256Value *big.Int
+}
+
+type topicTest struct {
+ name string
+ args args
+ wantErr bool
+}
+
+func setupTopicsTests() []topicTest {
bytesType, _ := abi.NewType("bytes5", "", nil)
- type args struct {
- createObj func() interface{}
- resultObj func() interface{}
- fields abi.Arguments
- topics []common.Hash
- }
- tests := []struct {
- name string
- args args
- wantErr bool
- }{
+ int8Type, _ := abi.NewType("int8", "", nil)
+ int256Type, _ := abi.NewType("int256", "", nil)
+
+ tests := []topicTest{
{
name: "support fixed byte types, right padded to 32 bytes",
args: args{
createObj: func() interface{} { return &bytesStruct{} },
resultObj: func() interface{} { return &bytesStruct{StaticBytes: [5]byte{1, 2, 3, 4, 5}} },
+ resultMap: func() map[string]interface{} {
+ return map[string]interface{}{"staticBytes": [5]byte{1, 2, 3, 4, 5}}
+ },
fields: abi.Arguments{abi.Argument{
Name: "staticBytes",
Type: bytesType,
@@ -87,7 +105,54 @@ func TestParseTopics(t *testing.T) {
},
wantErr: false,
},
+ {
+ name: "int8 with negative value",
+ args: args{
+ createObj: func() interface{} { return &int8Struct{} },
+ resultObj: func() interface{} { return &int8Struct{Int8Value: -1} },
+ resultMap: func() map[string]interface{} {
+ return map[string]interface{}{"int8Value": int8(-1)}
+ },
+ fields: abi.Arguments{abi.Argument{
+ Name: "int8Value",
+ Type: int8Type,
+ Indexed: true,
+ }},
+ topics: []common.Hash{
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ wantErr: false,
+ },
+ {
+ name: "int256 with negative value",
+ args: args{
+ createObj: func() interface{} { return &int256Struct{} },
+ resultObj: func() interface{} { return &int256Struct{Int256Value: big.NewInt(-1)} },
+ resultMap: func() map[string]interface{} {
+ return map[string]interface{}{"int256Value": big.NewInt(-1)}
+ },
+ fields: abi.Arguments{abi.Argument{
+ Name: "int256Value",
+ Type: int256Type,
+ Indexed: true,
+ }},
+ topics: []common.Hash{
+ {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255},
+ },
+ },
+ wantErr: false,
+ },
}
+
+ return tests
+}
+
+func TestParseTopics(t *testing.T) {
+ tests := setupTopicsTests()
+
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
createObj := tt.args.createObj()
@@ -101,3 +166,20 @@ func TestParseTopics(t *testing.T) {
})
}
}
+
+func TestParseTopicsIntoMap(t *testing.T) {
+ tests := setupTopicsTests()
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ outMap := make(map[string]interface{})
+ if err := parseTopicsIntoMap(outMap, tt.args.fields, tt.args.topics); (err != nil) != tt.wantErr {
+ t.Errorf("parseTopicsIntoMap() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ resultMap := tt.args.resultMap()
+ if !reflect.DeepEqual(outMap, resultMap) {
+ t.Errorf("parseTopicsIntoMap() = %v, want %v", outMap, resultMap)
+ }
+ })
+ }
+}
diff --git a/accounts/abi/pack.go b/accounts/abi/pack.go
index 36c58265bd..dd1c9a5df8 100644
--- a/accounts/abi/pack.go
+++ b/accounts/abi/pack.go
@@ -73,7 +73,7 @@ func packNum(value reflect.Value) []byte {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return U256(big.NewInt(value.Int()))
case reflect.Ptr:
- return U256(value.Interface().(*big.Int))
+ return U256(new(big.Int).Set(value.Interface().(*big.Int)))
default:
panic("abi: fatal error")
}
diff --git a/accounts/abi/unpack.go b/accounts/abi/unpack.go
index b2e61d06c4..2a5db3b315 100644
--- a/accounts/abi/unpack.go
+++ b/accounts/abi/unpack.go
@@ -26,16 +26,18 @@ import (
)
var (
- maxUint256 = big.NewInt(0).Add(
+ // MaxUint256 is the maximum value that can be represented by a uint256
+ MaxUint256 = big.NewInt(0).Add(
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(256), nil),
big.NewInt(-1))
- maxInt256 = big.NewInt(0).Add(
+ // MaxInt256 is the maximum value that can be represented by a int256
+ MaxInt256 = big.NewInt(0).Add(
big.NewInt(0).Exp(big.NewInt(2), big.NewInt(255), nil),
big.NewInt(-1))
)
-// reads the integer based on its kind
-func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
+// ReadInteger reads the integer based on its kind and returns the appropriate value
+func ReadInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
switch kind {
case reflect.Uint8:
return b[len(b)-1]
@@ -62,8 +64,8 @@ func readInteger(typ byte, kind reflect.Kind, b []byte) interface{} {
return ret
}
- if ret.Cmp(maxInt256) > 0 {
- ret.Add(maxUint256, big.NewInt(0).Neg(ret))
+ if ret.Cmp(MaxInt256) > 0 {
+ ret.Add(MaxUint256, big.NewInt(0).Neg(ret))
ret.Add(ret, big.NewInt(1))
ret.Neg(ret)
}
@@ -102,8 +104,8 @@ func readFunctionType(t Type, word []byte) (funcTy [24]byte, err error) {
return
}
-// through reflection, creates a fixed array to be read from
-func readFixedBytes(t Type, word []byte) (interface{}, error) {
+// ReadFixedBytes uses reflection to create a fixed array to be read from
+func ReadFixedBytes(t Type, word []byte) (interface{}, error) {
if t.T != FixedBytesTy {
return nil, fmt.Errorf("abi: invalid type in call to make fixed byte array")
}
@@ -230,7 +232,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case StringTy: // variable arrays are written at the end of the return bytes
return string(output[begin : begin+length]), nil
case IntTy, UintTy:
- return readInteger(t.T, t.Kind, returnOutput), nil
+ return ReadInteger(t.T, t.Kind, returnOutput), nil
case BoolTy:
return readBool(returnOutput)
case AddressTy:
@@ -240,7 +242,7 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
case BytesTy:
return output[begin : begin+length], nil
case FixedBytesTy:
- return readFixedBytes(t, returnOutput)
+ return ReadFixedBytes(t, returnOutput)
case FunctionTy:
return readFunctionType(t, returnOutput)
default:
diff --git a/accounts/manager.go b/accounts/manager.go
index b52bc98fd9..8847da8cb4 100644
--- a/accounts/manager.go
+++ b/accounts/manager.go
@@ -157,6 +157,11 @@ func (am *Manager) Wallets() []Wallet {
am.lock.RLock()
defer am.lock.RUnlock()
+ return am.walletsNoLock()
+}
+
+// walletsNoLock returns all registered wallets. Callers must hold am.lock.
+func (am *Manager) walletsNoLock() []Wallet {
cpy := make([]Wallet, len(am.wallets))
copy(cpy, am.wallets)
return cpy
@@ -171,7 +176,7 @@ func (am *Manager) Wallet(url string) (Wallet, error) {
if err != nil {
return nil, err
}
- for _, wallet := range am.Wallets() {
+ for _, wallet := range am.walletsNoLock() {
if wallet.URL() == parsed {
return wallet, nil
}
diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go
index fad876a019..9b70c69dcc 100644
--- a/accounts/scwallet/securechannel.go
+++ b/accounts/scwallet/securechannel.go
@@ -71,7 +71,7 @@ func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSes
cardPublic, ok := gen.Unmarshal(keyData)
if !ok {
- return nil, fmt.Errorf("Could not unmarshal public key from card")
+ return nil, fmt.Errorf("could not unmarshal public key from card")
}
secret, err := gen.GenerateSharedSecret(private, cardPublic)
@@ -109,7 +109,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
cardChallenge := response.Data[32:64]
if !bytes.Equal(expectedCryptogram, cardCryptogram) {
- return fmt.Errorf("Invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
+ return fmt.Errorf("invalid card cryptogram %v != %v", expectedCryptogram, cardCryptogram)
}
md.Reset()
@@ -132,7 +132,7 @@ func (s *SecureChannelSession) Pair(pairingPassword []byte) error {
// Unpair disestablishes an existing pairing.
func (s *SecureChannelSession) Unpair() error {
if s.PairingKey == nil {
- return fmt.Errorf("Cannot unpair: not paired")
+ return fmt.Errorf("cannot unpair: not paired")
}
_, err := s.transmitEncrypted(claSCWallet, insUnpair, s.PairingIndex, 0, []byte{})
@@ -148,7 +148,7 @@ func (s *SecureChannelSession) Unpair() error {
// Open initializes the secure channel.
func (s *SecureChannelSession) Open() error {
if s.iv != nil {
- return fmt.Errorf("Session already opened")
+ return fmt.Errorf("session already opened")
}
response, err := s.open()
@@ -185,11 +185,11 @@ func (s *SecureChannelSession) mutuallyAuthenticate() error {
return err
}
if response.Sw1 != 0x90 || response.Sw2 != 0x00 {
- return fmt.Errorf("Got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
+ return fmt.Errorf("got unexpected response from MUTUALLY_AUTHENTICATE: 0x%x%x", response.Sw1, response.Sw2)
}
if len(response.Data) != scSecretLength {
- return fmt.Errorf("Response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
+ return fmt.Errorf("response from MUTUALLY_AUTHENTICATE was %d bytes, expected %d", len(response.Data), scSecretLength)
}
return nil
@@ -222,7 +222,7 @@ func (s *SecureChannelSession) pair(p1 uint8, data []byte) (*responseAPDU, error
// transmitEncrypted sends an encrypted message, and decrypts and returns the response.
func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []byte) (*responseAPDU, error) {
if s.iv == nil {
- return nil, fmt.Errorf("Channel not open")
+ return nil, fmt.Errorf("channel not open")
}
data, err := s.encryptAPDU(data)
@@ -261,14 +261,14 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
return nil, err
}
if !bytes.Equal(s.iv, rmac) {
- return nil, fmt.Errorf("Invalid MAC in response")
+ return nil, fmt.Errorf("invalid MAC in response")
}
rapdu := &responseAPDU{}
rapdu.deserialize(plainData)
if rapdu.Sw1 != sw1Ok {
- return nil, fmt.Errorf("Unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
+ return nil, fmt.Errorf("unexpected response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", cla, ins, rapdu.Sw1, rapdu.Sw2)
}
return rapdu, nil
@@ -277,7 +277,7 @@ func (s *SecureChannelSession) transmitEncrypted(cla, ins, p1, p2 byte, data []b
// encryptAPDU is an internal method that serializes and encrypts an APDU.
func (s *SecureChannelSession) encryptAPDU(data []byte) ([]byte, error) {
if len(data) > maxPayloadSize {
- return nil, fmt.Errorf("Payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
+ return nil, fmt.Errorf("payload of %d bytes exceeds maximum of %d", len(data), maxPayloadSize)
}
data = pad(data, 0x80)
@@ -323,10 +323,10 @@ func unpad(data []byte, terminator byte) ([]byte, error) {
case terminator:
return data[:len(data)-i], nil
default:
- return nil, fmt.Errorf("Expected end of padding, got %d", data[len(data)-i])
+ return nil, fmt.Errorf("expected end of padding, got %d", data[len(data)-i])
}
}
- return nil, fmt.Errorf("Expected end of padding, got 0")
+ return nil, fmt.Errorf("expected end of padding, got 0")
}
// updateIV is an internal method that updates the initialization vector after
diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go
index 57b5977062..dd9266cb31 100644
--- a/accounts/scwallet/wallet.go
+++ b/accounts/scwallet/wallet.go
@@ -167,7 +167,7 @@ func transmit(card *pcsc.Card, command *commandAPDU) (*responseAPDU, error) {
}
if response.Sw1 != sw1Ok {
- return nil, fmt.Errorf("Unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
+ return nil, fmt.Errorf("unexpected insecure response status Cla=0x%x, Ins=0x%x, Sw=0x%x%x", command.Cla, command.Ins, response.Sw1, response.Sw2)
}
return response, nil
@@ -252,7 +252,7 @@ func (w *Wallet) release() error {
// with the wallet.
func (w *Wallet) pair(puk []byte) error {
if w.session.paired() {
- return fmt.Errorf("Wallet already paired")
+ return fmt.Errorf("wallet already paired")
}
pairing, err := w.session.pair(puk)
if err != nil {
@@ -773,12 +773,12 @@ func (w *Wallet) findAccountPath(account accounts.Account) (accounts.DerivationP
// Look for the path in the URL
if account.URL.Scheme != w.Hub.scheme {
- return nil, fmt.Errorf("Scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
+ return nil, fmt.Errorf("scheme %s does not match wallet scheme %s", account.URL.Scheme, w.Hub.scheme)
}
parts := strings.SplitN(account.URL.Path, "/", 2)
if len(parts) != 2 {
- return nil, fmt.Errorf("Invalid URL format: %s", account.URL)
+ return nil, fmt.Errorf("invalid URL format: %s", account.URL)
}
if parts[0] != fmt.Sprintf("%x", w.PublicKey[1:3]) {
@@ -813,7 +813,7 @@ func (s *Session) pair(secret []byte) (smartcardPairing, error) {
// unpair deletes an existing pairing.
func (s *Session) unpair() error {
if !s.verified {
- return fmt.Errorf("Unpair requires that the PIN be verified")
+ return fmt.Errorf("unpair requires that the PIN be verified")
}
return s.Channel.Unpair()
}
@@ -850,7 +850,7 @@ func (s *Session) paired() bool {
// authenticate uses an existing pairing to establish a secure channel.
func (s *Session) authenticate(pairing smartcardPairing) error {
if !bytes.Equal(s.Wallet.PublicKey, pairing.PublicKey) {
- return fmt.Errorf("Cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
+ return fmt.Errorf("cannot pair using another wallet's pairing; %x != %x", s.Wallet.PublicKey, pairing.PublicKey)
}
s.Channel.PairingKey = pairing.PairingKey
s.Channel.PairingIndex = pairing.PairingIndex
@@ -879,6 +879,7 @@ func (s *Session) walletStatus() (*walletStatus, error) {
}
// derivationPath fetches the wallet's current derivation path from the card.
+//lint:ignore U1000 needs to be added to the console interface
func (s *Session) derivationPath() (accounts.DerivationPath, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil)
if err != nil {
@@ -993,12 +994,14 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error)
}
// keyExport contains information on an exported keypair.
+//lint:ignore U1000 needs to be added to the console interface
type keyExport struct {
PublicKey []byte `asn1:"tag:0"`
PrivateKey []byte `asn1:"tag:1,optional"`
}
// publicKey returns the public key for the current derivation path.
+//lint:ignore U1000 needs to be added to the console interface
func (s *Session) publicKey() ([]byte, error) {
response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil)
if err != nil {
diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go
index 17ca9223ff..64eae64f68 100644
--- a/accounts/usbwallet/ledger.go
+++ b/accounts/usbwallet/ledger.go
@@ -162,7 +162,8 @@ func (w *ledgerDriver) SignTx(path accounts.DerivationPath, tx *types.Transactio
return common.Address{}, nil, accounts.ErrWalletClosed
}
// Ensure the wallet is capable of signing the given transaction
- if chainID != nil && w.version[0] <= 1 && w.version[1] <= 0 && w.version[2] <= 2 {
+ if chainID != nil && w.version[0] <= 1 && w.version[2] <= 2 {
+ //lint:ignore ST1005 brand name displayed on the console
return common.Address{}, nil, fmt.Errorf("Ledger v%d.%d.%d doesn't support signing this transaction, please update to v1.0.3 at least", w.version[0], w.version[1], w.version[2])
}
// All infos gathered and metadata checks out, request signing
diff --git a/appveyor.yml b/appveyor.yml
index 0f230bac14..90a862abe7 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- - appveyor DownloadFile https://dl.google.com/go/go1.13.4.windows-%GETH_ARCH%.zip
- - 7z x go1.13.4.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
+ - appveyor DownloadFile https://dl.google.com/go/go1.13.8.windows-%GETH_ARCH%.zip
+ - 7z x go1.13.8.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version
diff --git a/build/checksums.txt b/build/checksums.txt
index bb814e3392..e126f71b3e 100644
--- a/build/checksums.txt
+++ b/build/checksums.txt
@@ -1,19 +1,20 @@
# This file contains sha256 checksums of optional build dependencies.
-95dbeab442ee2746b9acf0934c8e2fc26414a0565c008631b04addb8c02e7624 go1.13.4.src.tar.gz
+b13bf04633d4d8cf53226ebeaace8d4d2fd07ae6fa676d0844a688339debec34 go1.13.8.src.tar.gz
-1fcbc9e36f4319eeed02beb8cfd1b3d425ffc2f90ddf09a80f18d5064c51e0cb golangci-lint-1.21.0-linux-386.tar.gz
-267b4066e67139a38d29499331a002d6a29ad5be7aafc83db3b1e88f1b027f90 golangci-lint-1.21.0-linux-armv6.tar.gz
-a602c1f25f90e46e621019cff0a8cb3f4e1837011f3537f15e730d6a9ebf507b golangci-lint-1.21.0-freebsd-armv7.tar.gz
-2c861f8dc56b560474aa27cab0c075991628cc01af3451e27ac82f5d10d5106b golangci-lint-1.21.0-linux-amd64.tar.gz
-a1c39e055280e755acaa906e7abfc20b99a5c28be8af541c57fbc44abbb20dde golangci-lint-1.21.0-linux-arm64.tar.gz
-a8f8bda8c6a4136acf858091077830b1e83ad5612606cb69d5dced869ce00bd8 golangci-lint-1.21.0-linux-ppc64le.tar.gz
-0a8a8c3bc660ccbca668897ab520f7ee9878f16cc8e4dd24fe46236ceec97ba3 golangci-lint-1.21.0-freebsd-armv6.tar.gz
-699b07f45e216571f54002bcbd83b511c4801464a422162158e299587b095b18 golangci-lint-1.21.0-freebsd-amd64.tar.gz
-980fb4993942154bb5c8129ea3b86de09574fe81b24384ebb58cd7a9d2f04483 golangci-lint-1.21.0-linux-armv7.tar.gz
-f15b689088a47f20d5d3c1d945e9ee7c6238f2b84ea468b5f886cf8713dce62e golangci-lint-1.21.0-windows-386.zip
-2e40ded7adcf11e59013cb15c24438b15a86526ca241edfcfdf1abd73a5280a8 golangci-lint-1.21.0-windows-amd64.zip
-6052c7cfea4d6dc2fc722f6c12792a5ec087420198db495afffbc22052653bf7 golangci-lint-1.21.0-freebsd-386.tar.gz
-ca00b8eacf9af14a71b908b4149606c762aa5c0eac781e74ca0abedfdfdf6c8c golangci-lint-1.21.0-linux-s390x.tar.gz
-1365455940c342f95718159d89d66ad2eef19f0846c3e87023e915a3527b929f golangci-lint-1.21.0-darwin-386.tar.gz
-2b2713ec5007e67883aa501eebb81f22abfab0cf0909134ba90f60a066db3760 golangci-lint-1.21.0-darwin-amd64.tar.gz
+d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35 golangci-lint-1.27.0-linux-armv7.tar.gz
+bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint.exe-1.27.0-windows-amd64.zip
+bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint-1.27.0-windows-amd64.zip
+0e2a57d6ba709440d3ed018ef1037465fa010ed02595829092860e5cf863042e golangci-lint-1.27.0-freebsd-386.tar.gz
+90205fc42ab5ed0096413e790d88ac9b4ed60f4c47e576d13dc0660f7ed4b013 golangci-lint-1.27.0-linux-arm64.tar.gz
+8d345e4e88520e21c113d81978e89ad77fc5b13bfdf20e5bca86b83fc4261272 golangci-lint-1.27.0-linux-amd64.tar.gz
+cc619634a77f18dc73df2a0725be13116d64328dc35131ca1737a850d6f76a59 golangci-lint-1.27.0-freebsd-armv7.tar.gz
+fe683583cfc9eeec83e498c0d6159d87b5e1919dbe4b6c3b3913089642906069 golangci-lint-1.27.0-linux-s390x.tar.gz
+058f5579bee75bdaacbaf75b75e1369f7ad877fd8b3b145aed17a17545de913e golangci-lint-1.27.0-freebsd-armv6.tar.gz
+38e1e3dadbe3f56ab62b4de82ee0b88e8fad966d8dfd740a26ef94c2edef9818 golangci-lint-1.27.0-linux-armv6.tar.gz
+071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095 golangci-lint.exe-1.27.0-windows-386.zip
+071b34af5516f4e1ddcaea6011e18208f4f043e1af8ba21eeccad4585cb3d095 golangci-lint-1.27.0-windows-386.zip
+5f37e2b33914ecddb7cad38186ef4ec61d88172fc04f930fa0267c91151ff306 golangci-lint-1.27.0-linux-386.tar.gz
+4d94cfb51fdebeb205f1d5a349ac2b683c30591c5150708073c1c329e15965f0 golangci-lint-1.27.0-freebsd-amd64.tar.gz
+52572ba8ff07d5169c2365d3de3fec26dc55a97522094d13d1596199580fa281 golangci-lint-1.27.0-linux-ppc64le.tar.gz
+3fb1a1683a29c6c0a8cd76135f62b606fbdd538d5a7aeab94af1af70ffdc2fd4 golangci-lint-1.27.0-darwin-amd64.tar.gz
diff --git a/build/ci.go b/build/ci.go
index 73d1efa0dd..0fa3781b5a 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -145,6 +145,7 @@ var (
"bionic": "golang-go",
"disco": "golang-go",
"eoan": "golang-go",
+ "focal": "golang-go",
}
debGoBootPaths = map[string]string{
@@ -214,9 +215,9 @@ func doInstall(cmdline []string) {
var minor int
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
- if minor < 9 {
+ if minor < 11 {
log.Println("You have Go version", runtime.Version())
- log.Println("go-ethereum requires at least Go version 1.9 and cannot")
+ log.Println("go-ethereum requires at least Go version 1.11 and cannot")
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
os.Exit(1)
}
@@ -237,13 +238,6 @@ func doInstall(cmdline []string) {
build.MustRun(goinstall)
return
}
- // If we are cross compiling to ARMv5 ARMv6 or ARMv7, clean any previous builds
- if *arch == "arm" {
- os.RemoveAll(filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_arm"))
- for _, path := range filepath.SplitList(build.GOPATH()) {
- os.RemoveAll(filepath.Join(path, "pkg", runtime.GOOS+"_arm"))
- }
- }
// Seems we are cross compiling, work around forbidden GOBIN
goinstall := goToolArch(*arch, *cc, "install", buildFlags(env)...)
@@ -294,7 +288,6 @@ func goTool(subcmd string, args ...string) *exec.Cmd {
func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
cmd := build.GoTool(subcmd, args...)
- cmd.Env = []string{"GOPATH=" + build.GOPATH()}
if arch == "" || arch == runtime.GOARCH {
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
} else {
@@ -305,7 +298,7 @@ func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd
cmd.Env = append(cmd.Env, "CC="+cc)
}
for _, e := range os.Environ() {
- if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "GOBIN=") {
+ if strings.HasPrefix(e, "GOBIN=") {
continue
}
cmd.Env = append(cmd.Env, e)
@@ -367,7 +360,7 @@ func doLint(cmdline []string) {
// downloadLinter downloads and unpacks golangci-lint.
func downloadLinter(cachedir string) string {
- const version = "1.21.0"
+ const version = "1.27.0"
csdb := build.MustLoadChecksums("build/checksums.txt")
base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, runtime.GOARCH)
@@ -892,7 +885,6 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd {
cmd := exec.Command(filepath.Join(GOBIN, "gomobile"), subcmd)
cmd.Args = append(cmd.Args, args...)
cmd.Env = []string{
- "GOPATH=" + build.GOPATH(),
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
}
for _, e := range os.Environ() {
@@ -1082,7 +1074,6 @@ func xgoTool(args []string) *exec.Cmd {
cmd := exec.Command(filepath.Join(GOBIN, "xgo"), args...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, []string{
- "GOPATH=" + build.GOPATH(),
"GOBIN=" + GOBIN,
}...)
return cmd
diff --git a/build/deb/ethereum/deb.rules b/build/deb/ethereum/deb.rules
index 983b87af16..0677ef91e4 100644
--- a/build/deb/ethereum/deb.rules
+++ b/build/deb/ethereum/deb.rules
@@ -6,6 +6,7 @@
# Launchpad rejects Go's access to $HOME, use custom folders
export GOCACHE=/tmp/go-build
+export GOPATH=/tmp/gopath
export GOROOT_BOOTSTRAP={{.GoBootPath}}
override_dh_auto_clean:
@@ -19,10 +20,11 @@ override_dh_auto_build:
# We can't download external go modules within Launchpad, so we're shipping the
# entire dependency source cache with go-ethereum.
- (mkdir -p build/_workspace/pkg/mod && mv .mod/* build/_workspace/pkg/mod)
+ mkdir -p $(GOPATH)/pkg
+ mv .mod $(GOPATH)/pkg/mod
# A fresh Go was built, all dependency downloads faked, hope build works now
- build/env.sh ../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
+ ../.go/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
override_dh_auto_test:
diff --git a/build/env.sh b/build/env.sh
deleted file mode 100755
index 3914555d1b..0000000000
--- a/build/env.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-
-set -e
-
-if [ ! -f "build/env.sh" ]; then
- echo "$0 must be run from the root of the repository."
- exit 2
-fi
-
-# Create fake Go workspace if it doesn't exist yet.
-workspace="$PWD/build/_workspace"
-root="$PWD"
-ethdir="$workspace/src/github.com/ethereum"
-if [ ! -L "$ethdir/go-ethereum" ]; then
- mkdir -p "$ethdir"
- cd "$ethdir"
- ln -s ../../../../../. go-ethereum
- cd "$root"
-fi
-
-# Set up the environment to use the workspace.
-GOPATH="$workspace"
-export GOPATH
-
-# Run the command inside the workspace.
-cd "$ethdir/go-ethereum"
-PWD="$ethdir/go-ethereum"
-
-# Launch the arguments with the configured environment.
-exec "$@"
diff --git a/cmd/abidump/main.go b/cmd/abidump/main.go
new file mode 100644
index 0000000000..35cbcbb0ed
--- /dev/null
+++ b/cmd/abidump/main.go
@@ -0,0 +1,74 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "encoding/hex"
+ "flag"
+ "fmt"
+ "os"
+ "strings"
+
+ "github.com/ethereum/go-ethereum/signer/core"
+ "github.com/ethereum/go-ethereum/signer/fourbyte"
+)
+
+func init() {
+ flag.Usage = func() {
+ fmt.Fprintln(os.Stderr, "Usage:", os.Args[0], "<hexdata>")
+ flag.PrintDefaults()
+ fmt.Fprintln(os.Stderr, `
+Parses the given ABI data and tries to interpret it from the fourbyte database.`)
+ }
+}
+
+func parse(data []byte) {
+ db, err := fourbyte.New()
+ if err != nil {
+ die(err)
+ }
+ messages := core.ValidationMessages{}
+ db.ValidateCallData(nil, data, &messages)
+ for _, m := range messages.Messages {
+ fmt.Printf("%v: %v\n", m.Typ, m.Message)
+ }
+}
+
+// Example
+// ./abidump a9059cbb000000000000000000000000ea0e2dc7d65a50e77fc7e84bff3fd2a9e781ff5c0000000000000000000000000000000000000000000000015af1d78b58c40000
+func main() {
+ flag.Parse()
+
+ switch {
+ case flag.NArg() == 1:
+ hexdata := flag.Arg(0)
+ data, err := hex.DecodeString(strings.TrimPrefix(hexdata, "0x"))
+ if err != nil {
+ die(err)
+ }
+ parse(data)
+ default:
+ fmt.Fprintln(os.Stderr, "Error: one argument needed")
+ flag.Usage()
+ os.Exit(2)
+ }
+}
+
+func die(args ...interface{}) {
+ fmt.Fprintln(os.Stderr, args...)
+ os.Exit(1)
+}
diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go
index 659cf1b4c5..ed4a3b8870 100644
--- a/cmd/abigen/main.go
+++ b/cmd/abigen/main.go
@@ -21,9 +21,11 @@ import (
"fmt"
"io/ioutil"
"os"
+ "path/filepath"
"regexp"
"strings"
+ "github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common/compiler"
@@ -32,19 +34,6 @@ import (
"gopkg.in/urfave/cli.v1"
)
-const (
- commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
-{{if .Description}}{{.Description}}
-{{end}}{{if .Subcommands}}
-SUBCOMMANDS:
- {{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
- {{end}}{{end}}{{if .Flags}}
-OPTIONS:
-{{range $.Flags}}{{"\t"}}{{.}}
-{{end}}
-{{end}}`
-)
-
var (
// Git SHA1 commit hash of the release (set via linker flags)
gitCommit = ""
@@ -128,7 +117,7 @@ func init() {
aliasFlag,
}
app.Action = utils.MigrateFlags(abigen)
- cli.CommandHelpTemplate = commandHelperTemplate
+ cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
func abigen(c *cli.Context) error {
@@ -206,10 +195,22 @@ func abigen(c *cli.Context) error {
utils.Fatalf("Failed to build Solidity contract: %v", err)
}
case c.GlobalIsSet(vyFlag.Name):
- contracts, err = compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name))
+ output, err := compiler.CompileVyper(c.GlobalString(vyperFlag.Name), c.GlobalString(vyFlag.Name))
if err != nil {
utils.Fatalf("Failed to build Vyper contract: %v", err)
}
+ contracts = make(map[string]*compiler.Contract)
+ for n, contract := range output {
+ name := n
+ // Sanitize the combined json names to match the
+ // format expected by solidity.
+ if !strings.Contains(n, ":") {
+ // Remove extra path components
+ name = abi.ToCamelCase(strings.TrimSuffix(filepath.Base(name), ".vy"))
+ }
+ contracts[name] = contract
+ }
+
case c.GlobalIsSet(jsonFlag.Name):
jsonOutput, err := ioutil.ReadFile(c.GlobalString(jsonFlag.Name))
if err != nil {
diff --git a/cmd/checkpoint-admin/main.go b/cmd/checkpoint-admin/main.go
index 26d751dd81..b4d8e0db5a 100644
--- a/cmd/checkpoint-admin/main.go
+++ b/cmd/checkpoint-admin/main.go
@@ -28,19 +28,6 @@ import (
"gopkg.in/urfave/cli.v1"
)
-const (
- commandHelperTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
-{{if .Description}}{{.Description}}
-{{end}}{{if .Subcommands}}
-SUBCOMMANDS:
- {{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
- {{end}}{{end}}{{if .Flags}}
-OPTIONS:
-{{range $.Flags}}{{"\t"}}{{.}}
-{{end}}
-{{end}}`
-)
-
var (
// Git SHA1 commit hash of the release (set via linker flags)
gitCommit = ""
@@ -61,7 +48,7 @@ func init() {
oracleFlag,
nodeURLFlag,
}
- cli.CommandHelpTemplate = commandHelperTemplate
+ cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
// Commonly used command line flags.
diff --git a/cmd/clef/main.go b/cmd/clef/main.go
index d81aae6007..1563cf520d 100644
--- a/cmd/clef/main.go
+++ b/cmd/clef/main.go
@@ -239,6 +239,7 @@ func init() {
}
app.Action = signer
app.Commands = []cli.Command{initCommand, attestCommand, setCredentialCommand, delCredentialCommand, gendocCommand}
+ cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
func main() {
diff --git a/cmd/devp2p/dns_cloudflare.go b/cmd/devp2p/dns_cloudflare.go
index 83279168cc..a4d10dcfdd 100644
--- a/cmd/devp2p/dns_cloudflare.go
+++ b/cmd/devp2p/dns_cloudflare.go
@@ -130,9 +130,9 @@ func (c *cloudflareClient) uploadRecords(name string, records map[string]string)
if !exists {
// Entry is unknown, push a new one to Cloudflare.
log.Info(fmt.Sprintf("Creating %s = %q", path, val))
- ttl := 1
+ ttl := rootTTL
if path != name {
- ttl = 2147483647 // Max TTL permitted by Cloudflare
+ ttl = treeNodeTTL // Max TTL permitted by Cloudflare
}
_, err = c.CreateDNSRecord(c.zoneID, cloudflare.DNSRecord{Type: "TXT", Name: path, Content: val, TTL: ttl})
} else if old.Content != val {
diff --git a/cmd/devp2p/dns_route53.go b/cmd/devp2p/dns_route53.go
new file mode 100644
index 0000000000..71118be543
--- /dev/null
+++ b/cmd/devp2p/dns_route53.go
@@ -0,0 +1,303 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/session"
+ "github.com/aws/aws-sdk-go/service/route53"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/dnsdisc"
+ "gopkg.in/urfave/cli.v1"
+)
+
+// The Route53 limits change sets to this size. DNS changes need to be split
+// up into multiple batches to work around the limit.
+const route53ChangeLimit = 30000
+
+var (
+ route53AccessKeyFlag = cli.StringFlag{
+ Name: "access-key-id",
+ Usage: "AWS Access Key ID",
+ EnvVar: "AWS_ACCESS_KEY_ID",
+ }
+ route53AccessSecretFlag = cli.StringFlag{
+ Name: "access-key-secret",
+ Usage: "AWS Access Key Secret",
+ EnvVar: "AWS_SECRET_ACCESS_KEY",
+ }
+ route53ZoneIDFlag = cli.StringFlag{
+ Name: "zone-id",
+ Usage: "Route53 Zone ID",
+ }
+)
+
+type route53Client struct {
+ api *route53.Route53
+ zoneID string
+}
+
+type recordSet struct {
+ values []string
+ ttl int64
+}
+
+// newRoute53Client sets up a Route53 API client from command line flags.
+func newRoute53Client(ctx *cli.Context) *route53Client {
+ akey := ctx.String(route53AccessKeyFlag.Name)
+ asec := ctx.String(route53AccessSecretFlag.Name)
+ if akey == "" || asec == "" {
+ exit(fmt.Errorf("need Route53 Access Key ID and secret proceed"))
+ }
+ config := &aws.Config{Credentials: credentials.NewStaticCredentials(akey, asec, "")}
+ session, err := session.NewSession(config)
+ if err != nil {
+ exit(fmt.Errorf("can't create AWS session: %v", err))
+ }
+ return &route53Client{
+ api: route53.New(session),
+ zoneID: ctx.String(route53ZoneIDFlag.Name),
+ }
+}
+
+// deploy uploads the given tree to Route53.
+func (c *route53Client) deploy(name string, t *dnsdisc.Tree) error {
+ if err := c.checkZone(name); err != nil {
+ return err
+ }
+
+ // Compute DNS changes.
+ existing, err := c.collectRecords(name)
+ if err != nil {
+ return err
+ }
+ log.Info(fmt.Sprintf("Found %d TXT records", len(existing)))
+
+ records := t.ToTXT(name)
+ changes := c.computeChanges(name, records, existing)
+ if len(changes) == 0 {
+ log.Info("No DNS changes needed")
+ return nil
+ }
+
+ // Submit change batches.
+ batches := splitChanges(changes, route53ChangeLimit)
+ for i, changes := range batches {
+ log.Info(fmt.Sprintf("Submitting %d changes to Route53", len(changes)))
+ batch := new(route53.ChangeBatch)
+ batch.SetChanges(changes)
+ batch.SetComment(fmt.Sprintf("enrtree update %d/%d of %s at seq %d", i+1, len(batches), name, t.Seq()))
+ req := &route53.ChangeResourceRecordSetsInput{HostedZoneId: &c.zoneID, ChangeBatch: batch}
+ resp, err := c.api.ChangeResourceRecordSets(req)
+ if err != nil {
+ return err
+ }
+
+ log.Info(fmt.Sprintf("Waiting for change request %s", *resp.ChangeInfo.Id))
+ wreq := &route53.GetChangeInput{Id: resp.ChangeInfo.Id}
+ if err := c.api.WaitUntilResourceRecordSetsChanged(wreq); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// checkZone verifies zone information for the given domain.
+func (c *route53Client) checkZone(name string) (err error) {
+ if c.zoneID == "" {
+ c.zoneID, err = c.findZoneID(name)
+ }
+ return err
+}
+
+// findZoneID searches for the Zone ID containing the given domain.
+func (c *route53Client) findZoneID(name string) (string, error) {
+ log.Info(fmt.Sprintf("Finding Route53 Zone ID for %s", name))
+ var req route53.ListHostedZonesByNameInput
+ for {
+ resp, err := c.api.ListHostedZonesByName(&req)
+ if err != nil {
+ return "", err
+ }
+ for _, zone := range resp.HostedZones {
+ if isSubdomain(name, *zone.Name) {
+ return *zone.Id, nil
+ }
+ }
+ if !*resp.IsTruncated {
+ break
+ }
+ req.DNSName = resp.NextDNSName
+ req.HostedZoneId = resp.NextHostedZoneId
+ }
+ return "", errors.New("can't find zone ID for " + name)
+}
+
+// computeChanges creates DNS changes for the given record.
+func (c *route53Client) computeChanges(name string, records map[string]string, existing map[string]recordSet) []*route53.Change {
+ // Convert all names to lowercase.
+ lrecords := make(map[string]string, len(records))
+ for name, r := range records {
+ lrecords[strings.ToLower(name)] = r
+ }
+ records = lrecords
+
+ var changes []*route53.Change
+ for path, val := range records {
+ ttl := int64(rootTTL)
+ if path != name {
+ ttl = int64(treeNodeTTL)
+ }
+
+ prevRecords, exists := existing[path]
+ prevValue := strings.Join(prevRecords.values, "")
+ if !exists {
+ // Entry is unknown, push a new one
+ log.Info(fmt.Sprintf("Creating %s = %q", path, val))
+ changes = append(changes, newTXTChange("CREATE", path, ttl, splitTXT(val)))
+ } else if prevValue != val {
+ // Entry already exists, only change its content.
+ log.Info(fmt.Sprintf("Updating %s from %q to %q", path, prevValue, val))
+ changes = append(changes, newTXTChange("UPSERT", path, ttl, splitTXT(val)))
+ } else {
+ log.Info(fmt.Sprintf("Skipping %s = %q", path, val))
+ }
+ }
+
+ // Iterate over the old records and delete anything stale.
+ for path, set := range existing {
+ if _, ok := records[path]; ok {
+ continue
+ }
+ // Stale entry, nuke it.
+ log.Info(fmt.Sprintf("Deleting %s = %q", path, strings.Join(set.values, "")))
+ changes = append(changes, newTXTChange("DELETE", path, set.ttl, set.values...))
+ }
+
+ sortChanges(changes)
+ return changes
+}
+
+// sortChanges ensures DNS changes are in leaf-added -> root-changed -> leaf-deleted order.
+func sortChanges(changes []*route53.Change) {
+ score := map[string]int{"CREATE": 1, "UPSERT": 2, "DELETE": 3}
+ sort.Slice(changes, func(i, j int) bool {
+ if *changes[i].Action == *changes[j].Action {
+ return *changes[i].ResourceRecordSet.Name < *changes[j].ResourceRecordSet.Name
+ }
+ return score[*changes[i].Action] < score[*changes[j].Action]
+ })
+}
+
+// splitChanges splits up DNS changes such that each change batch
+// is smaller than the given RDATA limit.
+func splitChanges(changes []*route53.Change, limit int) [][]*route53.Change {
+ var batches [][]*route53.Change
+ var batchSize int
+ for _, ch := range changes {
+ // Start new batch if this change pushes the current one over the limit.
+ size := changeSize(ch)
+ if len(batches) == 0 || batchSize+size > limit {
+ batches = append(batches, nil)
+ batchSize = 0
+ }
+ batches[len(batches)-1] = append(batches[len(batches)-1], ch)
+ batchSize += size
+ }
+ return batches
+}
+
+// changeSize returns the RDATA size of a DNS change.
+func changeSize(ch *route53.Change) int {
+ size := 0
+ for _, rr := range ch.ResourceRecordSet.ResourceRecords {
+ if rr.Value != nil {
+ size += len(*rr.Value)
+ }
+ }
+ return size
+}
+
+// collectRecords collects all TXT records below the given name.
+func (c *route53Client) collectRecords(name string) (map[string]recordSet, error) {
+ log.Info(fmt.Sprintf("Retrieving existing TXT records on %s (%s)", name, c.zoneID))
+ var req route53.ListResourceRecordSetsInput
+ req.SetHostedZoneId(c.zoneID)
+ existing := make(map[string]recordSet)
+ err := c.api.ListResourceRecordSetsPages(&req, func(resp *route53.ListResourceRecordSetsOutput, last bool) bool {
+ for _, set := range resp.ResourceRecordSets {
+ if !isSubdomain(*set.Name, name) || *set.Type != "TXT" {
+ continue
+ }
+ s := recordSet{ttl: *set.TTL}
+ for _, rec := range set.ResourceRecords {
+ s.values = append(s.values, *rec.Value)
+ }
+ name := strings.TrimSuffix(*set.Name, ".")
+ existing[name] = s
+ }
+ return true
+ })
+ return existing, err
+}
+
+// newTXTChange creates a change to a TXT record.
+func newTXTChange(action, name string, ttl int64, values ...string) *route53.Change {
+ var c route53.Change
+ var r route53.ResourceRecordSet
+ var rrs []*route53.ResourceRecord
+ for _, val := range values {
+ rr := new(route53.ResourceRecord)
+ rr.SetValue(val)
+ rrs = append(rrs, rr)
+ }
+ r.SetType("TXT")
+ r.SetName(name)
+ r.SetTTL(ttl)
+ r.SetResourceRecords(rrs)
+ c.SetAction(action)
+ c.SetResourceRecordSet(&r)
+ return &c
+}
+
+// isSubdomain returns true if name is a subdomain of domain.
+func isSubdomain(name, domain string) bool {
+ domain = strings.TrimSuffix(domain, ".")
+ name = strings.TrimSuffix(name, ".")
+ return strings.HasSuffix("."+name, "."+domain)
+}
+
+// splitTXT splits value into a list of quoted 255-character strings.
+func splitTXT(value string) string {
+ var result strings.Builder
+ for len(value) > 0 {
+ rlen := len(value)
+ if rlen > 253 {
+ rlen = 253
+ }
+ result.WriteString(strconv.Quote(value[:rlen]))
+ value = value[rlen:]
+ }
+ return result.String()
+}
diff --git a/cmd/devp2p/dns_route53_test.go b/cmd/devp2p/dns_route53_test.go
new file mode 100644
index 0000000000..f6ab6c1762
--- /dev/null
+++ b/cmd/devp2p/dns_route53_test.go
@@ -0,0 +1,154 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/aws/aws-sdk-go/service/route53"
+)
+
+// This test checks that computeChanges/splitChanges create DNS changes in
+// leaf-added -> root-changed -> leaf-deleted order.
+func TestRoute53ChangeSort(t *testing.T) {
+ testTree0 := map[string]recordSet{
+ "2kfjogvxdqtxxugbh7gs7naaai.n": {ttl: 3333, values: []string{
+ `"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`,
+ }},
+ "fdxn3sn67na5dka4j2gok7bvqi.n": {ttl: treeNodeTTL, values: []string{`"enrtree-branch:"`}},
+ "n": {ttl: rootTTL, values: []string{`"enrtree-root:v1 e=2KFJOGVXDQTXXUGBH7GS7NAAAI l=FDXN3SN67NA5DKA4J2GOK7BVQI seq=0 sig=v_-J_q_9ICQg5ztExFvLQhDBGMb0lZPJLhe3ts9LAcgqhOhtT3YFJsl8BWNDSwGtamUdR-9xl88_w-X42SVpjwE"`}},
+ }
+
+ testTree1 := map[string]string{
+ "n": "enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA",
+ "C7HRFPF3BLGF3YR4DY5KX3SMBE.n": "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org",
+ "JWXYDBPXYWG6FX3GMDIBFA6CJ4.n": "enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24",
+ "2XS2367YHAXJFGLZHVAWLQD4ZY.n": "enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA",
+ "H4FHT4B454P6UXFD7JCYQ5PWDY.n": "enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI",
+ "MHTDO6TMUBRIA2XWG5LUDACK24.n": "enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o",
+ }
+
+ wantChanges := []*route53.Change{
+ {
+ Action: sp("CREATE"),
+ ResourceRecordSet: &route53.ResourceRecordSet{
+ Name: sp("2xs2367yhaxjfglzhvawlqd4zy.n"),
+ ResourceRecords: []*route53.ResourceRecord{{
+ Value: sp(`"enr:-HW4QOFzoVLaFJnNhbgMoDXPnOvcdVuj7pDpqRvh6BRDO68aVi5ZcjB3vzQRZH2IcLBGHzo8uUN3snqmgTiE56CH3AMBgmlkgnY0iXNlY3AyNTZrMaECC2_24YYkYHEgdzxlSNKQEnHhuNAbNlMlWJxrJxbAFvA"`),
+ }},
+ TTL: ip(treeNodeTTL),
+ Type: sp("TXT"),
+ },
+ },
+ {
+ Action: sp("CREATE"),
+ ResourceRecordSet: &route53.ResourceRecordSet{
+ Name: sp("c7hrfpf3blgf3yr4dy5kx3smbe.n"),
+ ResourceRecords: []*route53.ResourceRecord{{
+ Value: sp(`"enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org"`),
+ }},
+ TTL: ip(treeNodeTTL),
+ Type: sp("TXT"),
+ },
+ },
+ {
+ Action: sp("CREATE"),
+ ResourceRecordSet: &route53.ResourceRecordSet{
+ Name: sp("h4fht4b454p6uxfd7jcyq5pwdy.n"),
+ ResourceRecords: []*route53.ResourceRecord{{
+ Value: sp(`"enr:-HW4QAggRauloj2SDLtIHN1XBkvhFZ1vtf1raYQp9TBW2RD5EEawDzbtSmlXUfnaHcvwOizhVYLtr7e6vw7NAf6mTuoCgmlkgnY0iXNlY3AyNTZrMaECjrXI8TLNXU0f8cthpAMxEshUyQlK-AM0PW2wfrnacNI"`),
+ }},
+ TTL: ip(treeNodeTTL),
+ Type: sp("TXT"),
+ },
+ },
+ {
+ Action: sp("CREATE"),
+ ResourceRecordSet: &route53.ResourceRecordSet{
+ Name: sp("jwxydbpxywg6fx3gmdibfa6cj4.n"),
+ ResourceRecords: []*route53.ResourceRecord{{
+ Value: sp(`"enrtree-branch:2XS2367YHAXJFGLZHVAWLQD4ZY,H4FHT4B454P6UXFD7JCYQ5PWDY,MHTDO6TMUBRIA2XWG5LUDACK24"`),
+ }},
+ TTL: ip(treeNodeTTL),
+ Type: sp("TXT"),
+ },
+ },
+ {
+ Action: sp("CREATE"),
+ ResourceRecordSet: &route53.ResourceRecordSet{
+ Name: sp("mhtdo6tmubria2xwg5ludack24.n"),
+ ResourceRecords: []*route53.ResourceRecord{{
+ Value: sp(`"enr:-HW4QLAYqmrwllBEnzWWs7I5Ev2IAs7x_dZlbYdRdMUx5EyKHDXp7AV5CkuPGUPdvbv1_Ms1CPfhcGCvSElSosZmyoqAgmlkgnY0iXNlY3AyNTZrMaECriawHKWdDRk2xeZkrOXBQ0dfMFLHY4eENZwdufn1S1o"`),
+ }},
+ TTL: ip(treeNodeTTL),
+ Type: sp("TXT"),
+ },
+ },
+ {
+ Action: sp("UPSERT"),
+ ResourceRecordSet: &route53.ResourceRecordSet{
+ Name: sp("n"),
+ ResourceRecords: []*route53.ResourceRecord{{
+ Value: sp(`"enrtree-root:v1 e=JWXYDBPXYWG6FX3GMDIBFA6CJ4 l=C7HRFPF3BLGF3YR4DY5KX3SMBE seq=1 sig=o908WmNp7LibOfPsr4btQwatZJ5URBr2ZAuxvK4UWHlsB9sUOTJQaGAlLPVAhM__XJesCHxLISo94z5Z2a463gA"`),
+ }},
+ TTL: ip(rootTTL),
+ Type: sp("TXT"),
+ },
+ },
+ {
+ Action: sp("DELETE"),
+ ResourceRecordSet: &route53.ResourceRecordSet{
+ Name: sp("2kfjogvxdqtxxugbh7gs7naaai.n"),
+ ResourceRecords: []*route53.ResourceRecord{
+ {Value: sp(`"enr:-HW4QO1ml1DdXLeZLsUxewnthhUy8eROqkDyoMTyavfks9JlYQIlMFEUoM78PovJDPQrAkrb3LRJ-""vtrymDguKCOIAWAgmlkgnY0iXNlY3AyNTZrMaEDffaGfJzgGhUif1JqFruZlYmA31HzathLSWxfbq_QoQ4"`)},
+ },
+ TTL: ip(3333),
+ Type: sp("TXT"),
+ },
+ },
+ {
+ Action: sp("DELETE"),
+ ResourceRecordSet: &route53.ResourceRecordSet{
+ Name: sp("fdxn3sn67na5dka4j2gok7bvqi.n"),
+ ResourceRecords: []*route53.ResourceRecord{{
+ Value: sp(`"enrtree-branch:"`),
+ }},
+ TTL: ip(treeNodeTTL),
+ Type: sp("TXT"),
+ },
+ },
+ }
+
+ var client route53Client
+ changes := client.computeChanges("n", testTree1, testTree0)
+ if !reflect.DeepEqual(changes, wantChanges) {
+ t.Fatalf("wrong changes (got %d, want %d)", len(changes), len(wantChanges))
+ }
+
+ wantSplit := [][]*route53.Change{
+ wantChanges[:4],
+ wantChanges[4:8],
+ }
+ split := splitChanges(changes, 600)
+ if !reflect.DeepEqual(split, wantSplit) {
+ t.Fatalf("wrong split batches: got %d, want %d", len(split), len(wantSplit))
+ }
+}
+
+func sp(s string) *string { return &s }
+func ip(i int64) *int64 { return &i }
diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go
index eb15764b04..7c9ccd31f4 100644
--- a/cmd/devp2p/dnscmd.go
+++ b/cmd/devp2p/dnscmd.go
@@ -42,6 +42,7 @@ var (
dnsSignCommand,
dnsTXTCommand,
dnsCloudflareCommand,
+ dnsRoute53Command,
},
}
dnsSyncCommand = cli.Command{
@@ -66,11 +67,18 @@ var (
}
dnsCloudflareCommand = cli.Command{
Name: "to-cloudflare",
- Usage: "Deploy DNS TXT records to cloudflare",
+ Usage: "Deploy DNS TXT records to CloudFlare",
ArgsUsage: "",
Action: dnsToCloudflare,
Flags: []cli.Flag{cloudflareTokenFlag, cloudflareZoneIDFlag},
}
+ dnsRoute53Command = cli.Command{
+ Name: "to-route53",
+ Usage: "Deploy DNS TXT records to Amazon Route53",
+ ArgsUsage: "",
+ Action: dnsToRoute53,
+ Flags: []cli.Flag{route53AccessKeyFlag, route53AccessSecretFlag, route53ZoneIDFlag},
+ }
)
var (
@@ -88,6 +96,11 @@ var (
}
)
+const (
+ rootTTL = 1
+ treeNodeTTL = 2147483647
+)
+
// dnsSync performs dnsSyncCommand.
func dnsSync(ctx *cli.Context) error {
var (
@@ -194,6 +207,19 @@ func dnsToCloudflare(ctx *cli.Context) error {
return client.deploy(domain, t)
}
+// dnsToRoute53 performs dnsRoute53Command.
+func dnsToRoute53(ctx *cli.Context) error {
+ if ctx.NArg() < 1 {
+ return fmt.Errorf("need tree definition directory as argument")
+ }
+ domain, t, err := loadTreeDefinitionForExport(ctx.Args().Get(0))
+ if err != nil {
+ return err
+ }
+ client := newRoute53Client(ctx)
+ return client.deploy(domain, t)
+}
+
// loadSigningKey loads a private key in Ethereum keystore format.
func loadSigningKey(keyfile string) *ecdsa.PrivateKey {
keyjson, err := ioutil.ReadFile(keyfile)
@@ -214,8 +240,7 @@ func dnsClient(ctx *cli.Context) *dnsdisc.Client {
if commandHasFlag(ctx, dnsTimeoutFlag) {
cfg.Timeout = ctx.Duration(dnsTimeoutFlag.Name)
}
- c, _ := dnsdisc.NewClient(cfg) // cannot fail because no URLs given
- return c
+ return dnsdisc.NewClient(cfg)
}
// There are two file formats for DNS node trees on disk:
diff --git a/cmd/devp2p/main.go b/cmd/devp2p/main.go
index 6faa650937..b895941f25 100644
--- a/cmd/devp2p/main.go
+++ b/cmd/devp2p/main.go
@@ -45,7 +45,7 @@ func init() {
// Set up the CLI app.
app.Flags = append(app.Flags, debug.Flags...)
app.Before = func(ctx *cli.Context) error {
- return debug.Setup(ctx, "")
+ return debug.Setup(ctx)
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
diff --git a/cmd/ethkey/generate.go b/cmd/ethkey/generate.go
index fe9a0c1519..09b00cc9dc 100644
--- a/cmd/ethkey/generate.go
+++ b/cmd/ethkey/generate.go
@@ -52,6 +52,10 @@ If you want to encrypt an existing private key, it can be specified by setting
Name: "privatekey",
Usage: "file containing a raw private key to encrypt",
},
+ cli.BoolFlag{
+ Name: "lightkdf",
+ Usage: "use less secure scrypt parameters",
+ },
},
Action: func(ctx *cli.Context) error {
// Check if keyfile path given and make sure it doesn't already exist.
@@ -91,7 +95,11 @@ If you want to encrypt an existing private key, it can be specified by setting
// Encrypt key with passphrase.
passphrase := promptPassphrase(true)
- keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP)
+ scryptN, scryptP := keystore.StandardScryptN, keystore.StandardScryptP
+ if ctx.Bool("lightkdf") {
+ scryptN, scryptP = keystore.LightScryptN, keystore.LightScryptP
+ }
+ keyjson, err := keystore.EncryptKey(key, passphrase, scryptN, scryptP)
if err != nil {
utils.Fatalf("Error encrypting key: %v", err)
}
diff --git a/cmd/ethkey/main.go b/cmd/ethkey/main.go
index 5b545d5f94..dbc4960588 100644
--- a/cmd/ethkey/main.go
+++ b/cmd/ethkey/main.go
@@ -43,6 +43,7 @@ func init() {
commandSignMessage,
commandVerifyMessage,
}
+ cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
// Commonly used command line flags.
diff --git a/cmd/ethkey/message_test.go b/cmd/ethkey/message_test.go
index e9e8eeeafb..9d242ac002 100644
--- a/cmd/ethkey/message_test.go
+++ b/cmd/ethkey/message_test.go
@@ -34,7 +34,7 @@ func TestMessageSignVerify(t *testing.T) {
message := "test message"
// Create the key.
- generate := runEthkey(t, "generate", keyfile)
+ generate := runEthkey(t, "generate", "--lightkdf", keyfile)
generate.Expect(`
!! Unsupported terminal, password will be echoed.
Password: {{.InputLine "foobar"}}
diff --git a/cmd/evm/disasm.go b/cmd/evm/disasm.go
index 69f611e39b..5bc743aa88 100644
--- a/cmd/evm/disasm.go
+++ b/cmd/evm/disasm.go
@@ -34,17 +34,22 @@ var disasmCommand = cli.Command{
}
func disasmCmd(ctx *cli.Context) error {
- if len(ctx.Args().First()) == 0 {
- return errors.New("filename required")
+ var in string
+ switch {
+ case len(ctx.Args().First()) > 0:
+ fn := ctx.Args().First()
+ input, err := ioutil.ReadFile(fn)
+ if err != nil {
+ return err
+ }
+ in = string(input)
+ case ctx.GlobalIsSet(InputFlag.Name):
+ in = ctx.GlobalString(InputFlag.Name)
+ default:
+ return errors.New("Missing filename or --input value")
}
- fn := ctx.Args().First()
- in, err := ioutil.ReadFile(fn)
- if err != nil {
- return err
- }
-
- code := strings.TrimSpace(string(in))
+ code := strings.TrimSpace(in)
fmt.Printf("%v\n", code)
return asm.PrintDisassembled(code)
}
diff --git a/cmd/evm/main.go b/cmd/evm/main.go
index 5b8b950ab0..72cb1ab852 100644
--- a/cmd/evm/main.go
+++ b/cmd/evm/main.go
@@ -87,6 +87,10 @@ var (
Name: "verbosity",
Usage: "sets the verbosity level",
}
+ BenchFlag = cli.BoolFlag{
+ Name: "bench",
+ Usage: "benchmark the execution",
+ }
CreateFlag = cli.BoolFlag{
Name: "create",
Usage: "indicates the action should be create rather than call",
@@ -124,6 +128,7 @@ var (
func init() {
app.Flags = []cli.Flag{
+ BenchFlag,
CreateFlag,
DebugFlag,
VerbosityFlag,
@@ -152,6 +157,7 @@ func init() {
runCommand,
stateTestCommand,
}
+ cli.CommandHelpTemplate = utils.OriginCommandHelpTemplate
}
func main() {
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index cecbf36063..da301ff5ee 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -25,6 +25,7 @@ import (
"os"
goruntime "runtime"
"runtime/pprof"
+ "testing"
"time"
"github.com/ethereum/go-ethereum/cmd/evm/internal/compiler"
@@ -69,6 +70,33 @@ func readGenesis(genesisPath string) *core.Genesis {
return genesis
}
+func timedExec(bench bool, execFunc func() ([]byte, uint64, error)) ([]byte, uint64, time.Duration, error) {
+ var (
+ output []byte
+ gasLeft uint64
+ execTime time.Duration
+ err error
+ )
+
+ if bench {
+ result := testing.Benchmark(func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ output, gasLeft, err = execFunc()
+ }
+ })
+
+ // Get the average execution time from the benchmarking result.
+ // There are other useful stats here that could be reported.
+ execTime = time.Duration(result.NsPerOp())
+ } else {
+ startTime := time.Now()
+ output, gasLeft, err = execFunc()
+ execTime = time.Since(startTime)
+ }
+
+ return output, gasLeft, execTime, err
+}
+
func runCmd(ctx *cli.Context) error {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false)))
glogger.Verbosity(log.Lvl(ctx.GlobalInt(VerbosityFlag.Name)))
@@ -116,11 +144,7 @@ func runCmd(ctx *cli.Context) error {
receiver = common.HexToAddress(ctx.GlobalString(ReceiverFlag.Name))
}
- var (
- code []byte
- ret []byte
- err error
- )
+ var code []byte
codeFileFlag := ctx.GlobalString(CodeFileFlag.Name)
codeFlag := ctx.GlobalString(CodeFlag.Name)
@@ -203,10 +227,10 @@ func runCmd(ctx *cli.Context) error {
} else {
runtimeConfig.ChainConfig = params.AllEthashProtocolChanges
}
- tstart := time.Now()
- var leftOverGas uint64
+
var hexInput []byte
if inputFileFlag := ctx.GlobalString(InputFileFlag.Name); inputFileFlag != "" {
+ var err error
if hexInput, err = ioutil.ReadFile(inputFileFlag); err != nil {
fmt.Printf("could not load input from file: %v\n", err)
os.Exit(1)
@@ -215,16 +239,24 @@ func runCmd(ctx *cli.Context) error {
hexInput = []byte(ctx.GlobalString(InputFlag.Name))
}
input := common.FromHex(string(bytes.TrimSpace(hexInput)))
+
+ var execFunc func() ([]byte, uint64, error)
if ctx.GlobalBool(CreateFlag.Name) {
input = append(code, input...)
- ret, _, leftOverGas, err = runtime.Create(input, &runtimeConfig)
+ execFunc = func() ([]byte, uint64, error) {
+ output, _, gasLeft, err := runtime.Create(input, &runtimeConfig)
+ return output, gasLeft, err
+ }
} else {
if len(code) > 0 {
statedb.SetCode(receiver, code)
}
- ret, leftOverGas, err = runtime.Call(receiver, input, &runtimeConfig)
+ execFunc = func() ([]byte, uint64, error) {
+ return runtime.Call(receiver, input, &runtimeConfig)
+ }
}
- execTime := time.Since(tstart)
+
+ output, leftOverGas, execTime, err := timedExec(ctx.GlobalBool(BenchFlag.Name), execFunc)
if ctx.GlobalBool(DumpFlag.Name) {
statedb.Commit(true)
@@ -267,7 +299,7 @@ Gas used: %d
`, execTime, mem.HeapObjects, mem.Alloc, mem.TotalAlloc, mem.NumGC, initialGas-leftOverGas)
}
if tracer == nil {
- fmt.Printf("0x%x\n", ret)
+ fmt.Printf("0x%x\n", output)
if err != nil {
fmt.Printf(" error: %v\n", err)
}
diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go
index 27b87f22f1..1a005b3c75 100644
--- a/cmd/faucet/faucet.go
+++ b/cmd/faucet/faucet.go
@@ -353,6 +353,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
if head == nil || balance == nil {
// Report the faucet offline until initial stats are ready
+ //lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(conn, errors.New("Faucet offline")); err != nil {
log.Warn("Failed to send faucet error to client", "err", err)
return
@@ -361,11 +362,14 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
}
}
// Send over the initial stats and the latest header
+ f.lock.RLock()
+ reqs := f.reqs
+ f.lock.RUnlock()
if err = send(conn, map[string]interface{}{
"funds": new(big.Int).Div(balance, ether),
"funded": nonce,
"peers": f.stack.Server().PeerCount(),
- "requests": f.reqs,
+ "requests": reqs,
}, 3*time.Second); err != nil {
log.Warn("Failed to send initial stats to client", "err", err)
return
@@ -394,6 +398,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
continue
}
if msg.Tier >= uint(*tiersFlag) {
+ //lint:ignore ST1005 This error is to be displayed in the browser
if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil {
log.Warn("Failed to send tier error to client", "err", err)
return
@@ -431,6 +436,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
}
if !result.Success {
log.Warn("Captcha verification failed", "err", string(result.Errors))
+ //lint:ignore ST1005 it's funny and the robot won't mind
if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil {
log.Warn("Failed to send captcha failure to client", "err", err)
return
@@ -452,6 +458,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
}
continue
case strings.HasPrefix(msg.URL, "https://plus.google.com/"):
+ //lint:ignore ST1005 Google is a company name and should be capitalized.
if err = sendError(conn, errors.New("Google+ authentication discontinued as the service was sunset")); err != nil {
log.Warn("Failed to send Google+ deprecation to client", "err", err)
return
@@ -464,6 +471,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
case *noauthFlag:
username, avatar, address, err = authNoAuth(msg.URL)
default:
+ //lint:ignore ST1005 This error is to be displayed in the browser
err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues")
}
if err != nil {
@@ -522,7 +530,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) {
// Send an error if too frequent funding, othewise a success
if !fund {
- if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(timeout.Sub(time.Now())))); err != nil { // nolint: gosimple
+ if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple
log.Warn("Failed to send funding error to client", "err", err)
return
}
@@ -684,6 +692,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
+ //lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
// Twitter's API isn't really friendly with direct links. Still, we don't
@@ -698,6 +707,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
// Resolve the username from the final redirect, no intermediate junk
parts = strings.Split(res.Request.URL.String(), "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
+ //lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]
@@ -708,6 +718,7 @@ func authTwitter(url string) (string, string, common.Address, error) {
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
+ //lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string
@@ -723,6 +734,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
// Ensure the user specified a meaningful URL, no fancy nonsense
parts := strings.Split(url, "/")
if len(parts) < 4 || parts[len(parts)-2] != "posts" {
+ //lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("Invalid Facebook post URL")
}
username := parts[len(parts)-3]
@@ -742,6 +754,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
}
address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body)))
if address == (common.Address{}) {
+ //lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
var avatar string
@@ -757,6 +770,7 @@ func authFacebook(url string) (string, string, common.Address, error) {
func authNoAuth(url string) (string, string, common.Address, error) {
address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(url))
if address == (common.Address{}) {
+ //lint:ignore ST1005 This error is to be displayed in the browser
return "", "", common.Address{}, errors.New("No Ethereum address found to fund")
}
return address.Hex() + "@noauth", "", address, nil
diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go
index f798c2cc48..602ab2fa6d 100644
--- a/cmd/geth/chaincmd.go
+++ b/cmd/geth/chaincmd.go
@@ -57,6 +57,18 @@ This is a destructive action and changes the network in which you will be
participating.
It expects the genesis file as argument.`,
+ }
+ dumpGenesisCommand = cli.Command{
+ Action: utils.MigrateFlags(dumpGenesis),
+ Name: "dumpgenesis",
+ Usage: "Dumps genesis block JSON configuration to stdout",
+ ArgsUsage: "",
+ Flags: []cli.Flag{
+ utils.DataDirFlag,
+ },
+ Category: "BLOCKCHAIN COMMANDS",
+ Description: `
+The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
}
importCommand = cli.Command{
Action: utils.MigrateFlags(importChain),
@@ -257,6 +269,17 @@ func initGenesis(ctx *cli.Context) error {
return nil
}
+func dumpGenesis(ctx *cli.Context) error {
+ genesis := utils.MakeGenesis(ctx)
+ if genesis == nil {
+ genesis = core.DefaultGenesisBlock()
+ }
+ if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
+ utils.Fatalf("could not encode genesis")
+ }
+ return nil
+}
+
func importChain(ctx *cli.Context) error {
if len(ctx.Args()) < 1 {
utils.Fatalf("This command requires an argument.")
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index c0ffa0817b..b7c825aaff 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -101,8 +101,8 @@ func defaultNodeConfig() node.Config {
cfg := node.DefaultConfig
cfg.Name = clientIdentifier
cfg.Version = params.VersionWithCommit(gitCommit, gitDate)
- cfg.HTTPModules = append(cfg.HTTPModules, "eth", "shh")
- cfg.WSModules = append(cfg.WSModules, "eth", "shh")
+ cfg.HTTPModules = append(cfg.HTTPModules, "eth")
+ cfg.WSModules = append(cfg.WSModules, "eth")
cfg.IPCPath = "geth.ipc"
return cfg
}
@@ -153,6 +153,10 @@ func makeFullNode(ctx *cli.Context) *node.Node {
cfg.Eth.OverrideIstanbul = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideIstanbulFlag.Name))
}
+ if ctx.GlobalIsSet(utils.OverrideMuirGlacierFlag.Name) {
+ cfg.Eth.OverrideMuirGlacier = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideMuirGlacierFlag.Name))
+ }
+
ethChan := utils.RegisterEthService(stack, &cfg.Eth)
// plugin service must be after eth service so that eth service will be stopped gradually if any of the plugin
diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go
index 23213239c8..00280dddec 100644
--- a/cmd/geth/consolecmd_test.go
+++ b/cmd/geth/consolecmd_test.go
@@ -93,7 +93,9 @@ func TestConsoleWelcome(t *testing.T) {
geth.SetTemplateFunc("gover", runtime.Version)
geth.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta })
geth.SetTemplateFunc("quorumver", func() string { return params.QuorumVersion })
- geth.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
+ geth.SetTemplateFunc("niltime", func() string {
+ return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
+ })
geth.SetTemplateFunc("apis", func() string { return ipcAPIs })
// Verify the actual welcome message to the required template
@@ -133,11 +135,14 @@ func TestIPCAttachWelcome(t *testing.T) {
"--datadir", datadir, "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none",
"--etherbase", coinbase, "--shh", "--ipcpath", ipc)
+ defer func() {
+ geth.Interrupt()
+ geth.ExpectExit()
+ }()
+
waitForEndpoint(t, ipc, 3*time.Second)
testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs)
- geth.Interrupt()
- geth.ExpectExit()
}
func TestHTTPAttachWelcome(t *testing.T) {
@@ -155,9 +160,6 @@ func TestHTTPAttachWelcome(t *testing.T) {
endpoint := "http://127.0.0.1:" + port
waitForEndpoint(t, endpoint, 3*time.Second)
testAttachWelcome(t, geth, endpoint, httpAPIs)
-
- geth.Interrupt()
- geth.ExpectExit()
}
func TestWSAttachWelcome(t *testing.T) {
@@ -175,9 +177,6 @@ func TestWSAttachWelcome(t *testing.T) {
endpoint := "ws://127.0.0.1:" + port
waitForEndpoint(t, endpoint, 3*time.Second)
testAttachWelcome(t, geth, endpoint, httpAPIs)
-
- geth.Interrupt()
- geth.ExpectExit()
}
func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
@@ -193,7 +192,9 @@ func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) {
attach.SetTemplateFunc("gethver", func() string { return params.VersionWithMeta })
attach.SetTemplateFunc("quorumver", func() string { return params.QuorumVersion })
attach.SetTemplateFunc("etherbase", func() string { return geth.Etherbase })
- attach.SetTemplateFunc("niltime", func() string { return time.Unix(0, 0).Format(time.RFC1123) })
+ attach.SetTemplateFunc("niltime", func() string {
+ return time.Unix(0, 0).Format("Mon Jan 02 2006 15:04:05 GMT-0700 (MST)")
+ })
attach.SetTemplateFunc("ipc", func() bool { return strings.HasPrefix(endpoint, "ipc") || strings.Contains(apis, "admin") })
attach.SetTemplateFunc("datadir", func() string { return geth.Datadir })
attach.SetTemplateFunc("apis", func() string { return apis })
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 29219f3695..721d057c66 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -74,6 +74,7 @@ var (
utils.NoUSBFlag,
utils.SmartCardDaemonPathFlag,
utils.OverrideIstanbulFlag,
+ utils.OverrideMuirGlacierFlag,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,
@@ -134,6 +135,7 @@ var (
utils.NetrestrictFlag,
utils.NodeKeyFileFlag,
utils.NodeKeyHexFlag,
+ utils.DNSDiscoveryFlag,
utils.DeveloperFlag,
utils.DeveloperPeriodFlag,
utils.TestnetFlag,
@@ -216,7 +218,7 @@ func init() {
// Initialize the CLI app and start Geth
app.Action = geth
app.HideVersion = true // we have a command to print the version
- app.Copyright = "Copyright 2013-2019 The go-ethereum Authors"
+ app.Copyright = "Copyright 2013-2020 The go-ethereum Authors"
app.Commands = []cli.Command{
// See chaincmd.go:
initCommand,
@@ -227,6 +229,7 @@ func init() {
copydbCommand,
removedbCommand,
dumpCommand,
+ dumpGenesisCommand,
inspectCommand,
// See accountcmd.go:
accountCommand,
@@ -255,7 +258,7 @@ func init() {
app.Flags = append(app.Flags, metricsFlags...)
app.Before = func(ctx *cli.Context) error {
- return debug.Setup(ctx, "")
+ return debug.Setup(ctx)
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
diff --git a/cmd/geth/retesteth.go b/cmd/geth/retesteth.go
index 078e2c3e85..98f1cc022d 100644
--- a/cmd/geth/retesteth.go
+++ b/cmd/geth/retesteth.go
@@ -81,6 +81,7 @@ type RetestethEthAPI interface {
SendRawTransaction(ctx context.Context, rawTx hexutil.Bytes) (common.Hash, error)
BlockNumber(ctx context.Context) (uint64, error)
GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error)
+ GetBlockByHash(ctx context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error)
GetBalance(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (*math.HexOrDecimal256, error)
GetCode(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (hexutil.Bytes, error)
GetTransactionCount(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (uint64, error)
@@ -111,7 +112,6 @@ type RetestethAPI struct {
genesisHash common.Hash
engine *NoRewardEngine
blockchain *core.BlockChain
- blockNumber uint64
txMap map[common.Address]map[uint64]*types.Transaction // Sender -> Nonce -> Transaction
txSenders map[common.Address]struct{} // Set of transaction senders
blockInterval uint64
@@ -361,7 +361,7 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
ChainID: chainId,
HomesteadBlock: homesteadBlock,
DAOForkBlock: daoForkBlock,
- DAOForkSupport: false,
+ DAOForkSupport: true,
EIP150Block: eip150Block,
EIP155Block: eip155Block,
EIP158Block: eip158Block,
@@ -416,7 +416,6 @@ func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainPa
api.engine = engine
api.blockchain = blockchain
api.db = state.NewDatabase(api.ethDb)
- api.blockNumber = 0
api.txMap = make(map[common.Address]map[uint64]*types.Transaction)
api.txSenders = make(map[common.Address]struct{})
api.blockInterval = 0
@@ -429,7 +428,7 @@ func (api *RetestethAPI) SendRawTransaction(ctx context.Context, rawTx hexutil.B
// Return nil is not by mistake - some tests include sending transaction where gasLimit overflows uint64
return common.Hash{}, nil
}
- signer := types.MakeSigner(api.chainConfig, big.NewInt(int64(api.blockNumber)))
+ signer := types.MakeSigner(api.chainConfig, big.NewInt(int64(api.currentNumber())))
sender, err := types.Sender(signer, tx)
if err != nil {
return common.Hash{}, err
@@ -455,9 +454,17 @@ func (api *RetestethAPI) MineBlocks(ctx context.Context, number uint64) (bool, e
return true, nil
}
+func (api *RetestethAPI) currentNumber() uint64 {
+ if current := api.blockchain.CurrentBlock(); current != nil {
+ return current.NumberU64()
+ }
+ return 0
+}
+
func (api *RetestethAPI) mineBlock() error {
- parentHash := rawdb.ReadCanonicalHash(api.ethDb, api.blockNumber)
- parent := rawdb.ReadBlock(api.ethDb, parentHash, api.blockNumber)
+ number := api.currentNumber()
+ parentHash := rawdb.ReadCanonicalHash(api.ethDb, number)
+ parent := rawdb.ReadBlock(api.ethDb, parentHash, number)
var timestamp uint64
if api.blockInterval == 0 {
timestamp = uint64(time.Now().Unix())
@@ -467,7 +474,7 @@ func (api *RetestethAPI) mineBlock() error {
gasLimit := core.CalcGasLimit(parent, 9223372036854775807, 9223372036854775807)
header := &types.Header{
ParentHash: parent.Hash(),
- Number: big.NewInt(int64(api.blockNumber + 1)),
+ Number: big.NewInt(int64(number + 1)),
GasLimit: gasLimit,
Extra: api.extraData,
Time: timestamp,
@@ -500,7 +507,6 @@ func (api *RetestethAPI) mineBlock() error {
txCount := 0
var txs []*types.Transaction
var receipts []*types.Receipt
- var coalescedLogs []*types.Log
var blockFull = gasPool.Gas() < params.TxGas
for address := range api.txSenders {
if blockFull {
@@ -527,7 +533,6 @@ func (api *RetestethAPI) mineBlock() error {
}
txs = append(txs, tx)
receipts = append(receipts, receipt)
- coalescedLogs = append(coalescedLogs, receipt.Logs...)
delete(m, nonce)
if len(m) == 0 {
// Last tx for the sender
@@ -555,8 +560,7 @@ func (api *RetestethAPI) importBlock(block *types.Block) error {
if _, err := api.blockchain.InsertChain([]*types.Block{block}); err != nil {
return err
}
- api.blockNumber = block.NumberU64()
- fmt.Printf("Imported block %d\n", block.NumberU64())
+ fmt.Printf("Imported block %d, head is %d\n", block.NumberU64(), api.currentNumber())
return nil
}
@@ -581,7 +585,9 @@ func (api *RetestethAPI) RewindToBlock(ctx context.Context, newHead uint64) (boo
if err := api.blockchain.SetHead(newHead); err != nil {
return false, err
}
- api.blockNumber = newHead
+ // When we rewind, the transaction pool should be cleaned out.
+ api.txMap = make(map[common.Address]map[uint64]*types.Transaction)
+ api.txSenders = make(map[common.Address]struct{})
return true, nil
}
@@ -601,8 +607,7 @@ func (api *RetestethAPI) GetLogHash(ctx context.Context, txHash common.Hash) (co
}
func (api *RetestethAPI) BlockNumber(ctx context.Context) (uint64, error) {
- //fmt.Printf("BlockNumber, response: %d\n", api.blockNumber)
- return api.blockNumber, nil
+ return api.currentNumber(), nil
}
func (api *RetestethAPI) GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error) {
@@ -619,6 +624,20 @@ func (api *RetestethAPI) GetBlockByNumber(ctx context.Context, blockNr math.HexO
return nil, fmt.Errorf("block %d not found", blockNr)
}
+func (api *RetestethAPI) GetBlockByHash(ctx context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error) {
+ block := api.blockchain.GetBlockByHash(blockHash)
+ if block != nil {
+ response, err := RPCMarshalBlock(block, true, fullTx)
+ if err != nil {
+ return nil, err
+ }
+ response["author"] = response["miner"]
+ response["totalDifficulty"] = (*hexutil.Big)(api.blockchain.GetTd(block.Hash(), block.Number().Uint64()))
+ return response, err
+ }
+ return nil, fmt.Errorf("block 0x%x not found", blockHash)
+}
+
func (api *RetestethAPI) AccountRange(ctx context.Context,
blockHashOrNumber *math.HexOrDecimal256, txIndex uint64,
addressHash *math.HexOrDecimal256, maxResults uint64,
@@ -687,9 +706,6 @@ func (api *RetestethAPI) AccountRange(ctx context.Context,
for i := 0; i < int(maxResults) && it.Next(); i++ {
if preimage := accountTrie.GetKey(it.Key); preimage != nil {
result.AddressMap[common.BytesToHash(it.Key)] = common.BytesToAddress(preimage)
- //fmt.Printf("%x: %x\n", it.Key, preimage)
- } else {
- //fmt.Printf("could not find preimage for %x\n", it.Key)
}
}
//fmt.Printf("Number of entries returned: %d\n", len(result.AddressMap))
@@ -813,9 +829,6 @@ func (api *RetestethAPI) StorageRangeAt(ctx context.Context,
Key: string(ks),
Value: string(vs),
}
- //fmt.Printf("Key: %s, Value: %s\n", ks, vs)
- } else {
- //fmt.Printf("Did not find preimage for %x\n", it.Key)
}
}
if it.Next() {
@@ -894,7 +907,7 @@ func retesteth(ctx *cli.Context) error {
log.Info("HTTP endpoint closed", "url", httpEndpoint)
}()
- abortChan := make(chan os.Signal)
+ abortChan := make(chan os.Signal, 11)
signal.Notify(abortChan, os.Interrupt)
sig := <-abortChan
diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go
index 33278d1245..45d2a85bc6 100644
--- a/cmd/geth/usage.go
+++ b/cmd/geth/usage.go
@@ -189,6 +189,7 @@ var AppHelpFlagGroups = []flagGroup{
utils.BootnodesFlag,
utils.BootnodesV4Flag,
utils.BootnodesV5Flag,
+ utils.DNSDiscoveryFlag,
utils.ListenPortFlag,
utils.MaxPeersFlag,
utils.MaxPendingPeersFlag,
diff --git a/cmd/puppeth/module_dashboard.go b/cmd/puppeth/module_dashboard.go
index 9a77587b4a..39ccdd9aa0 100644
--- a/cmd/puppeth/module_dashboard.go
+++ b/cmd/puppeth/module_dashboard.go
@@ -41,7 +41,7 @@ var dashboardContent = `
- {{.NetworkTitle}}: Ethereum Testnet
+ {{.NetworkTitle}}: Network Dashboard
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 65f8896fec..fe8e174d17 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -88,6 +88,17 @@ SUBCOMMANDS:
{{range $categorized.Flags}}{{"\t"}}{{.}}
{{end}}
{{end}}{{end}}`
+
+ OriginCommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...]
+{{if .Description}}{{.Description}}
+{{end}}{{if .Subcommands}}
+SUBCOMMANDS:
+ {{range .Subcommands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
+ {{end}}{{end}}{{if .Flags}}
+OPTIONS:
+{{range $.Flags}}{{"\t"}}{{.}}
+{{end}}
+{{end}}`
)
func init() {
@@ -237,6 +248,10 @@ var (
Name: "override.istanbul",
Usage: "Manually specify Istanbul fork-block, overriding the bundled setting",
}
+ OverrideMuirGlacierFlag = cli.Uint64Flag{
+ Name: "override.muirglacier",
+ Usage: "Manually specify Muir Glacier fork-block, overriding the bundled setting",
+ }
// Light server and client settings
LightLegacyServFlag = cli.IntFlag{ // Deprecated in favor of light.serve, remove in 2021
Name: "lightserv",
@@ -675,6 +690,10 @@ var (
Name: "netrestrict",
Usage: "Restricts network communication to the given IP networks (CIDR masks)",
}
+ DNSDiscoveryFlag = cli.StringFlag{
+ Name: "discovery.dns",
+ Usage: "Sets DNS discovery entry points (use \"\" to disable DNS)",
+ }
// ATM the url is left to the user and deployment to
JSpathFlag = cli.StringFlag{
@@ -921,9 +940,9 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) {
switch {
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV4Flag.Name):
if ctx.GlobalIsSet(BootnodesV4Flag.Name) {
- urls = strings.Split(ctx.GlobalString(BootnodesV4Flag.Name), ",")
+ urls = splitAndTrim(ctx.GlobalString(BootnodesV4Flag.Name))
} else {
- urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
+ urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
}
case ctx.GlobalBool(TestnetFlag.Name):
urls = params.TestnetBootnodes
@@ -955,9 +974,9 @@ func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) {
switch {
case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(BootnodesV5Flag.Name):
if ctx.GlobalIsSet(BootnodesV5Flag.Name) {
- urls = strings.Split(ctx.GlobalString(BootnodesV5Flag.Name), ",")
+ urls = splitAndTrim(ctx.GlobalString(BootnodesV5Flag.Name))
} else {
- urls = strings.Split(ctx.GlobalString(BootnodesFlag.Name), ",")
+ urls = splitAndTrim(ctx.GlobalString(BootnodesFlag.Name))
}
case ctx.GlobalBool(RinkebyFlag.Name):
urls = params.RinkebyBootnodes
@@ -1665,6 +1684,14 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if ctx.GlobalIsSet(RPCGlobalGasCap.Name) {
cfg.RPCGasCap = new(big.Int).SetUint64(ctx.GlobalUint64(RPCGlobalGasCap.Name))
}
+ if ctx.GlobalIsSet(DNSDiscoveryFlag.Name) {
+ urls := ctx.GlobalString(DNSDiscoveryFlag.Name)
+ if urls == "" {
+ cfg.DiscoveryURLs = []string{}
+ } else {
+ cfg.DiscoveryURLs = splitAndTrim(urls)
+ }
+ }
// set immutability threshold in config
params.SetQuorumImmutabilityThreshold(ctx.GlobalInt(QuorumImmutabilityThreshold.Name))
@@ -1676,16 +1703,19 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
cfg.NetworkId = 3
}
cfg.Genesis = core.DefaultTestnetGenesisBlock()
+ setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.TestnetGenesisHash])
case ctx.GlobalBool(RinkebyFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 4
}
cfg.Genesis = core.DefaultRinkebyGenesisBlock()
+ setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.RinkebyGenesisHash])
case ctx.GlobalBool(GoerliFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 5
}
cfg.Genesis = core.DefaultGoerliGenesisBlock()
+ setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.GoerliGenesisHash])
case ctx.GlobalBool(DeveloperFlag.Name):
if !ctx.GlobalIsSet(NetworkIdFlag.Name) {
cfg.NetworkId = 1337
@@ -1712,7 +1742,20 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) {
if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(MinerLegacyGasPriceFlag.Name) {
cfg.Miner.GasPrice = big.NewInt(1)
}
+ default:
+ if cfg.NetworkId == 1 {
+ setDNSDiscoveryDefaults(cfg, params.KnownDNSNetworks[params.MainnetGenesisHash])
+ }
+ }
+}
+
+// setDNSDiscoveryDefaults configures DNS discovery with the given URL if
+// no URLs are set.
+func setDNSDiscoveryDefaults(cfg *eth.Config, url string) {
+ if cfg.DiscoveryURLs != nil {
+ return
}
+ cfg.DiscoveryURLs = []string{url}
}
// RegisterEthService adds an Ethereum client to the stack.
diff --git a/common/mclock/mclock.go b/common/mclock/mclock.go
index d0e0cd78be..3aca257cb3 100644
--- a/common/mclock/mclock.go
+++ b/common/mclock/mclock.go
@@ -31,44 +31,93 @@ func Now() AbsTime {
return AbsTime(monotime.Now())
}
-// Add returns t + d.
+// Add returns t + d as absolute time.
func (t AbsTime) Add(d time.Duration) AbsTime {
return t + AbsTime(d)
}
+// Sub returns t - t2 as a duration.
+func (t AbsTime) Sub(t2 AbsTime) time.Duration {
+ return time.Duration(t - t2)
+}
+
// The Clock interface makes it possible to replace the monotonic system clock with
// a simulated clock.
type Clock interface {
Now() AbsTime
Sleep(time.Duration)
- After(time.Duration) <-chan time.Time
+ NewTimer(time.Duration) ChanTimer
+ After(time.Duration) <-chan AbsTime
AfterFunc(d time.Duration, f func()) Timer
}
-// Timer represents a cancellable event returned by AfterFunc
+// Timer is a cancellable event created by AfterFunc.
type Timer interface {
+ // Stop cancels the timer. It returns false if the timer has already
+ // expired or been stopped.
Stop() bool
}
+// ChanTimer is a cancellable event created by NewTimer.
+type ChanTimer interface {
+ Timer
+
+ // The channel returned by C receives a value when the timer expires.
+ C() <-chan AbsTime
+ // Reset reschedules the timer with a new timeout.
+ // It should be invoked only on stopped or expired timers with drained channels.
+ Reset(time.Duration)
+}
+
// System implements Clock using the system clock.
type System struct{}
// Now returns the current monotonic time.
-func (System) Now() AbsTime {
+func (c System) Now() AbsTime {
return AbsTime(monotime.Now())
}
// Sleep blocks for the given duration.
-func (System) Sleep(d time.Duration) {
+func (c System) Sleep(d time.Duration) {
time.Sleep(d)
}
+// NewTimer creates a timer which can be rescheduled.
+func (c System) NewTimer(d time.Duration) ChanTimer {
+ ch := make(chan AbsTime, 1)
+ t := time.AfterFunc(d, func() {
+ // This send is non-blocking because that's how time.Timer
+ // behaves. It doesn't matter in the happy case, but does
+ // when Reset is misused.
+ select {
+ case ch <- c.Now():
+ default:
+ }
+ })
+ return &systemTimer{t, ch}
+}
+
// After returns a channel which receives the current time after d has elapsed.
-func (System) After(d time.Duration) <-chan time.Time {
- return time.After(d)
+func (c System) After(d time.Duration) <-chan AbsTime {
+ ch := make(chan AbsTime, 1)
+ time.AfterFunc(d, func() { ch <- c.Now() })
+ return ch
}
// AfterFunc runs f on a new goroutine after the duration has elapsed.
-func (System) AfterFunc(d time.Duration, f func()) Timer {
+func (c System) AfterFunc(d time.Duration, f func()) Timer {
return time.AfterFunc(d, f)
}
+
+type systemTimer struct {
+ *time.Timer
+ ch <-chan AbsTime
+}
+
+func (st *systemTimer) Reset(d time.Duration) {
+ st.Timer.Reset(d)
+}
+
+func (st *systemTimer) C() <-chan AbsTime {
+ return st.ch
+}
diff --git a/common/mclock/simclock.go b/common/mclock/simclock.go
index 4d351252ff..766ca0f873 100644
--- a/common/mclock/simclock.go
+++ b/common/mclock/simclock.go
@@ -17,6 +17,7 @@
package mclock
import (
+ "container/heap"
"sync"
"time"
)
@@ -32,18 +33,24 @@ import (
// the timeout using a channel or semaphore.
type Simulated struct {
now AbsTime
- scheduled []*simTimer
+ scheduled simTimerHeap
mu sync.RWMutex
cond *sync.Cond
- lastId uint64
}
-// simTimer implements Timer on the virtual clock.
+// simTimer implements ChanTimer on the virtual clock.
type simTimer struct {
- do func()
- at AbsTime
- id uint64
- s *Simulated
+ at AbsTime
+ index int // position in s.scheduled
+ s *Simulated
+ do func()
+ ch <-chan AbsTime
+}
+
+func (s *Simulated) init() {
+ if s.cond == nil {
+ s.cond = sync.NewCond(&s.mu)
+ }
}
// Run moves the clock by the given duration, executing all timers before that duration.
@@ -53,14 +60,9 @@ func (s *Simulated) Run(d time.Duration) {
end := s.now + AbsTime(d)
var do []func()
- for len(s.scheduled) > 0 {
- ev := s.scheduled[0]
- if ev.at > end {
- break
- }
- s.now = ev.at
+ for len(s.scheduled) > 0 && s.scheduled[0].at <= end {
+ ev := heap.Pop(&s.scheduled).(*simTimer)
do = append(do, ev.do)
- s.scheduled = s.scheduled[1:]
}
s.now = end
s.mu.Unlock()
@@ -102,14 +104,22 @@ func (s *Simulated) Sleep(d time.Duration) {
<-s.After(d)
}
+// NewTimer creates a timer which fires when the clock has advanced by d.
+func (s *Simulated) NewTimer(d time.Duration) ChanTimer {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ ch := make(chan AbsTime, 1)
+ var timer *simTimer
+ timer = s.schedule(d, func() { ch <- timer.at })
+ timer.ch = ch
+ return timer
+}
+
// After returns a channel which receives the current time after the clock
// has advanced by d.
-func (s *Simulated) After(d time.Duration) <-chan time.Time {
- after := make(chan time.Time, 1)
- s.AfterFunc(d, func() {
- after <- (time.Time{}).Add(time.Duration(s.now))
- })
- return after
+func (s *Simulated) After(d time.Duration) <-chan AbsTime {
+ return s.NewTimer(d).C()
}
// AfterFunc runs fn after the clock has advanced by d. Unlike with the system
@@ -117,46 +127,83 @@ func (s *Simulated) After(d time.Duration) <-chan time.Time {
func (s *Simulated) AfterFunc(d time.Duration, fn func()) Timer {
s.mu.Lock()
defer s.mu.Unlock()
+
+ return s.schedule(d, fn)
+}
+
+func (s *Simulated) schedule(d time.Duration, fn func()) *simTimer {
s.init()
at := s.now + AbsTime(d)
- s.lastId++
- id := s.lastId
- l, h := 0, len(s.scheduled)
- ll := h
- for l != h {
- m := (l + h) / 2
- if (at < s.scheduled[m].at) || ((at == s.scheduled[m].at) && (id < s.scheduled[m].id)) {
- h = m
- } else {
- l = m + 1
- }
- }
ev := &simTimer{do: fn, at: at, s: s}
- s.scheduled = append(s.scheduled, nil)
- copy(s.scheduled[l+1:], s.scheduled[l:ll])
- s.scheduled[l] = ev
+ heap.Push(&s.scheduled, ev)
s.cond.Broadcast()
return ev
}
func (ev *simTimer) Stop() bool {
- s := ev.s
- s.mu.Lock()
- defer s.mu.Unlock()
+ ev.s.mu.Lock()
+ defer ev.s.mu.Unlock()
- for i := 0; i < len(s.scheduled); i++ {
- if s.scheduled[i] == ev {
- s.scheduled = append(s.scheduled[:i], s.scheduled[i+1:]...)
- s.cond.Broadcast()
- return true
- }
+ if ev.index < 0 {
+ return false
}
- return false
+ heap.Remove(&ev.s.scheduled, ev.index)
+ ev.s.cond.Broadcast()
+ ev.index = -1
+ return true
}
-func (s *Simulated) init() {
- if s.cond == nil {
- s.cond = sync.NewCond(&s.mu)
+func (ev *simTimer) Reset(d time.Duration) {
+ if ev.ch == nil {
+ panic("mclock: Reset() on timer created by AfterFunc")
}
+
+ ev.s.mu.Lock()
+ defer ev.s.mu.Unlock()
+ ev.at = ev.s.now.Add(d)
+ if ev.index < 0 {
+ heap.Push(&ev.s.scheduled, ev) // already expired
+ } else {
+ heap.Fix(&ev.s.scheduled, ev.index) // hasn't fired yet, reschedule
+ }
+ ev.s.cond.Broadcast()
+}
+
+func (ev *simTimer) C() <-chan AbsTime {
+ if ev.ch == nil {
+ panic("mclock: C() on timer created by AfterFunc")
+ }
+ return ev.ch
+}
+
+type simTimerHeap []*simTimer
+
+func (h *simTimerHeap) Len() int {
+ return len(*h)
+}
+
+func (h *simTimerHeap) Less(i, j int) bool {
+ return (*h)[i].at < (*h)[j].at
+}
+
+func (h *simTimerHeap) Swap(i, j int) {
+ (*h)[i], (*h)[j] = (*h)[j], (*h)[i]
+ (*h)[i].index = i
+ (*h)[j].index = j
+}
+
+func (h *simTimerHeap) Push(x interface{}) {
+ t := x.(*simTimer)
+ t.index = len(*h)
+ *h = append(*h, t)
+}
+
+func (h *simTimerHeap) Pop() interface{} {
+ end := len(*h) - 1
+ t := (*h)[end]
+ t.index = -1
+ (*h)[end] = nil
+ *h = (*h)[:end]
+ return t
}
diff --git a/common/mclock/simclock_test.go b/common/mclock/simclock_test.go
index 09e4391c1c..48f3fd56a0 100644
--- a/common/mclock/simclock_test.go
+++ b/common/mclock/simclock_test.go
@@ -25,14 +25,16 @@ var _ Clock = System{}
var _ Clock = new(Simulated)
func TestSimulatedAfter(t *testing.T) {
- const timeout = 30 * time.Minute
- const adv = time.Minute
-
var (
- c Simulated
- end = c.Now().Add(timeout)
- ch = c.After(timeout)
+ timeout = 30 * time.Minute
+ offset = 99 * time.Hour
+ adv = 11 * time.Minute
+ c Simulated
)
+ c.Run(offset)
+
+ end := c.Now().Add(timeout)
+ ch := c.After(timeout)
for c.Now() < end.Add(-adv) {
c.Run(adv)
select {
@@ -45,8 +47,8 @@ func TestSimulatedAfter(t *testing.T) {
c.Run(adv)
select {
case stamp := <-ch:
- want := time.Time{}.Add(timeout)
- if !stamp.Equal(want) {
+ want := AbsTime(0).Add(offset).Add(timeout)
+ if stamp != want {
t.Errorf("Wrong time sent on timer channel: got %v, want %v", stamp, want)
}
default:
@@ -94,7 +96,7 @@ func TestSimulatedSleep(t *testing.T) {
var (
c Simulated
timeout = 1 * time.Hour
- done = make(chan AbsTime)
+ done = make(chan AbsTime, 1)
)
go func() {
c.Sleep(timeout)
@@ -113,3 +115,48 @@ func TestSimulatedSleep(t *testing.T) {
t.Fatal("Sleep didn't return in time")
}
}
+
+func TestSimulatedTimerReset(t *testing.T) {
+ var (
+ c Simulated
+ timeout = 1 * time.Hour
+ )
+ timer := c.NewTimer(timeout)
+ c.Run(2 * timeout)
+ select {
+ case ftime := <-timer.C():
+ if ftime != AbsTime(timeout) {
+ t.Fatalf("wrong time %v sent on timer channel, want %v", ftime, AbsTime(timeout))
+ }
+ default:
+ t.Fatal("timer didn't fire")
+ }
+
+ timer.Reset(timeout)
+ c.Run(2 * timeout)
+ select {
+ case ftime := <-timer.C():
+ if ftime != AbsTime(3*timeout) {
+ t.Fatalf("wrong time %v sent on timer channel, want %v", ftime, AbsTime(3*timeout))
+ }
+ default:
+ t.Fatal("timer didn't fire again")
+ }
+}
+
+func TestSimulatedTimerStop(t *testing.T) {
+ var (
+ c Simulated
+ timeout = 1 * time.Hour
+ )
+ timer := c.NewTimer(timeout)
+ c.Run(2 * timeout)
+ if timer.Stop() {
+ t.Errorf("Stop returned true for fired timer")
+ }
+ select {
+ case <-timer.C():
+ default:
+ t.Fatal("timer didn't fire")
+ }
+}
diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go
index cf8552f3ab..f4f65047be 100644
--- a/consensus/ethash/algorithm_test.go
+++ b/consensus/ethash/algorithm_test.go
@@ -729,7 +729,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) {
go func(idx int) {
defer pend.Done()
- ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal}, nil, false)
+ ethash := New(Config{cachedir, 0, 1, "", 0, 0, ModeNormal, nil}, nil, false)
defer ethash.Close()
if err := ethash.VerifySeal(nil, block.Header()); err != nil {
t.Errorf("proc %d: block verification failed: %v", idx, err)
diff --git a/consensus/ethash/api.go b/consensus/ethash/api.go
index 4d8eed4161..68b3a84b09 100644
--- a/consensus/ethash/api.go
+++ b/consensus/ethash/api.go
@@ -28,7 +28,7 @@ var errEthashStopped = errors.New("ethash stopped")
// API exposes ethash related methods for the RPC interface.
type API struct {
- ethash *Ethash // Make sure the mode of ethash is normal.
+ ethash *Ethash
}
// GetWork returns a work package for external miner.
@@ -39,7 +39,7 @@ type API struct {
// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
// result[3] - hex encoded block number
func (api *API) GetWork() ([4]string, error) {
- if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
+ if api.ethash.remote == nil {
return [4]string{}, errors.New("not supported")
}
@@ -47,13 +47,11 @@ func (api *API) GetWork() ([4]string, error) {
workCh = make(chan [4]string, 1)
errc = make(chan error, 1)
)
-
select {
- case api.ethash.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
- case <-api.ethash.exitCh:
+ case api.ethash.remote.fetchWorkCh <- &sealWork{errc: errc, res: workCh}:
+ case <-api.ethash.remote.exitCh:
return [4]string{}, errEthashStopped
}
-
select {
case work := <-workCh:
return work, nil
@@ -66,23 +64,21 @@ func (api *API) GetWork() ([4]string, error) {
// It returns an indication if the work was accepted.
// Note either an invalid solution, a stale work a non-existent work will return false.
func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) bool {
- if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
+ if api.ethash.remote == nil {
return false
}
var errc = make(chan error, 1)
-
select {
- case api.ethash.submitWorkCh <- &mineResult{
+ case api.ethash.remote.submitWorkCh <- &mineResult{
nonce: nonce,
mixDigest: digest,
hash: hash,
errc: errc,
}:
- case <-api.ethash.exitCh:
+ case <-api.ethash.remote.exitCh:
return false
}
-
err := <-errc
return err == nil
}
@@ -94,21 +90,19 @@ func (api *API) SubmitWork(nonce types.BlockNonce, hash, digest common.Hash) boo
// It accepts the miner hash rate and an identifier which must be unique
// between nodes.
func (api *API) SubmitHashRate(rate hexutil.Uint64, id common.Hash) bool {
- if api.ethash.config.PowMode != ModeNormal && api.ethash.config.PowMode != ModeTest {
+ if api.ethash.remote == nil {
return false
}
var done = make(chan struct{}, 1)
-
select {
- case api.ethash.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
- case <-api.ethash.exitCh:
+ case api.ethash.remote.submitRateCh <- &hashrate{done: done, rate: uint64(rate), id: id}:
+ case <-api.ethash.remote.exitCh:
return false
}
// Block until hash rate submitted successfully.
<-done
-
return true
}
diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go
index ad68f3972b..b6b1914b1e 100644
--- a/consensus/ethash/consensus.go
+++ b/consensus/ethash/consensus.go
@@ -44,6 +44,11 @@ var (
maxUncles = 2 // Maximum number of uncles allowed in a single block
allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks
+ // calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384.
+ // It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks.
+ // Specification EIP-2384: https://eips.ethereum.org/EIPS/eip-2384
+ calcDifficultyEip2384 = makeDifficultyCalculator(big.NewInt(9000000))
+
// calcDifficultyConstantinople is the difficulty adjustment algorithm for Constantinople.
// It returns the difficulty that a new block should have when created at time given the
// parent block's time and difficulty. The calculation uses the Byzantium rules, but with
@@ -63,7 +68,7 @@ var (
// codebase, inherently breaking if the engine is swapped out. Please put common
// error types into the consensus package.
var (
- errZeroBlockTime = errors.New("timestamp equals parent's")
+ errOlderBlockTime = errors.New("timestamp older than parent")
errTooManyUncles = errors.New("too many uncles")
errDuplicateUncle = errors.New("duplicate uncle")
errUncleIsAncestor = errors.New("uncle is ancestor")
@@ -255,9 +260,9 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainReader, header, parent *
}
}
if header.Time <= parent.Time {
- return errZeroBlockTime
+ return errOlderBlockTime
}
- // Verify the block's difficulty based in its timestamp and parent's difficulty
+ // Verify the block's difficulty based on its timestamp and parent's difficulty
expected := ethash.CalcDifficulty(chain, header.Time, parent)
if expected.Cmp(header.Difficulty) != 0 {
@@ -316,6 +321,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainReader, time uint64, p
func CalcDifficulty(config *params.ChainConfig, time uint64, parent *types.Header) *big.Int {
next := new(big.Int).Add(parent.Number, big1)
switch {
+ case config.IsMuirGlacier(next):
+ return calcDifficultyEip2384(time, parent)
case config.IsConstantinople(next):
return calcDifficultyConstantinople(time, parent)
case config.IsByzantium(next):
diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go
index 97eaef7063..2e1523b89a 100644
--- a/consensus/ethash/ethash.go
+++ b/consensus/ethash/ethash.go
@@ -34,9 +34,7 @@ import (
"unsafe"
mmap "github.com/edsrzf/mmap-go"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/consensus"
- "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rpc"
@@ -50,7 +48,7 @@ var (
two256 = new(big.Int).Exp(big.NewInt(2), big.NewInt(256), big.NewInt(0))
// sharedEthash is a full instance that can be shared between multiple users.
- sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal}, nil, false)
+ sharedEthash = New(Config{"", 3, 0, "", 1, 0, ModeNormal, nil}, nil, false)
// algorithmRevision is the data structure version used for file naming.
algorithmRevision = 23
@@ -403,36 +401,8 @@ type Config struct {
DatasetsInMem int
DatasetsOnDisk int
PowMode Mode
-}
-
-// sealTask wraps a seal block with relative result channel for remote sealer thread.
-type sealTask struct {
- block *types.Block
- results chan<- *types.Block
-}
-
-// mineResult wraps the pow solution parameters for the specified block.
-type mineResult struct {
- nonce types.BlockNonce
- mixDigest common.Hash
- hash common.Hash
-
- errc chan error
-}
-
-// hashrate wraps the hash rate submitted by the remote sealer.
-type hashrate struct {
- id common.Hash
- ping time.Time
- rate uint64
- done chan struct{}
-}
-
-// sealWork wraps a seal work package for remote sealer.
-type sealWork struct {
- errc chan error
- res chan [4]string
+ Log log.Logger `toml:"-"`
}
// Ethash is a consensus engine based on proof-of-work implementing the ethash
@@ -448,52 +418,42 @@ type Ethash struct {
threads int // Number of threads to mine on if mining
update chan struct{} // Notification channel to update mining parameters
hashrate metrics.Meter // Meter tracking the average hashrate
-
- // Remote sealer related fields
- workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
- fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
- submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
- fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
- submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
+ remote *remoteSealer
// The fields below are hooks for testing
shared *Ethash // Shared PoW verifier to avoid cache regeneration
fakeFail uint64 // Block number which fails PoW check even in fake mode
fakeDelay time.Duration // Time delay to sleep for before returning from verify
- lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
- closeOnce sync.Once // Ensures exit channel will not be closed twice.
- exitCh chan chan error // Notification channel to exiting backend threads
+ lock sync.Mutex // Ensures thread safety for the in-memory caches and mining fields
+ closeOnce sync.Once // Ensures exit channel will not be closed twice.
}
// New creates a full sized ethash PoW scheme and starts a background thread for
// remote mining, also optionally notifying a batch of remote services of new work
// packages.
func New(config Config, notify []string, noverify bool) *Ethash {
+ if config.Log == nil {
+ config.Log = log.Root()
+ }
if config.CachesInMem <= 0 {
- log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
+ config.Log.Warn("One ethash cache must always be in memory", "requested", config.CachesInMem)
config.CachesInMem = 1
}
if config.CacheDir != "" && config.CachesOnDisk > 0 {
- log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
+ config.Log.Info("Disk storage enabled for ethash caches", "dir", config.CacheDir, "count", config.CachesOnDisk)
}
if config.DatasetDir != "" && config.DatasetsOnDisk > 0 {
- log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
+ config.Log.Info("Disk storage enabled for ethash DAGs", "dir", config.DatasetDir, "count", config.DatasetsOnDisk)
}
ethash := &Ethash{
- config: config,
- caches: newlru("cache", config.CachesInMem, newCache),
- datasets: newlru("dataset", config.DatasetsInMem, newDataset),
- update: make(chan struct{}),
- hashrate: metrics.NewMeterForced(),
- workCh: make(chan *sealTask),
- fetchWorkCh: make(chan *sealWork),
- submitWorkCh: make(chan *mineResult),
- fetchRateCh: make(chan chan uint64),
- submitRateCh: make(chan *hashrate),
- exitCh: make(chan chan error),
- }
- go ethash.remote(notify, noverify)
+ config: config,
+ caches: newlru("cache", config.CachesInMem, newCache),
+ datasets: newlru("dataset", config.DatasetsInMem, newDataset),
+ update: make(chan struct{}),
+ hashrate: metrics.NewMeterForced(),
+ }
+ ethash.remote = startRemoteSealer(ethash, notify, noverify)
return ethash
}
@@ -501,19 +461,13 @@ func New(config Config, notify []string, noverify bool) *Ethash {
// purposes.
func NewTester(notify []string, noverify bool) *Ethash {
ethash := &Ethash{
- config: Config{PowMode: ModeTest},
- caches: newlru("cache", 1, newCache),
- datasets: newlru("dataset", 1, newDataset),
- update: make(chan struct{}),
- hashrate: metrics.NewMeterForced(),
- workCh: make(chan *sealTask),
- fetchWorkCh: make(chan *sealWork),
- submitWorkCh: make(chan *mineResult),
- fetchRateCh: make(chan chan uint64),
- submitRateCh: make(chan *hashrate),
- exitCh: make(chan chan error),
- }
- go ethash.remote(notify, noverify)
+ config: Config{PowMode: ModeTest, Log: log.Root()},
+ caches: newlru("cache", 1, newCache),
+ datasets: newlru("dataset", 1, newDataset),
+ update: make(chan struct{}),
+ hashrate: metrics.NewMeterForced(),
+ }
+ ethash.remote = startRemoteSealer(ethash, notify, noverify)
return ethash
}
@@ -524,6 +478,7 @@ func NewFaker() *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
+ Log: log.Root(),
},
}
}
@@ -535,6 +490,7 @@ func NewFakeFailer(fail uint64) *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
+ Log: log.Root(),
},
fakeFail: fail,
}
@@ -547,6 +503,7 @@ func NewFakeDelayer(delay time.Duration) *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFake,
+ Log: log.Root(),
},
fakeDelay: delay,
}
@@ -558,6 +515,7 @@ func NewFullFaker() *Ethash {
return &Ethash{
config: Config{
PowMode: ModeFullFake,
+ Log: log.Root(),
},
}
}
@@ -573,13 +531,11 @@ func (ethash *Ethash) Close() error {
var err error
ethash.closeOnce.Do(func() {
// Short circuit if the exit channel is not allocated.
- if ethash.exitCh == nil {
+ if ethash.remote == nil {
return
}
- errc := make(chan error)
- ethash.exitCh <- errc
- err = <-errc
- close(ethash.exitCh)
+ close(ethash.remote.requestExit)
+ <-ethash.remote.exitCh
})
return err
}
@@ -683,8 +639,8 @@ func (ethash *Ethash) Hashrate() float64 {
var res = make(chan uint64, 1)
select {
- case ethash.fetchRateCh <- res:
- case <-ethash.exitCh:
+ case ethash.remote.fetchRateCh <- res:
+ case <-ethash.remote.exitCh:
// Return local hashrate only if ethash is stopped.
return ethash.hashrate.Rate1()
}
diff --git a/consensus/ethash/sealer.go b/consensus/ethash/sealer.go
index 43db1fcb7f..52c4ed46dc 100644
--- a/consensus/ethash/sealer.go
+++ b/consensus/ethash/sealer.go
@@ -18,6 +18,7 @@ package ethash
import (
"bytes"
+ "context"
crand "crypto/rand"
"encoding/json"
"errors"
@@ -33,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/log"
)
const (
@@ -56,7 +56,7 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
select {
case results <- block.WithSeal(header):
default:
- log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
+ ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "fake", "sealhash", ethash.SealHash(block.Header()))
}
return nil
}
@@ -85,8 +85,8 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
threads = 0 // Allows disabling local mining without extra logic around local/remote
}
// Push new work to remote sealer
- if ethash.workCh != nil {
- ethash.workCh <- &sealTask{block: block, results: results}
+ if ethash.remote != nil {
+ ethash.remote.workCh <- &sealTask{block: block, results: results}
}
var (
pend sync.WaitGroup
@@ -111,14 +111,14 @@ func (ethash *Ethash) Seal(chain consensus.ChainReader, block *types.Block, resu
select {
case results <- result:
default:
- log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
+ ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "local", "sealhash", ethash.SealHash(block.Header()))
}
close(abort)
case <-ethash.update:
// Thread count was changed on user request, restart
close(abort)
if err := ethash.Seal(chain, block, results, stop); err != nil {
- log.Error("Failed to restart sealing after update", "err", err)
+ ethash.config.Log.Error("Failed to restart sealing after update", "err", err)
}
}
// Wait for all miners to terminate and return the block
@@ -143,7 +143,7 @@ func (ethash *Ethash) mine(block *types.Block, id int, seed uint64, abort chan s
attempts = int64(0)
nonce = seed
)
- logger := log.New("miner", id)
+ logger := ethash.config.Log.New("miner", id)
logger.Trace("Started ethash search for new nonces", "seed", seed)
search:
for {
@@ -186,160 +186,128 @@ search:
runtime.KeepAlive(dataset)
}
-// remote is a standalone goroutine to handle remote mining related stuff.
-func (ethash *Ethash) remote(notify []string, noverify bool) {
- var (
- works = make(map[common.Hash]*types.Block)
- rates = make(map[common.Hash]hashrate)
+// This is the timeout for HTTP requests to notify external miners.
+const remoteSealerTimeout = 1 * time.Second
- results chan<- *types.Block
- currentBlock *types.Block
- currentWork [4]string
+type remoteSealer struct {
+ works map[common.Hash]*types.Block
+ rates map[common.Hash]hashrate
+ currentBlock *types.Block
+ currentWork [4]string
+ notifyCtx context.Context
+ cancelNotify context.CancelFunc // cancels all notification requests
+ reqWG sync.WaitGroup // tracks notification request goroutines
- notifyTransport = &http.Transport{}
- notifyClient = &http.Client{
- Transport: notifyTransport,
- Timeout: time.Second,
- }
- notifyReqs = make([]*http.Request, len(notify))
- )
- // notifyWork notifies all the specified mining endpoints of the availability of
- // new work to be processed.
- notifyWork := func() {
- work := currentWork
- blob, _ := json.Marshal(work)
-
- for i, url := range notify {
- // Terminate any previously pending request and create the new work
- if notifyReqs[i] != nil {
- notifyTransport.CancelRequest(notifyReqs[i])
- }
- notifyReqs[i], _ = http.NewRequest("POST", url, bytes.NewReader(blob))
- notifyReqs[i].Header.Set("Content-Type", "application/json")
-
- // Push the new work concurrently to all the remote nodes
- go func(req *http.Request, url string) {
- res, err := notifyClient.Do(req)
- if err != nil {
- log.Warn("Failed to notify remote miner", "err", err)
- } else {
- log.Trace("Notified remote miner", "miner", url, "hash", log.Lazy{Fn: func() common.Hash { return common.HexToHash(work[0]) }}, "target", work[2])
- res.Body.Close()
- }
- }(notifyReqs[i], url)
- }
- }
- // makeWork creates a work package for external miner.
- //
- // The work package consists of 3 strings:
- // result[0], 32 bytes hex encoded current block header pow-hash
- // result[1], 32 bytes hex encoded seed hash used for DAG
- // result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
- // result[3], hex encoded block number
- makeWork := func(block *types.Block) {
- hash := ethash.SealHash(block.Header())
-
- currentWork[0] = hash.Hex()
- currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
- currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
- currentWork[3] = hexutil.EncodeBig(block.Number())
-
- // Trace the seal work fetched by remote sealer.
- currentBlock = block
- works[hash] = block
- }
- // submitWork verifies the submitted pow solution, returning
- // whether the solution was accepted or not (not can be both a bad pow as well as
- // any other error, like no pending work or stale mining result).
- submitWork := func(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
- if currentBlock == nil {
- log.Error("Pending work without block", "sealhash", sealhash)
- return false
- }
- // Make sure the work submitted is present
- block := works[sealhash]
- if block == nil {
- log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", currentBlock.NumberU64())
- return false
- }
- // Verify the correctness of submitted result.
- header := block.Header()
- header.Nonce = nonce
- header.MixDigest = mixDigest
-
- start := time.Now()
- if !noverify {
- if err := ethash.verifySeal(nil, header, true); err != nil {
- log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
- return false
- }
- }
- // Make sure the result channel is assigned.
- if results == nil {
- log.Warn("Ethash result channel is empty, submitted mining result is rejected")
- return false
- }
- log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))
+ ethash *Ethash
+ noverify bool
+ notifyURLs []string
+ results chan<- *types.Block
+ workCh chan *sealTask // Notification channel to push new work and relative result channel to remote sealer
+ fetchWorkCh chan *sealWork // Channel used for remote sealer to fetch mining work
+ submitWorkCh chan *mineResult // Channel used for remote sealer to submit their mining result
+ fetchRateCh chan chan uint64 // Channel used to gather submitted hash rate for local or remote sealer.
+ submitRateCh chan *hashrate // Channel used for remote sealer to submit their mining hashrate
+ requestExit chan struct{}
+ exitCh chan struct{}
+}
- // Solutions seems to be valid, return to the miner and notify acceptance.
- solution := block.WithSeal(header)
+// sealTask wraps a seal block with relative result channel for remote sealer thread.
+type sealTask struct {
+ block *types.Block
+ results chan<- *types.Block
+}
- // The submitted solution is within the scope of acceptance.
- if solution.NumberU64()+staleThreshold > currentBlock.NumberU64() {
- select {
- case results <- solution:
- log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
- return true
- default:
- log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
- return false
- }
- }
- // The submitted block is too old to accept, drop it.
- log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
- return false
+// mineResult wraps the pow solution parameters for the specified block.
+type mineResult struct {
+ nonce types.BlockNonce
+ mixDigest common.Hash
+ hash common.Hash
+
+ errc chan error
+}
+
+// hashrate wraps the hash rate submitted by the remote sealer.
+type hashrate struct {
+ id common.Hash
+ ping time.Time
+ rate uint64
+
+ done chan struct{}
+}
+
+// sealWork wraps a seal work package for remote sealer.
+type sealWork struct {
+ errc chan error
+ res chan [4]string
+}
+
+func startRemoteSealer(ethash *Ethash, urls []string, noverify bool) *remoteSealer {
+ ctx, cancel := context.WithCancel(context.Background())
+ s := &remoteSealer{
+ ethash: ethash,
+ noverify: noverify,
+ notifyURLs: urls,
+ notifyCtx: ctx,
+ cancelNotify: cancel,
+ works: make(map[common.Hash]*types.Block),
+ rates: make(map[common.Hash]hashrate),
+ workCh: make(chan *sealTask),
+ fetchWorkCh: make(chan *sealWork),
+ submitWorkCh: make(chan *mineResult),
+ fetchRateCh: make(chan chan uint64),
+ submitRateCh: make(chan *hashrate),
+ requestExit: make(chan struct{}),
+ exitCh: make(chan struct{}),
}
+ go s.loop()
+ return s
+}
+
+func (s *remoteSealer) loop() {
+ defer func() {
+ s.ethash.config.Log.Trace("Ethash remote sealer is exiting")
+ s.cancelNotify()
+ s.reqWG.Wait()
+ close(s.exitCh)
+ }()
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
- case work := <-ethash.workCh:
+ case work := <-s.workCh:
// Update current work with new received block.
// Note same work can be past twice, happens when changing CPU threads.
- results = work.results
+ s.results = work.results
+ s.makeWork(work.block)
+ s.notifyWork()
- makeWork(work.block)
-
- // Notify and requested URLs of the new work availability
- notifyWork()
-
- case work := <-ethash.fetchWorkCh:
+ case work := <-s.fetchWorkCh:
// Return current mining work to remote miner.
- if currentBlock == nil {
+ if s.currentBlock == nil {
work.errc <- errNoMiningWork
} else {
- work.res <- currentWork
+ work.res <- s.currentWork
}
- case result := <-ethash.submitWorkCh:
+ case result := <-s.submitWorkCh:
// Verify submitted PoW solution based on maintained mining blocks.
- if submitWork(result.nonce, result.mixDigest, result.hash) {
+ if s.submitWork(result.nonce, result.mixDigest, result.hash) {
result.errc <- nil
} else {
result.errc <- errInvalidSealResult
}
- case result := <-ethash.submitRateCh:
+ case result := <-s.submitRateCh:
// Trace remote sealer's hash rate by submitted value.
- rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
+ s.rates[result.id] = hashrate{rate: result.rate, ping: time.Now()}
close(result.done)
- case req := <-ethash.fetchRateCh:
+ case req := <-s.fetchRateCh:
// Gather all hash rate submitted by remote sealer.
var total uint64
- for _, rate := range rates {
+ for _, rate := range s.rates {
// this could overflow
total += rate.rate
}
@@ -347,25 +315,126 @@ func (ethash *Ethash) remote(notify []string, noverify bool) {
case <-ticker.C:
// Clear stale submitted hash rate.
- for id, rate := range rates {
+ for id, rate := range s.rates {
if time.Since(rate.ping) > 10*time.Second {
- delete(rates, id)
+ delete(s.rates, id)
}
}
// Clear stale pending blocks
- if currentBlock != nil {
- for hash, block := range works {
- if block.NumberU64()+staleThreshold <= currentBlock.NumberU64() {
- delete(works, hash)
+ if s.currentBlock != nil {
+ for hash, block := range s.works {
+ if block.NumberU64()+staleThreshold <= s.currentBlock.NumberU64() {
+ delete(s.works, hash)
}
}
}
- case errc := <-ethash.exitCh:
- // Exit remote loop if ethash is closed and return relevant error.
- errc <- nil
- log.Trace("Ethash remote sealer is exiting")
+ case <-s.requestExit:
return
}
}
}
+
+// makeWork creates a work package for external miner.
+//
+// The work package consists of 3 strings:
+// result[0], 32 bytes hex encoded current block header pow-hash
+// result[1], 32 bytes hex encoded seed hash used for DAG
+// result[2], 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty
+// result[3], hex encoded block number
+func (s *remoteSealer) makeWork(block *types.Block) {
+ hash := s.ethash.SealHash(block.Header())
+ s.currentWork[0] = hash.Hex()
+ s.currentWork[1] = common.BytesToHash(SeedHash(block.NumberU64())).Hex()
+ s.currentWork[2] = common.BytesToHash(new(big.Int).Div(two256, block.Difficulty()).Bytes()).Hex()
+ s.currentWork[3] = hexutil.EncodeBig(block.Number())
+
+ // Trace the seal work fetched by remote sealer.
+ s.currentBlock = block
+ s.works[hash] = block
+}
+
+// notifyWork notifies all the specified mining endpoints of the availability of
+// new work to be processed.
+func (s *remoteSealer) notifyWork() {
+ work := s.currentWork
+ blob, _ := json.Marshal(work)
+ s.reqWG.Add(len(s.notifyURLs))
+ for _, url := range s.notifyURLs {
+ go s.sendNotification(s.notifyCtx, url, blob, work)
+ }
+}
+
+func (s *remoteSealer) sendNotification(ctx context.Context, url string, json []byte, work [4]string) {
+ defer s.reqWG.Done()
+
+ req, err := http.NewRequest("POST", url, bytes.NewReader(json))
+ if err != nil {
+ s.ethash.config.Log.Warn("Can't create remote miner notification", "err", err)
+ return
+ }
+ ctx, cancel := context.WithTimeout(ctx, remoteSealerTimeout)
+ defer cancel()
+ req = req.WithContext(ctx)
+ req.Header.Set("Content-Type", "application/json")
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ s.ethash.config.Log.Warn("Failed to notify remote miner", "err", err)
+ } else {
+ s.ethash.config.Log.Trace("Notified remote miner", "miner", url, "hash", work[0], "target", work[2])
+ resp.Body.Close()
+ }
+}
+
+// submitWork verifies the submitted pow solution, returning
+// whether the solution was accepted or not (not can be both a bad pow as well as
+// any other error, like no pending work or stale mining result).
+func (s *remoteSealer) submitWork(nonce types.BlockNonce, mixDigest common.Hash, sealhash common.Hash) bool {
+ if s.currentBlock == nil {
+ s.ethash.config.Log.Error("Pending work without block", "sealhash", sealhash)
+ return false
+ }
+ // Make sure the work submitted is present
+ block := s.works[sealhash]
+ if block == nil {
+ s.ethash.config.Log.Warn("Work submitted but none pending", "sealhash", sealhash, "curnumber", s.currentBlock.NumberU64())
+ return false
+ }
+ // Verify the correctness of submitted result.
+ header := block.Header()
+ header.Nonce = nonce
+ header.MixDigest = mixDigest
+
+ start := time.Now()
+ if !s.noverify {
+ if err := s.ethash.verifySeal(nil, header, true); err != nil {
+ s.ethash.config.Log.Warn("Invalid proof-of-work submitted", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)), "err", err)
+ return false
+ }
+ }
+ // Make sure the result channel is assigned.
+ if s.results == nil {
+ s.ethash.config.Log.Warn("Ethash result channel is empty, submitted mining result is rejected")
+ return false
+ }
+ s.ethash.config.Log.Trace("Verified correct proof-of-work", "sealhash", sealhash, "elapsed", common.PrettyDuration(time.Since(start)))
+
+ // Solutions seems to be valid, return to the miner and notify acceptance.
+ solution := block.WithSeal(header)
+
+ // The submitted solution is within the scope of acceptance.
+ if solution.NumberU64()+staleThreshold > s.currentBlock.NumberU64() {
+ select {
+ case s.results <- solution:
+ s.ethash.config.Log.Debug("Work submitted is acceptable", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
+ return true
+ default:
+ s.ethash.config.Log.Warn("Sealing result is not read by miner", "mode", "remote", "sealhash", sealhash)
+ return false
+ }
+ }
+ // The submitted block is too old to accept, drop it.
+ s.ethash.config.Log.Warn("Work submitted is too old", "number", solution.NumberU64(), "sealhash", sealhash, "hash", solution.Hash())
+ return false
+}
diff --git a/consensus/ethash/sealer_test.go b/consensus/ethash/sealer_test.go
index 82f08d673c..7f83def072 100644
--- a/consensus/ethash/sealer_test.go
+++ b/consensus/ethash/sealer_test.go
@@ -20,59 +20,39 @@ import (
"encoding/json"
"io/ioutil"
"math/big"
- "net"
"net/http"
+ "net/http/httptest"
"testing"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/internal/testlog"
+ "github.com/ethereum/go-ethereum/log"
)
// Tests whether remote HTTP servers are correctly notified of new work.
func TestRemoteNotify(t *testing.T) {
- // Start a simple webserver to capture notifications
+ // Start a simple web server to capture notifications.
sink := make(chan [3]string)
-
- server := &http.Server{
- Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- blob, err := ioutil.ReadAll(req.Body)
- if err != nil {
- t.Fatalf("failed to read miner notification: %v", err)
- }
- var work [3]string
- if err := json.Unmarshal(blob, &work); err != nil {
- t.Fatalf("failed to unmarshal miner notification: %v", err)
- }
- sink <- work
- }),
- }
- // Open a custom listener to extract its local address
- listener, err := net.Listen("tcp", "localhost:0")
- if err != nil {
- t.Fatalf("failed to open notification server: %v", err)
- }
- defer listener.Close()
-
- go server.Serve(listener)
-
- // Wait for server to start listening
- var tries int
- for tries = 0; tries < 10; tries++ {
- conn, _ := net.DialTimeout("tcp", listener.Addr().String(), 1*time.Second)
- if conn != nil {
- break
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ blob, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ t.Errorf("failed to read miner notification: %v", err)
}
- }
- if tries == 10 {
- t.Fatal("tcp listener not ready for more than 10 seconds")
- }
+ var work [3]string
+ if err := json.Unmarshal(blob, &work); err != nil {
+ t.Errorf("failed to unmarshal miner notification: %v", err)
+ }
+ sink <- work
+ }))
+ defer server.Close()
- // Create the custom ethash engine
- ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
+ // Create the custom ethash engine.
+ ethash := NewTester([]string{server.URL}, false)
defer ethash.Close()
- // Stream a work task and ensure the notification bubbles out
+ // Stream a work task and ensure the notification bubbles out.
header := &types.Header{Number: big.NewInt(1), Difficulty: big.NewInt(100)}
block := types.NewBlockWithHeader(header)
@@ -97,46 +77,37 @@ func TestRemoteNotify(t *testing.T) {
// Tests that pushing work packages fast to the miner doesn't cause any data race
// issues in the notifications.
func TestRemoteMultiNotify(t *testing.T) {
- // Start a simple webserver to capture notifications
+ // Start a simple web server to capture notifications.
sink := make(chan [3]string, 64)
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ blob, err := ioutil.ReadAll(req.Body)
+ if err != nil {
+ t.Errorf("failed to read miner notification: %v", err)
+ }
+ var work [3]string
+ if err := json.Unmarshal(blob, &work); err != nil {
+ t.Errorf("failed to unmarshal miner notification: %v", err)
+ }
+ sink <- work
+ }))
+ defer server.Close()
- server := &http.Server{
- Handler: http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
- blob, err := ioutil.ReadAll(req.Body)
- if err != nil {
- t.Fatalf("failed to read miner notification: %v", err)
- }
- var work [3]string
- if err := json.Unmarshal(blob, &work); err != nil {
- t.Fatalf("failed to unmarshal miner notification: %v", err)
- }
- sink <- work
- }),
- }
- // Open a custom listener to extract its local address
- listener, err := net.Listen("tcp", "localhost:0")
- if err != nil {
- t.Fatalf("failed to open notification server: %v", err)
- }
- defer listener.Close()
-
- go server.Serve(listener)
-
- // Create the custom ethash engine
- ethash := NewTester([]string{"http://" + listener.Addr().String()}, false)
+ // Create the custom ethash engine.
+ ethash := NewTester([]string{server.URL}, false)
+ ethash.config.Log = testlog.Logger(t, log.LvlWarn)
defer ethash.Close()
- // Stream a lot of work task and ensure all the notifications bubble out
+ // Stream a lot of work task and ensure all the notifications bubble out.
for i := 0; i < cap(sink); i++ {
header := &types.Header{Number: big.NewInt(int64(i)), Difficulty: big.NewInt(100)}
block := types.NewBlockWithHeader(header)
-
ethash.Seal(nil, block, nil, nil)
}
+
for i := 0; i < cap(sink); i++ {
select {
case <-sink:
- case <-time.After(3 * time.Second):
+ case <-time.After(10 * time.Second):
t.Fatalf("notification %d timed out", i)
}
}
@@ -206,10 +177,10 @@ func TestStaleSubmission(t *testing.T) {
select {
case res := <-results:
if res.Header().Nonce != fakeNonce {
- t.Errorf("case %d block nonce mismatch, want %s, get %s", id+1, fakeNonce, res.Header().Nonce)
+ t.Errorf("case %d block nonce mismatch, want %x, get %x", id+1, fakeNonce, res.Header().Nonce)
}
if res.Header().MixDigest != fakeDigest {
- t.Errorf("case %d block digest mismatch, want %s, get %s", id+1, fakeDigest, res.Header().MixDigest)
+ t.Errorf("case %d block digest mismatch, want %x, get %x", id+1, fakeDigest, res.Header().MixDigest)
}
if res.Header().Difficulty.Uint64() != c.headers[c.submitIndex].Difficulty.Uint64() {
t.Errorf("case %d block difficulty mismatch, want %d, get %d", id+1, c.headers[c.submitIndex].Difficulty, res.Header().Difficulty)
diff --git a/consensus/protocol.go b/consensus/protocol.go
index 368e358aee..8ccacdd2c7 100644
--- a/consensus/protocol.go
+++ b/consensus/protocol.go
@@ -13,6 +13,7 @@ import (
const (
eth63 = 63
eth64 = 64
+ eth65 = 65
Istanbul64 = 64
Istanbul99 = 99
)
@@ -26,15 +27,15 @@ var (
CliqueProtocol = Protocol{
Name: "eth",
- Versions: []uint{eth64, eth63},
- Lengths: map[uint]uint64{eth64: 17, eth63: 17},
+ Versions: []uint{eth65, eth64, eth63},
+ Lengths: map[uint]uint64{eth65: 17, eth64: 17, eth63: 17},
}
// Default: Keep up-to-date with eth/protocol.go
EthProtocol = Protocol{
Name: "eth",
- Versions: []uint{eth64, eth63},
- Lengths: map[uint]uint64{eth64: 17, eth63: 17},
+ Versions: []uint{eth65, eth64, eth63},
+ Lengths: map[uint]uint64{eth65: 17, eth64: 17, eth63: 17},
}
NorewardsProtocol = Protocol{
diff --git a/console/bridge.go b/console/bridge.go
index c7a67a6850..2625c481d5 100644
--- a/console/bridge.go
+++ b/console/bridge.go
@@ -20,14 +20,16 @@ import (
"encoding/json"
"fmt"
"io"
+ "reflect"
"strings"
"time"
+ "github.com/dop251/goja"
"github.com/ethereum/go-ethereum/accounts/scwallet"
"github.com/ethereum/go-ethereum/accounts/usbwallet"
- "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/common/hexutil"
+ "github.com/ethereum/go-ethereum/internal/jsre"
"github.com/ethereum/go-ethereum/rpc"
- "github.com/robertkrimen/otto"
)
// bridge is a collection of JavaScript utility methods to bride the .js runtime
@@ -47,10 +49,18 @@ func newBridge(client *rpc.Client, prompter UserPrompter, printer io.Writer) *br
}
}
+func getJeth(vm *goja.Runtime) *goja.Object {
+ jeth := vm.Get("jeth")
+ if jeth == nil {
+ panic(vm.ToValue("jeth object does not exist"))
+ }
+ return jeth.ToObject(vm)
+}
+
// NewAccount is a wrapper around the personal.newAccount RPC method that uses a
// non-echoing password prompt to acquire the passphrase and executes the original
// RPC method (saved in jeth.newAccount) with it to actually execute the RPC call.
-func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) NewAccount(call jsre.Call) (goja.Value, error) {
var (
password string
confirm string
@@ -58,52 +68,57 @@ func (b *bridge) NewAccount(call otto.FunctionCall) (response otto.Value) {
)
switch {
// No password was specified, prompt the user for it
- case len(call.ArgumentList) == 0:
- if password, err = b.prompter.PromptPassword("Password: "); err != nil {
- throwJSException(err.Error())
+ case len(call.Arguments) == 0:
+ if password, err = b.prompter.PromptPassword("Passphrase: "); err != nil {
+ return nil, err
}
- if confirm, err = b.prompter.PromptPassword("Repeat password: "); err != nil {
- throwJSException(err.Error())
+ if confirm, err = b.prompter.PromptPassword("Repeat passphrase: "); err != nil {
+ return nil, err
}
if password != confirm {
- throwJSException("passwords don't match!")
+ return nil, fmt.Errorf("passwords don't match!")
}
-
// A single string password was specified, use that
- case len(call.ArgumentList) == 1 && call.Argument(0).IsString():
- password, _ = call.Argument(0).ToString()
-
- // Otherwise fail with some error
+ case len(call.Arguments) == 1 && call.Argument(0).ToString() != nil:
+ password = call.Argument(0).ToString().String()
default:
- throwJSException("expected 0 or 1 string argument")
+ return nil, fmt.Errorf("expected 0 or 1 string argument")
}
// Password acquired, execute the call and return
- ret, err := call.Otto.Call("jeth.newAccount", nil, password)
+ newAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("newAccount"))
+ if !callable {
+ return nil, fmt.Errorf("jeth.newAccount is not callable")
+ }
+ ret, err := newAccount(goja.Null(), call.VM.ToValue(password))
if err != nil {
- throwJSException(err.Error())
+ return nil, err
}
- return ret
+ return ret, nil
}
// OpenWallet is a wrapper around personal.openWallet which can interpret and
// react to certain error messages, such as the Trezor PIN matrix request.
-func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) {
// Make sure we have a wallet specified to open
- if !call.Argument(0).IsString() {
- throwJSException("first argument must be the wallet URL to open")
+ if call.Argument(0).ToObject(call.VM).ClassName() != "String" {
+ return nil, fmt.Errorf("first argument must be the wallet URL to open")
}
wallet := call.Argument(0)
- var passwd otto.Value
- if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
- passwd, _ = otto.ToValue("")
+ var passwd goja.Value
+ if goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {
+ passwd = call.VM.ToValue("")
} else {
passwd = call.Argument(1)
}
// Open the wallet and return if successful in itself
- val, err := call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
+ openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
+ if !callable {
+ return nil, fmt.Errorf("jeth.openWallet is not callable")
+ }
+ val, err := openWallet(goja.Null(), wallet, passwd)
if err == nil {
- return val
+ return val, nil
}
// Wallet open failed, report error unless it's a PIN or PUK entry
@@ -111,32 +126,31 @@ func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
case strings.HasSuffix(err.Error(), usbwallet.ErrTrezorPINNeeded.Error()):
val, err = b.readPinAndReopenWallet(call)
if err == nil {
- return val
+ return val, nil
}
val, err = b.readPassphraseAndReopenWallet(call)
if err != nil {
- throwJSException(err.Error())
+ return nil, err
}
case strings.HasSuffix(err.Error(), scwallet.ErrPairingPasswordNeeded.Error()):
// PUK input requested, fetch from the user and call open again
- if input, err := b.prompter.PromptPassword("Please enter the pairing password: "); err != nil {
- throwJSException(err.Error())
- } else {
- passwd, _ = otto.ToValue(input)
+ input, err := b.prompter.PromptPassword("Please enter the pairing password: ")
+ if err != nil {
+ return nil, err
}
- if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
+ passwd = call.VM.ToValue(input)
+ if val, err = openWallet(goja.Null(), wallet, passwd); err != nil {
if !strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()) {
- throwJSException(err.Error())
+ return nil, err
} else {
// PIN input requested, fetch from the user and call open again
- if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
- throwJSException(err.Error())
- } else {
- passwd, _ = otto.ToValue(input)
+ input, err := b.prompter.PromptPassword("Please enter current PIN: ")
+ if err != nil {
+ return nil, err
}
- if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
- throwJSException(err.Error())
+ if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {
+ return nil, err
}
}
}
@@ -144,52 +158,52 @@ func (b *bridge) OpenWallet(call otto.FunctionCall) (response otto.Value) {
case strings.HasSuffix(err.Error(), scwallet.ErrPINUnblockNeeded.Error()):
// PIN unblock requested, fetch PUK and new PIN from the user
var pukpin string
- if input, err := b.prompter.PromptPassword("Please enter current PUK: "); err != nil {
- throwJSException(err.Error())
- } else {
- pukpin = input
+ input, err := b.prompter.PromptPassword("Please enter current PUK: ")
+ if err != nil {
+ return nil, err
}
- if input, err := b.prompter.PromptPassword("Please enter new PIN: "); err != nil {
- throwJSException(err.Error())
- } else {
- pukpin += input
+ pukpin = input
+ input, err = b.prompter.PromptPassword("Please enter new PIN: ")
+ if err != nil {
+ return nil, err
}
- passwd, _ = otto.ToValue(pukpin)
- if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
- throwJSException(err.Error())
+ pukpin += input
+
+ if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(pukpin)); err != nil {
+ return nil, err
}
case strings.HasSuffix(err.Error(), scwallet.ErrPINNeeded.Error()):
// PIN input requested, fetch from the user and call open again
- if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
- throwJSException(err.Error())
- } else {
- passwd, _ = otto.ToValue(input)
+ input, err := b.prompter.PromptPassword("Please enter current PIN: ")
+ if err != nil {
+ return nil, err
}
- if val, err = call.Otto.Call("jeth.openWallet", nil, wallet, passwd); err != nil {
- throwJSException(err.Error())
+ if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil {
+ return nil, err
}
default:
// Unknown error occurred, drop to the user
- throwJSException(err.Error())
+ return nil, err
}
- return val
+ return val, nil
}
-func (b *bridge) readPassphraseAndReopenWallet(call otto.FunctionCall) (otto.Value, error) {
- var passwd otto.Value
+func (b *bridge) readPassphraseAndReopenWallet(call jsre.Call) (goja.Value, error) {
wallet := call.Argument(0)
- if input, err := b.prompter.PromptPassword("Please enter your password: "); err != nil {
- throwJSException(err.Error())
- } else {
- passwd, _ = otto.ToValue(input)
+ input, err := b.prompter.PromptPassword("Please enter your passphrase: ")
+ if err != nil {
+ return nil, err
+ }
+ openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
+ if !callable {
+ return nil, fmt.Errorf("jeth.openWallet is not callable")
}
- return call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
+ return openWallet(goja.Null(), wallet, call.VM.ToValue(input))
}
-func (b *bridge) readPinAndReopenWallet(call otto.FunctionCall) (otto.Value, error) {
- var passwd otto.Value
+func (b *bridge) readPinAndReopenWallet(call jsre.Call) (goja.Value, error) {
wallet := call.Argument(0)
// Trezor PIN matrix input requested, display the matrix to the user and fetch the data
fmt.Fprintf(b.printer, "Look at the device for number positions\n\n")
@@ -199,155 +213,154 @@ func (b *bridge) readPinAndReopenWallet(call otto.FunctionCall) (otto.Value, err
fmt.Fprintf(b.printer, "--+---+--\n")
fmt.Fprintf(b.printer, "1 | 2 | 3\n\n")
- if input, err := b.prompter.PromptPassword("Please enter current PIN: "); err != nil {
- throwJSException(err.Error())
- } else {
- passwd, _ = otto.ToValue(input)
+ input, err := b.prompter.PromptPassword("Please enter current PIN: ")
+ if err != nil {
+ return nil, err
+ }
+ openWallet, callable := goja.AssertFunction(getJeth(call.VM).Get("openWallet"))
+ if !callable {
+ return nil, fmt.Errorf("jeth.openWallet is not callable")
}
- return call.Otto.Call("jeth.openWallet", nil, wallet, passwd)
+ return openWallet(goja.Null(), wallet, call.VM.ToValue(input))
}
// UnlockAccount is a wrapper around the personal.unlockAccount RPC method that
// uses a non-echoing password prompt to acquire the passphrase and executes the
// original RPC method (saved in jeth.unlockAccount) with it to actually execute
// the RPC call.
-func (b *bridge) UnlockAccount(call otto.FunctionCall) (response otto.Value) {
- // Make sure we have an account specified to unlock
- if !call.Argument(0).IsString() {
- throwJSException("first argument must be the account to unlock")
+func (b *bridge) UnlockAccount(call jsre.Call) (goja.Value, error) {
+ // Make sure we have an account specified to unlock.
+ if call.Argument(0).ExportType().Kind() != reflect.String {
+ return nil, fmt.Errorf("first argument must be the account to unlock")
}
account := call.Argument(0)
- // If password is not given or is the null value, prompt the user for it
- var passwd otto.Value
-
- if call.Argument(1).IsUndefined() || call.Argument(1).IsNull() {
+ // If password is not given or is the null value, prompt the user for it.
+ var passwd goja.Value
+ if goja.IsUndefined(call.Argument(1)) || goja.IsNull(call.Argument(1)) {
fmt.Fprintf(b.printer, "Unlock account %s\n", account)
- if input, err := b.prompter.PromptPassword("Password: "); err != nil {
- throwJSException(err.Error())
- } else {
- passwd, _ = otto.ToValue(input)
+ input, err := b.prompter.PromptPassword("Passphrase: ")
+ if err != nil {
+ return nil, err
}
+ passwd = call.VM.ToValue(input)
} else {
- if !call.Argument(1).IsString() {
- throwJSException("password must be a string")
+ if call.Argument(1).ExportType().Kind() != reflect.String {
+ return nil, fmt.Errorf("password must be a string")
}
passwd = call.Argument(1)
}
- // Third argument is the duration how long the account must be unlocked.
- duration := otto.NullValue()
- if call.Argument(2).IsDefined() && !call.Argument(2).IsNull() {
- if !call.Argument(2).IsNumber() {
- throwJSException("unlock duration must be a number")
+
+ // Third argument is the duration how long the account should be unlocked.
+ duration := goja.Null()
+ if !goja.IsUndefined(call.Argument(2)) && !goja.IsNull(call.Argument(2)) {
+ if !isNumber(call.Argument(2)) {
+ return nil, fmt.Errorf("unlock duration must be a number")
}
duration = call.Argument(2)
}
- // Send the request to the backend and return
- val, err := call.Otto.Call("jeth.unlockAccount", nil, account, passwd, duration)
- if err != nil {
- throwJSException(err.Error())
+
+ // Send the request to the backend and return.
+ unlockAccount, callable := goja.AssertFunction(getJeth(call.VM).Get("unlockAccount"))
+ if !callable {
+ return nil, fmt.Errorf("jeth.unlockAccount is not callable")
}
- return val
+ return unlockAccount(goja.Null(), account, passwd, duration)
}
// Sign is a wrapper around the personal.sign RPC method that uses a non-echoing password
// prompt to acquire the passphrase and executes the original RPC method (saved in
// jeth.sign) with it to actually execute the RPC call.
-func (b *bridge) Sign(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) Sign(call jsre.Call) (goja.Value, error) {
var (
message = call.Argument(0)
account = call.Argument(1)
passwd = call.Argument(2)
)
- if !message.IsString() {
- throwJSException("first argument must be the message to sign")
+ if message.ExportType().Kind() != reflect.String {
+ return nil, fmt.Errorf("first argument must be the message to sign")
}
- if !account.IsString() {
- throwJSException("second argument must be the account to sign with")
+ if account.ExportType().Kind() != reflect.String {
+ return nil, fmt.Errorf("second argument must be the account to sign with")
}
// if the password is not given or null ask the user and ensure password is a string
- if passwd.IsUndefined() || passwd.IsNull() {
+ if goja.IsUndefined(passwd) || goja.IsNull(passwd) {
fmt.Fprintf(b.printer, "Give password for account %s\n", account)
- if input, err := b.prompter.PromptPassword("Password: "); err != nil {
- throwJSException(err.Error())
- } else {
- passwd, _ = otto.ToValue(input)
+ input, err := b.prompter.PromptPassword("Password: ")
+ if err != nil {
+ return nil, err
}
- }
- if !passwd.IsString() {
- throwJSException("third argument must be the password to unlock the account")
+ passwd = call.VM.ToValue(input)
+ } else if passwd.ExportType().Kind() != reflect.String {
+ return nil, fmt.Errorf("third argument must be the password to unlock the account")
}
// Send the request to the backend and return
- val, err := call.Otto.Call("jeth.sign", nil, message, account, passwd)
- if err != nil {
- throwJSException(err.Error())
+ sign, callable := goja.AssertFunction(getJeth(call.VM).Get("sign"))
+ if !callable {
+ return nil, fmt.Errorf("jeth.sign is not callable")
}
- return val
+ return sign(goja.Null(), message, account, passwd)
}
// Sleep will block the console for the specified number of seconds.
-func (b *bridge) Sleep(call otto.FunctionCall) (response otto.Value) {
- if call.Argument(0).IsNumber() {
- sleep, _ := call.Argument(0).ToInteger()
- time.Sleep(time.Duration(sleep) * time.Second)
- return otto.TrueValue()
+func (b *bridge) Sleep(call jsre.Call) (goja.Value, error) {
+ if !isNumber(call.Argument(0)) {
+ return nil, fmt.Errorf("usage: sleep()")
}
- return throwJSException("usage: sleep()")
+ sleep := call.Argument(0).ToFloat()
+ time.Sleep(time.Duration(sleep * float64(time.Second)))
+ return call.VM.ToValue(true), nil
}
// SleepBlocks will block the console for a specified number of new blocks optionally
// until the given timeout is reached.
-func (b *bridge) SleepBlocks(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) SleepBlocks(call jsre.Call) (goja.Value, error) {
+ // Parse the input parameters for the sleep.
var (
blocks = int64(0)
sleep = int64(9999999999999999) // indefinitely
)
- // Parse the input parameters for the sleep
- nArgs := len(call.ArgumentList)
+ nArgs := len(call.Arguments)
if nArgs == 0 {
- throwJSException("usage: sleepBlocks([, max sleep in seconds])")
+ return nil, fmt.Errorf("usage: sleepBlocks([, max sleep in seconds])")
}
if nArgs >= 1 {
- if call.Argument(0).IsNumber() {
- blocks, _ = call.Argument(0).ToInteger()
- } else {
- throwJSException("expected number as first argument")
+ if !isNumber(call.Argument(0)) {
+ return nil, fmt.Errorf("expected number as first argument")
}
+ blocks = call.Argument(0).ToInteger()
}
if nArgs >= 2 {
- if call.Argument(1).IsNumber() {
- sleep, _ = call.Argument(1).ToInteger()
- } else {
- throwJSException("expected number as second argument")
+ if !isNumber(call.Argument(1)) {
+ return nil, fmt.Errorf("expected number as second argument")
}
+ sleep = call.Argument(1).ToInteger()
}
- // go through the console, this will allow web3 to call the appropriate
- // callbacks if a delayed response or notification is received.
- blockNumber := func() int64 {
- result, err := call.Otto.Run("eth.blockNumber")
+
+ // Poll the current block number until either it or a timeout is reached.
+ var (
+ deadline = time.Now().Add(time.Duration(sleep) * time.Second)
+ lastNumber = ^hexutil.Uint64(0)
+ )
+ for time.Now().Before(deadline) {
+ var number hexutil.Uint64
+ err := b.client.Call(&number, "eth_blockNumber")
if err != nil {
- throwJSException(err.Error())
+ return nil, err
}
- block, err := result.ToInteger()
- if err != nil {
- throwJSException(err.Error())
+ if number != lastNumber {
+ lastNumber = number
+ blocks--
}
- return block
- }
- // Poll the current block number until either it ot a timeout is reached
- targetBlockNr := blockNumber() + blocks
- deadline := time.Now().Add(time.Duration(sleep) * time.Second)
-
- for time.Now().Before(deadline) {
- if blockNumber() >= targetBlockNr {
- return otto.TrueValue()
+ if blocks <= 0 {
+ break
}
time.Sleep(time.Second)
}
- return otto.FalseValue()
+ return call.VM.ToValue(true), nil
}
type jsonrpcCall struct {
@@ -357,15 +370,15 @@ type jsonrpcCall struct {
}
// Send implements the web3 provider "send" method.
-func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
+func (b *bridge) Send(call jsre.Call) (goja.Value, error) {
// Remarshal the request into a Go value.
- JSON, _ := call.Otto.Object("JSON")
- reqVal, err := JSON.Call("stringify", call.Argument(0))
+ reqVal, err := call.Argument(0).ToObject(call.VM).MarshalJSON()
if err != nil {
- throwJSException(err.Error())
+ return nil, err
}
+
var (
- rawReq = reqVal.String()
+ rawReq = string(reqVal)
dec = json.NewDecoder(strings.NewReader(rawReq))
reqs []jsonrpcCall
batch bool
@@ -381,10 +394,12 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
}
// Execute the requests.
- resps, _ := call.Otto.Object("new Array()")
+ var resps []*goja.Object
for _, req := range reqs {
- resp, _ := call.Otto.Object(`({"jsonrpc":"2.0"})`)
+ resp := call.VM.NewObject()
+ resp.Set("jsonrpc", "2.0")
resp.Set("id", req.ID)
+
var result json.RawMessage
err = b.client.Call(&result, req.Method, req.Params...)
switch err := err.(type) {
@@ -392,9 +407,14 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
if result == nil {
// Special case null because it is decoded as an empty
// raw message for some reason.
- resp.Set("result", otto.NullValue())
+ resp.Set("result", goja.Null())
} else {
- resultVal, err := JSON.Call("parse", string(result))
+ JSON := call.VM.Get("JSON").ToObject(call.VM)
+ parse, callable := goja.AssertFunction(JSON.Get("parse"))
+ if !callable {
+ return nil, fmt.Errorf("JSON.parse is not a function")
+ }
+ resultVal, err := parse(goja.Null(), call.VM.ToValue(string(result)))
if err != nil {
setError(resp, -32603, err.Error())
} else {
@@ -406,33 +426,38 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) {
default:
setError(resp, -32603, err.Error())
}
- resps.Call("push", resp)
+ resps = append(resps, resp)
}
// Return the responses either to the callback (if supplied)
// or directly as the return value.
+ var result goja.Value
if batch {
- response = resps.Value()
+ result = call.VM.ToValue(resps)
} else {
- response, _ = resps.Get("0")
+ result = resps[0]
}
- if fn := call.Argument(1); fn.Class() == "Function" {
- fn.Call(otto.NullValue(), otto.NullValue(), response)
- return otto.UndefinedValue()
+ if fn, isFunc := goja.AssertFunction(call.Argument(1)); isFunc {
+ fn(goja.Null(), goja.Null(), result)
+ return goja.Undefined(), nil
}
- return response
+ return result, nil
}
-func setError(resp *otto.Object, code int, msg string) {
+func setError(resp *goja.Object, code int, msg string) {
resp.Set("error", map[string]interface{}{"code": code, "message": msg})
}
-// throwJSException panics on an otto.Value. The Otto VM will recover from the
-// Go panic and throw msg as a JavaScript error.
-func throwJSException(msg interface{}) otto.Value {
- val, err := otto.ToValue(msg)
- if err != nil {
- log.Error("Failed to serialize JavaScript exception", "exception", msg, "err", err)
+// isNumber returns true if input value is a JS number.
+func isNumber(v goja.Value) bool {
+ k := v.ExportType().Kind()
+ return k >= reflect.Int && k <= reflect.Float64
+}
+
+func getObject(vm *goja.Runtime, name string) *goja.Object {
+ v := vm.Get(name)
+ if v == nil {
+ return nil
}
- panic(val)
+ return v.ToObject(vm)
}
diff --git a/console/console.go b/console/console.go
index 934389c972..fb03cf9b09 100644
--- a/console/console.go
+++ b/console/console.go
@@ -29,12 +29,13 @@ import (
"strings"
"syscall"
+ "github.com/dop251/goja"
"github.com/ethereum/go-ethereum/internal/jsre"
+ "github.com/ethereum/go-ethereum/internal/jsre/deps"
"github.com/ethereum/go-ethereum/internal/web3ext"
"github.com/ethereum/go-ethereum/rpc"
"github.com/mattn/go-colorable"
"github.com/peterh/liner"
- "github.com/robertkrimen/otto"
)
var (
@@ -88,6 +89,7 @@ func New(config Config) (*Console, error) {
if config.Printer == nil {
config.Printer = colorable.NewColorableStdout()
}
+
// Initialize the console and return
console := &Console{
client: config.Client,
@@ -109,125 +111,145 @@ func New(config Config) (*Console, error) {
// init retrieves the available APIs from the remote RPC provider and initializes
// the console's JavaScript namespaces based on the exposed modules.
func (c *Console) init(preload []string) error {
- // Initialize the JavaScript <-> Go RPC bridge
+ c.initConsoleObject()
+
+ // Initialize the JavaScript <-> Go RPC bridge.
bridge := newBridge(c.client, c.prompter, c.printer)
- c.jsre.Set("jeth", struct{}{})
+ if err := c.initWeb3(bridge); err != nil {
+ return err
+ }
+ if err := c.initExtensions(); err != nil {
+ return err
+ }
+
+ // Add bridge overrides for web3.js functionality.
+ c.jsre.Do(func(vm *goja.Runtime) {
+ c.initAdmin(vm, bridge)
+ c.initPersonal(vm, bridge)
+ })
+
+ // Preload JavaScript files.
+ for _, path := range preload {
+ if err := c.jsre.Exec(path); err != nil {
+ failure := err.Error()
+ if gojaErr, ok := err.(*goja.Exception); ok {
+ failure = gojaErr.String()
+ }
+ return fmt.Errorf("%s: %v", path, failure)
+ }
+ }
- jethObj, _ := c.jsre.Get("jeth")
- jethObj.Object().Set("send", bridge.Send)
- jethObj.Object().Set("sendAsync", bridge.Send)
+ // Configure the input prompter for history and tab completion.
+ if c.prompter != nil {
+ if content, err := ioutil.ReadFile(c.histPath); err != nil {
+ c.prompter.SetHistory(nil)
+ } else {
+ c.history = strings.Split(string(content), "\n")
+ c.prompter.SetHistory(c.history)
+ }
+ c.prompter.SetWordCompleter(c.AutoCompleteInput)
+ }
+ return nil
+}
- consoleObj, _ := c.jsre.Get("console")
- consoleObj.Object().Set("log", c.consoleOutput)
- consoleObj.Object().Set("error", c.consoleOutput)
+func (c *Console) initConsoleObject() {
+ c.jsre.Do(func(vm *goja.Runtime) {
+ console := vm.NewObject()
+ console.Set("log", c.consoleOutput)
+ console.Set("error", c.consoleOutput)
+ vm.Set("console", console)
+ })
+}
- // Load all the internal utility JavaScript libraries
- if err := c.jsre.Compile("bignumber.js", jsre.BignumberJs); err != nil {
+func (c *Console) initWeb3(bridge *bridge) error {
+ bnJS := string(deps.MustAsset("bignumber.js"))
+ web3JS := string(deps.MustAsset("web3.js"))
+ if err := c.jsre.Compile("bignumber.js", bnJS); err != nil {
return fmt.Errorf("bignumber.js: %v", err)
}
- if err := c.jsre.Compile("web3.js", jsre.Web3Js); err != nil {
+ if err := c.jsre.Compile("web3.js", web3JS); err != nil {
return fmt.Errorf("web3.js: %v", err)
}
if _, err := c.jsre.Run("var Web3 = require('web3');"); err != nil {
return fmt.Errorf("web3 require: %v", err)
}
- if _, err := c.jsre.Run("var web3 = new Web3(jeth);"); err != nil {
- return fmt.Errorf("web3 provider: %v", err)
- }
- // Load the supported APIs into the JavaScript runtime environment
+ var err error
+ c.jsre.Do(func(vm *goja.Runtime) {
+ transport := vm.NewObject()
+ transport.Set("send", jsre.MakeCallback(vm, bridge.Send))
+ transport.Set("sendAsync", jsre.MakeCallback(vm, bridge.Send))
+ vm.Set("_consoleWeb3Transport", transport)
+ _, err = vm.RunString("var web3 = new Web3(_consoleWeb3Transport)")
+ })
+ return err
+}
+
+// initExtensions loads and registers web3.js extensions.
+func (c *Console) initExtensions() error {
+ // Compute aliases from server-provided modules.
apis, err := c.client.SupportedModules()
if err != nil {
return fmt.Errorf("api modules: %v", err)
}
- flatten := "var eth = web3.eth; var personal = web3.personal; "
+ aliases := map[string]struct{}{"eth": {}, "personal": {}}
for api := range apis {
if api == "web3" {
- continue // manually mapped or ignore
+ continue
}
//quorum
// the @ symbol results in errors that prevent the extension from being added to the web3 object
api = strings.Replace(api, "plugin@", "plugin_", 1)
//!quorum
-
+ aliases[api] = struct{}{}
if file, ok := web3ext.Modules[api]; ok {
- // Load our extension for the module.
- if err = c.jsre.Compile(fmt.Sprintf("%s.js", api), file); err != nil {
+ if err = c.jsre.Compile(api+".js", file); err != nil {
return fmt.Errorf("%s.js: %v", api, err)
}
- flatten += fmt.Sprintf("var %s = web3.%s; ", api, api)
- } else if obj, err := c.jsre.Run("web3." + api); err == nil && obj.IsObject() {
- // Enable web3.js built-in extension if available.
- flatten += fmt.Sprintf("var %s = web3.%s; ", api, api)
}
}
- if _, err = c.jsre.Run(flatten); err != nil {
- return fmt.Errorf("namespace flattening: %v", err)
- }
- // Initialize the global name register (disabled for now)
- //c.jsre.Run(`var GlobalRegistrar = eth.contract(` + registrar.GlobalRegistrarAbi + `); registrar = GlobalRegistrar.at("` + registrar.GlobalRegistrarAddr + `");`)
- // If the console is in interactive mode, instrument password related methods to query the user
- if c.prompter != nil {
- // Retrieve the account management object to instrument
- personal, err := c.jsre.Get("personal")
- if err != nil {
- return err
- }
- // Override the openWallet, unlockAccount, newAccount and sign methods since
- // these require user interaction. Assign these method in the Console the
- // original web3 callbacks. These will be called by the jeth.* methods after
- // they got the password from the user and send the original web3 request to
- // the backend.
- if obj := personal.Object(); obj != nil { // make sure the personal api is enabled over the interface
- if _, err = c.jsre.Run(`jeth.openWallet = personal.openWallet;`); err != nil {
- return fmt.Errorf("personal.openWallet: %v", err)
- }
- if _, err = c.jsre.Run(`jeth.unlockAccount = personal.unlockAccount;`); err != nil {
- return fmt.Errorf("personal.unlockAccount: %v", err)
- }
- if _, err = c.jsre.Run(`jeth.newAccount = personal.newAccount;`); err != nil {
- return fmt.Errorf("personal.newAccount: %v", err)
- }
- if _, err = c.jsre.Run(`jeth.sign = personal.sign;`); err != nil {
- return fmt.Errorf("personal.sign: %v", err)
- }
- obj.Set("openWallet", bridge.OpenWallet)
- obj.Set("unlockAccount", bridge.UnlockAccount)
- obj.Set("newAccount", bridge.NewAccount)
- obj.Set("sign", bridge.Sign)
- }
- }
- // The admin.sleep and admin.sleepBlocks are offered by the console and not by the RPC layer.
- admin, err := c.jsre.Get("admin")
- if err != nil {
- return err
- }
- if obj := admin.Object(); obj != nil { // make sure the admin api is enabled over the interface
- obj.Set("sleepBlocks", bridge.SleepBlocks)
- obj.Set("sleep", bridge.Sleep)
- obj.Set("clearHistory", c.clearHistory)
- }
- // Preload any JavaScript files before starting the console
- for _, path := range preload {
- if err := c.jsre.Exec(path); err != nil {
- failure := err.Error()
- if ottoErr, ok := err.(*otto.Error); ok {
- failure = ottoErr.String()
+ // Apply aliases.
+ c.jsre.Do(func(vm *goja.Runtime) {
+ web3 := getObject(vm, "web3")
+ for name := range aliases {
+ if v := web3.Get(name); v != nil {
+ vm.Set(name, v)
}
- return fmt.Errorf("%s: %v", path, failure)
}
+ })
+ return nil
+}
+
+// initAdmin creates additional admin APIs implemented by the bridge.
+func (c *Console) initAdmin(vm *goja.Runtime, bridge *bridge) {
+ if admin := getObject(vm, "admin"); admin != nil {
+ admin.Set("sleepBlocks", jsre.MakeCallback(vm, bridge.SleepBlocks))
+ admin.Set("sleep", jsre.MakeCallback(vm, bridge.Sleep))
+ admin.Set("clearHistory", c.clearHistory)
}
- // Configure the console's input prompter for scrollback and tab completion
- if c.prompter != nil {
- if content, err := ioutil.ReadFile(c.histPath); err != nil {
- c.prompter.SetHistory(nil)
- } else {
- c.history = strings.Split(string(content), "\n")
- c.prompter.SetHistory(c.history)
- }
- c.prompter.SetWordCompleter(c.AutoCompleteInput)
+}
+
+// initPersonal redirects account-related API methods through the bridge.
+//
+// If the console is in interactive mode and the 'personal' API is available, override
+// the openWallet, unlockAccount, newAccount and sign methods since these require user
+// interaction. The original web3 callbacks are stored in 'jeth'. These will be called
+// by the bridge after the prompt and send the original web3 request to the backend.
+func (c *Console) initPersonal(vm *goja.Runtime, bridge *bridge) {
+ personal := getObject(vm, "personal")
+ if personal == nil || c.prompter == nil {
+ return
}
- return nil
+ jeth := vm.NewObject()
+ vm.Set("jeth", jeth)
+ jeth.Set("openWallet", personal.Get("openWallet"))
+ jeth.Set("unlockAccount", personal.Get("unlockAccount"))
+ jeth.Set("newAccount", personal.Get("newAccount"))
+ jeth.Set("sign", personal.Get("sign"))
+ personal.Set("openWallet", jsre.MakeCallback(vm, bridge.OpenWallet))
+ personal.Set("unlockAccount", jsre.MakeCallback(vm, bridge.UnlockAccount))
+ personal.Set("newAccount", jsre.MakeCallback(vm, bridge.NewAccount))
+ personal.Set("sign", jsre.MakeCallback(vm, bridge.Sign))
}
func (c *Console) clearHistory() {
@@ -242,13 +264,13 @@ func (c *Console) clearHistory() {
// consoleOutput is an override for the console.log and console.error methods to
// stream the output into the configured output stream instead of stdout.
-func (c *Console) consoleOutput(call otto.FunctionCall) otto.Value {
+func (c *Console) consoleOutput(call goja.FunctionCall) goja.Value {
var output []string
- for _, argument := range call.ArgumentList {
+ for _, argument := range call.Arguments {
output = append(output, fmt.Sprintf("%v", argument))
}
fmt.Fprintln(c.printer, strings.Join(output, " "))
- return otto.Value{}
+ return goja.Null()
}
// AutoCompleteInput is a pre-assembled word completer to be used by the user
@@ -354,13 +376,13 @@ func (c *Console) getConsensus() string {
// Evaluate executes code and pretty prints the result to the specified output
// stream.
-func (c *Console) Evaluate(statement string) error {
+func (c *Console) Evaluate(statement string) {
defer func() {
if r := recover(); r != nil {
fmt.Fprintf(c.printer, "[native] error: %v\n", r)
}
}()
- return c.jsre.Evaluate(statement, c.printer)
+ c.jsre.Evaluate(statement, c.printer)
}
// Interactive starts an interactive user session, where input is propted from
diff --git a/console/console_test.go b/console/console_test.go
index 89dd7cd838..9a2b474442 100644
--- a/console/console_test.go
+++ b/console/console_test.go
@@ -289,7 +289,7 @@ func TestPrettyError(t *testing.T) {
defer tester.Close(t)
tester.console.Evaluate("throw 'hello'")
- want := jsre.ErrorColor("hello") + "\n"
+ want := jsre.ErrorColor("hello") + "\n\tat :1:7(1)\n\n"
if output := tester.output.String(); output != want {
t.Fatalf("pretty error mismatch: have %s, want %s", output, want)
}
diff --git a/contracts/checkpointoracle/oracle.go b/contracts/checkpointoracle/oracle.go
index 2d725397bd..1f273272ab 100644
--- a/contracts/checkpointoracle/oracle.go
+++ b/contracts/checkpointoracle/oracle.go
@@ -29,8 +29,9 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
-// CheckpointOracle is a Go wrapper around an on-chain light client checkpoint oracle.
+// CheckpointOracle is a Go wrapper around an on-chain checkpoint oracle contract.
type CheckpointOracle struct {
+ address common.Address
contract *contract.CheckpointOracle
}
@@ -40,7 +41,12 @@ func NewCheckpointOracle(contractAddr common.Address, backend bind.ContractBacke
if err != nil {
return nil, err
}
- return &CheckpointOracle{contract: c}, nil
+ return &CheckpointOracle{address: contractAddr, contract: c}, nil
+}
+
+// ContractAddr returns the address of contract.
+func (oracle *CheckpointOracle) ContractAddr() common.Address {
+ return oracle.address
}
// Contract returns the underlying contract instance.
diff --git a/core/blockchain.go b/core/blockchain.go
index 4366ffcc36..ac7581eac2 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -445,6 +445,11 @@ func (bc *BlockChain) SetHead(head uint64) error {
}
}
rawdb.WriteHeadBlockHash(db, newHeadBlock.Hash())
+
+ // Degrade the chain markers if they are explicitly reverted.
+ // In theory we should update all in-memory markers in the
+ // last step, however the direction of SetHead is from high
+ // to low, so it's safe to update in-memory markers directly.
bc.currentBlock.Store(newHeadBlock)
headBlockGauge.Update(int64(newHeadBlock.NumberU64()))
}
@@ -457,6 +462,11 @@ func (bc *BlockChain) SetHead(head uint64) error {
newHeadFastBlock = bc.genesisBlock
}
rawdb.WriteHeadFastBlockHash(db, newHeadFastBlock.Hash())
+
+ // Degrade the chain markers if they are explicitly reverted.
+ // In theory we should update all in-memory markers in the
+ // last step, however the direction of SetHead is from high
+ // to low, so it's safe to update in-memory markers directly.
bc.currentFastBlock.Store(newHeadFastBlock)
headFastBlockGauge.Update(int64(newHeadFastBlock.NumberU64()))
}
@@ -592,21 +602,22 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
defer bc.chainmu.Unlock()
// Prepare the genesis block and reinitialise the chain
- if err := bc.hc.WriteTd(genesis.Hash(), genesis.NumberU64(), genesis.Difficulty()); err != nil {
- log.Crit("Failed to write genesis block TD", "err", err)
+ batch := bc.db.NewBatch()
+ rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
+ rawdb.WriteBlock(batch, genesis)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write genesis block", "err", err)
}
- rawdb.WriteBlock(bc.db, genesis)
+ bc.writeHeadBlock(genesis)
+ // Last update all in-memory chain markers
bc.genesisBlock = genesis
- bc.insert(bc.genesisBlock)
bc.currentBlock.Store(bc.genesisBlock)
headBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
-
bc.hc.SetGenesis(bc.genesisBlock.Header())
bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
bc.currentFastBlock.Store(bc.genesisBlock)
headFastBlockGauge.Update(int64(bc.genesisBlock.NumberU64()))
-
return nil
}
@@ -664,31 +675,39 @@ func (bc *BlockChain) ExportN(w io.Writer, first uint64, last uint64) error {
return nil
}
-// insert injects a new head block into the current block chain. This method
+// writeHeadBlock injects a new head block into the current block chain. This method
// assumes that the block is indeed a true head. It will also reset the head
// header and the head fast sync block to this very same block if they are older
// or if they are on a different side chain.
//
// Note, this function assumes that the `mu` mutex is held!
-func (bc *BlockChain) insert(block *types.Block) {
+func (bc *BlockChain) writeHeadBlock(block *types.Block) {
// If the block is on a side chain or an unknown one, force other heads onto it too
updateHeads := rawdb.ReadCanonicalHash(bc.db, block.NumberU64()) != block.Hash()
// Add the block to the canonical chain number scheme and mark as the head
- rawdb.WriteCanonicalHash(bc.db, block.Hash(), block.NumberU64())
- rawdb.WriteHeadBlockHash(bc.db, block.Hash())
-
- bc.currentBlock.Store(block)
- headBlockGauge.Update(int64(block.NumberU64()))
+ batch := bc.db.NewBatch()
+ rawdb.WriteCanonicalHash(batch, block.Hash(), block.NumberU64())
+ rawdb.WriteTxLookupEntries(batch, block)
+ rawdb.WriteHeadBlockHash(batch, block.Hash())
// If the block is better than our head or is on a different chain, force update heads
+ if updateHeads {
+ rawdb.WriteHeadHeaderHash(batch, block.Hash())
+ rawdb.WriteHeadFastBlockHash(batch, block.Hash())
+ }
+ // Flush the whole batch into the disk, exit the node if failed
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to update chain indexes and markers", "err", err)
+ }
+ // Update all in-memory chain markers in the last step
if updateHeads {
bc.hc.SetCurrentHeader(block.Header())
- rawdb.WriteHeadFastBlockHash(bc.db, block.Hash())
-
bc.currentFastBlock.Store(block)
headFastBlockGauge.Update(int64(block.NumberU64()))
}
+ bc.currentBlock.Store(block)
+ headBlockGauge.Update(int64(block.NumberU64()))
}
// Genesis retrieves the chain's genesis block.
@@ -935,26 +954,36 @@ func (bc *BlockChain) Rollback(chain []common.Hash) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
+ batch := bc.db.NewBatch()
for i := len(chain) - 1; i >= 0; i-- {
hash := chain[i]
+ // Degrade the chain markers if they are explicitly reverted.
+ // In theory we should update all in-memory markers in the
+ // last step, however the direction of rollback is from high
+ // to low, so it's safe to update in-memory markers directly.
currentHeader := bc.hc.CurrentHeader()
if currentHeader.Hash() == hash {
- bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
+ newHeadHeader := bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1)
+ rawdb.WriteHeadHeaderHash(batch, currentHeader.ParentHash)
+ bc.hc.SetCurrentHeader(newHeadHeader)
}
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
- rawdb.WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
+ rawdb.WriteHeadFastBlockHash(batch, currentFastBlock.ParentHash())
bc.currentFastBlock.Store(newFastBlock)
headFastBlockGauge.Update(int64(newFastBlock.NumberU64()))
}
if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
- rawdb.WriteHeadBlockHash(bc.db, newBlock.Hash())
+ rawdb.WriteHeadBlockHash(batch, currentBlock.ParentHash())
bc.currentBlock.Store(newBlock)
headBlockGauge.Update(int64(newBlock.NumberU64()))
}
}
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to rollback chain markers", "err", err)
+ }
// Truncate ancient data which exceeds the current header.
//
// Notably, it can happen that system crashes without truncating the ancient data
@@ -1117,7 +1146,6 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
}
// Don't collect too much in-memory, write it out every 100K blocks
if len(deleted) > 100000 {
-
// Sync the ancient store explicitly to ensure all data has been flushed to disk.
if err := bc.db.Sync(); err != nil {
return 0, err
@@ -1226,7 +1254,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receiptChain[i])
rawdb.WriteTxLookupEntries(batch, block)
- stats.processed++
+ // Write everything belongs to the blocks into the database. So that
+ // we can ensure all components of body is completed(body, receipts,
+ // tx indexes)
if batch.ValueSize() >= ethdb.IdealBatchSize {
if err := batch.Write(); err != nil {
return 0, err
@@ -1234,7 +1264,11 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
size += batch.ValueSize()
batch.Reset()
}
+ stats.processed++
}
+ // Write everything belongs to the blocks into the database. So that
+ // we can ensure all components of body is completed(body, receipts,
+ // tx indexes)
if batch.ValueSize() > 0 {
size += batch.ValueSize()
if err := batch.Write(); err != nil {
@@ -1285,11 +1319,12 @@ func (bc *BlockChain) writeBlockWithoutState(block *types.Block, td *big.Int) (e
bc.wg.Add(1)
defer bc.wg.Done()
- if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), td); err != nil {
- return err
+ batch := bc.db.NewBatch()
+ rawdb.WriteTd(batch, block.Hash(), block.NumberU64(), td)
+ rawdb.WriteBlock(batch, block)
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to write block into disk", "err", err)
}
- rawdb.WriteBlock(bc.db, block)
-
return nil
}
@@ -1305,20 +1340,16 @@ func (bc *BlockChain) writeKnownBlock(block *types.Block) error {
return err
}
}
- // Write the positional metadata for transaction/receipt lookups.
- // Preimages here is empty, ignore it.
- rawdb.WriteTxLookupEntries(bc.db, block)
-
- bc.insert(block)
+ bc.writeHeadBlock(block)
return nil
}
// WriteBlockWithState writes the block and all associated state to the database.
-func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, state, privateState *state.StateDB) (status WriteStatus, err error) {
+func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state, privateState *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
bc.chainmu.Lock()
defer bc.chainmu.Unlock()
- return bc.writeBlockWithState(block, receipts, state, privateState)
+ return bc.writeBlockWithState(block, receipts, logs, state, privateState, emitHeadEvent)
}
// QUORUM
@@ -1351,7 +1382,7 @@ func (bc *BlockChain) CommitBlockWithState(deleteEmptyObjects bool, state, priva
// writeBlockWithState writes the block and all associated state to the database,
// but is expects the chain mutex to be held.
-func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, state, privateState *state.StateDB) (status WriteStatus, err error) {
+func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state, privateState *state.StateDB, emitHeadEvent bool) (status WriteStatus, err error) {
bc.wg.Add(1)
defer bc.wg.Done()
@@ -1382,12 +1413,19 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
externTd := new(big.Int).Add(block.Difficulty(), ptd)
- // Irrelevant of the canonical status, write the block itself to the database
- if err := bc.hc.WriteTd(block.Hash(), block.NumberU64(), externTd); err != nil {
- return NonStatTy, err
- }
- rawdb.WriteBlock(bc.db, block)
-
+ // Irrelevant of the canonical status, write the block itself to the database.
+ //
+ // Note all the components of block(td, hash->number map, header, body, receipts)
+ // should be written atomically. BlockBatch is used for containing all components.
+ blockBatch := bc.db.NewBatch()
+ rawdb.WriteTd(blockBatch, block.Hash(), block.NumberU64(), externTd)
+ rawdb.WriteBlock(blockBatch, block)
+ rawdb.WriteReceipts(blockBatch, block.Hash(), block.NumberU64(), receipts)
+ rawdb.WritePreimages(blockBatch, state.Preimages())
+ if err := blockBatch.Write(); err != nil {
+ log.Crit("Failed to write block into disk", "err", err)
+ }
+ // Commit all cached state changes into underlying memory database.
root, err := state.Commit(bc.chainConfig.IsEIP158(block.Number()))
if err != nil {
@@ -1448,11 +1486,6 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
}
}
}
-
- // Write other block data using a batch.
- batch := bc.db.NewBatch()
- rawdb.WriteReceipts(batch, block.Hash(), block.NumberU64(), receipts)
-
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
@@ -1478,23 +1511,32 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types.
return NonStatTy, err
}
}
- // Write the positional metadata for transaction/receipt lookups and preimages
- rawdb.WriteTxLookupEntries(batch, block)
- rawdb.WritePreimages(batch, state.Preimages())
-
status = CanonStatTy
} else {
status = SideStatTy
}
- if err := batch.Write(); err != nil {
- return NonStatTy, err
- }
-
// Set new head.
if status == CanonStatTy {
- bc.insert(block)
+ bc.writeHeadBlock(block)
}
bc.futureBlocks.Remove(block.Hash())
+
+ if status == CanonStatTy {
+ bc.chainFeed.Send(ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
+ if len(logs) > 0 {
+ bc.logsFeed.Send(logs)
+ }
+ // In theory we should fire a ChainHeadEvent when we inject
+ // a canonical block, but sometimes we can insert a batch of
+			// canonical blocks. To avoid firing too many ChainHeadEvents,
+			// we will fire one accumulated ChainHeadEvent and suppress
+			// the per-block event here.
+ if emitHeadEvent {
+ bc.chainHeadFeed.Send(ChainHeadEvent{Block: block})
+ }
+ } else {
+ bc.chainSideFeed.Send(ChainSideEvent{Block: block})
+ }
return status, nil
}
@@ -1545,11 +1587,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) {
// Pre-checks passed, start the full block imports
bc.wg.Add(1)
bc.chainmu.Lock()
- n, events, logs, err := bc.insertChain(chain, true)
+ n, err := bc.insertChain(chain, true)
bc.chainmu.Unlock()
bc.wg.Done()
- bc.PostChainEvents(events, logs)
return n, err
}
@@ -1582,29 +1623,30 @@ func mergeReceipts(pub, priv types.Receipts) types.Receipts {
// racey behaviour. If a sidechain import is in progress, and the historic state
// is imported, but then new canon-head is added before the actual sidechain
// completes, then the historic state could be pruned again
-func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []interface{}, []*types.Log, error) {
+func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, error) {
// If the chain is terminating, don't even bother starting up
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
// QUORUM
if bc.isRaft() {
// Only returns an error for raft mode
- return 0, nil, nil, ErrAbortBlocksProcessing
+ return 0, ErrAbortBlocksProcessing
}
- return 0, nil, nil, nil
+ return 0, nil
}
// Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss)
senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain)
- // A queued approach to delivering events. This is generally
- // faster than direct delivery and requires much less mutex
- // acquiring.
var (
- stats = insertStats{startTime: mclock.Now()}
- events = make([]interface{}, 0, len(chain))
- lastCanon *types.Block
- coalescedLogs []*types.Log
+ stats = insertStats{startTime: mclock.Now()}
+ lastCanon *types.Block
)
+ // Fire a single chain head event if we've progressed the chain
+ defer func() {
+ if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
+ bc.chainHeadFeed.Send(ChainHeadEvent{lastCanon})
+ }
+ }()
// Start the parallel header verifier
headers := make([]*types.Header, len(chain))
seals := make([]bool, len(chain))
@@ -1654,7 +1696,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && err == ErrKnownBlock {
log.Debug("Writing previously known block", "number", block.Number(), "hash", block.Hash())
if err := bc.writeKnownBlock(block); err != nil {
- return it.index, nil, nil, err
+ return it.index, err
}
lastCanon = block
@@ -1673,7 +1715,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
for block != nil && (it.index == 0 || err == consensus.ErrUnknownAncestor) {
log.Debug("Future block, postponing import", "number", block.Number(), "hash", block.Hash())
if err := bc.addFutureBlock(block); err != nil {
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
block, err = it.next()
}
@@ -1681,14 +1723,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
stats.ignored += it.remaining()
// If there are any still remaining, mark as ignored
- return it.index, events, coalescedLogs, err
+ return it.index, err
// Some other error occurred, abort
case err != nil:
bc.futureBlocks.Remove(block.Hash())
stats.ignored += len(it.chain)
bc.reportBlock(block, nil, err)
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
// No validation errors for the first block (or chain prefix skipped)
for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() {
@@ -1698,7 +1740,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// QUORUM
if bc.isRaft() {
// Only returns an error for raft mode
- return it.index, events, coalescedLogs, ErrAbortBlocksProcessing
+ return it.index, ErrAbortBlocksProcessing
}
// END QUORUM
break
@@ -1706,7 +1748,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// If the header is a banned one, straight out abort
if BadHashes[block.Hash()] {
bc.reportBlock(block, nil, ErrBlacklistedHash)
- return it.index, events, coalescedLogs, ErrBlacklistedHash
+ return it.index, ErrBlacklistedHash
}
// If the block is known (in the middle of the chain), it's a special case for
// Clique blocks where they can share state among each other, so importing an
@@ -1723,15 +1765,13 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"root", block.Root())
if err := bc.writeKnownBlock(block); err != nil {
- return it.index, nil, nil, err
+ return it.index, err
}
stats.processed++
// We can assume that logs are empty here, since the only way for consecutive
// Clique blocks to have the same state is if there are no transactions.
- events = append(events, ChainEvent{block, block.Hash(), nil})
lastCanon = block
-
continue
}
// Retrieve the parent block and it's state to execute on top
@@ -1746,32 +1786,31 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
statedb, err := state.New(parent.Root, bc.stateCache)
if err != nil {
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
// Quorum
privateStateRoot := rawdb.GetPrivateStateRoot(bc.db, parent.Root)
privateState, err := stateNew(privateStateRoot, bc.privateStateCache)
if err != nil {
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
// /Quorum
// If we have a followup block, run that against the current state to pre-cache
// transactions and probabilistically some of the account/storage trie nodes.
var followupInterrupt uint32
-
if !bc.cacheConfig.TrieCleanNoPrefetch {
if followup, err := it.peek(); followup != nil && err == nil {
- go func(start time.Time) {
- throwaway, _ := state.New(parent.Root, bc.stateCache)
- privatest, _ := stateNew(privateStateRoot, bc.privateStateCache)
+ throwaway, _ := state.New(parent.Root, bc.stateCache)
+ privatest, _ := stateNew(privateStateRoot, bc.privateStateCache)
+ go func(start time.Time, followup *types.Block, throwaway, privatest *state.StateDB, interrupt *uint32) {
bc.prefetcher.Prefetch(followup, throwaway, privatest, bc.vmConfig, &followupInterrupt)
blockPrefetchExecuteTimer.Update(time.Since(start))
- if atomic.LoadUint32(&followupInterrupt) == 1 {
+ if atomic.LoadUint32(interrupt) == 1 {
blockPrefetchInterruptMeter.Mark(1)
}
- }(time.Now())
+ }(time.Now(), followup, throwaway, privatest, &followupInterrupt)
}
}
// Process block using the parent state as reference point
@@ -1780,7 +1819,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
// Update the metrics touched during block processing
accountReadTimer.Update(statedb.AccountReads) // Account reads are complete, we can mark them
@@ -1799,7 +1838,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
if err := bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil {
bc.reportBlock(block, receipts, err)
atomic.StoreUint32(&followupInterrupt, 1)
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
allReceipts := mergeReceipts(receipts, privateReceipts)
@@ -1812,14 +1851,14 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// Write the block to the chain and get the status.
substart = time.Now()
- status, err := bc.writeBlockWithState(block, allReceipts, statedb, privateState)
+ status, err := bc.writeBlockWithState(block, allReceipts, logs, statedb, privateState, false)
if err != nil {
atomic.StoreUint32(&followupInterrupt, 1)
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
atomic.StoreUint32(&followupInterrupt, 1)
if err := rawdb.WritePrivateBlockBloom(bc.db, block.NumberU64(), privateReceipts); err != nil {
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
// Update the metrics touched during block commit
accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them
@@ -1835,8 +1874,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"elapsed", common.PrettyDuration(time.Since(start)),
"root", block.Root())
- coalescedLogs = append(coalescedLogs, logs...)
- events = append(events, ChainEvent{block, block.Hash(), logs})
lastCanon = block
// Only count canonical blocks for GC processing time
@@ -1847,7 +1884,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
"txs", len(block.Transactions()), "gas", block.GasUsed(), "uncles", len(block.Uncles()),
"root", block.Root())
- events = append(events, ChainSideEvent{block})
default:
// This in theory is impossible, but lets be nice to our future selves and leave
@@ -1866,24 +1902,20 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
// Any blocks remaining here? The only ones we care about are the future ones
if block != nil && err == consensus.ErrFutureBlock {
if err := bc.addFutureBlock(block); err != nil {
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
block, err = it.next()
for ; block != nil && err == consensus.ErrUnknownAncestor; block, err = it.next() {
if err := bc.addFutureBlock(block); err != nil {
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
stats.queued++
}
}
stats.ignored += it.remaining()
- // Append a single chain head event if we've progressed the chain
- if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() {
- events = append(events, ChainHeadEvent{lastCanon})
- }
- return it.index, events, coalescedLogs, err
+ return it.index, err
}
// insertSideChain is called when an import batch hits upon a pruned ancestor
@@ -1892,7 +1924,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, []
//
// The method writes all (header-and-body-valid) blocks to disk, then tries to
// switch over to the new chain if the TD exceeded the current chain.
-func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, []interface{}, []*types.Log, error) {
+func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (int, error) {
var (
externTd *big.Int
current = bc.CurrentBlock()
@@ -1928,7 +1960,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// If someone legitimately side-mines blocks, they would still be imported as usual. However,
// we cannot risk writing unverified blocks to disk when they obviously target the pruning
// mechanism.
- return it.index, nil, nil, errors.New("sidechain ghost-state attack")
+ return it.index, errors.New("sidechain ghost-state attack")
}
}
if externTd == nil {
@@ -1939,7 +1971,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
if !bc.HasBlock(block.Hash(), block.NumberU64()) {
start := time.Now()
if err := bc.writeBlockWithoutState(block, externTd); err != nil {
- return it.index, nil, nil, err
+ return it.index, err
}
log.Debug("Injected sidechain block", "number", block.Number(), "hash", block.Hash(),
"diff", block.Difficulty(), "elapsed", common.PrettyDuration(time.Since(start)),
@@ -1956,7 +1988,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
localTd := bc.GetTd(current.Hash(), current.NumberU64())
if localTd.Cmp(externTd) > 0 {
log.Info("Sidechain written to disk", "start", it.first().NumberU64(), "end", it.previous().Number, "sidetd", externTd, "localtd", localTd)
- return it.index, nil, nil, err
+ return it.index, err
}
// Gather all the sidechain hashes (full blocks may be memory heavy)
var (
@@ -1971,7 +2003,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
parent = bc.GetHeader(parent.ParentHash, parent.Number.Uint64()-1)
}
if parent == nil {
- return it.index, nil, nil, errors.New("missing parent")
+ return it.index, errors.New("missing parent")
}
// Import all the pruned blocks to make the state available
var (
@@ -1990,15 +2022,15 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
// memory here.
if len(blocks) >= 2048 || memory > 64*1024*1024 {
log.Info("Importing heavy sidechain segment", "blocks", len(blocks), "start", blocks[0].NumberU64(), "end", block.NumberU64())
- if _, _, _, err := bc.insertChain(blocks, false); err != nil {
- return 0, nil, nil, err
+ if _, err := bc.insertChain(blocks, false); err != nil {
+ return 0, err
}
blocks, memory = blocks[:0], 0
// If the chain is terminating, stop processing blocks
if atomic.LoadInt32(&bc.procInterrupt) == 1 {
log.Debug("Premature abort during blocks processing")
- return 0, nil, nil, nil
+ return 0, nil
}
}
}
@@ -2006,7 +2038,7 @@ func (bc *BlockChain) insertSideChain(block *types.Block, it *insertIterator) (i
log.Info("Importing sidechain segment", "start", blocks[0].NumberU64(), "end", blocks[len(blocks)-1].NumberU64())
return bc.insertChain(blocks, false)
}
- return 0, nil, nil, nil
+ return 0, nil
}
// reorg takes two blocks, an old chain and a new chain and will reconstruct the
@@ -2021,11 +2053,11 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
deletedTxs types.Transactions
addedTxs types.Transactions
- deletedLogs []*types.Log
- rebirthLogs []*types.Log
+ deletedLogs [][]*types.Log
+ rebirthLogs [][]*types.Log
- // collectLogs collects the logs that were generated during the
- // processing of the block that corresponds with the given hash.
+ // collectLogs collects the logs that were generated or removed during
+ // the processing of the block that corresponds with the given hash.
// These logs are later announced as deleted or reborn
collectLogs = func(hash common.Hash, removed bool) {
number := bc.hc.GetBlockNumber(hash)
@@ -2033,18 +2065,40 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
return
}
receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig)
+
+ var logs []*types.Log
for _, receipt := range receipts {
for _, log := range receipt.Logs {
l := *log
if removed {
l.Removed = true
- deletedLogs = append(deletedLogs, &l)
} else {
- rebirthLogs = append(rebirthLogs, &l)
}
+ logs = append(logs, &l)
+ }
+ }
+ if len(logs) > 0 {
+ if removed {
+ deletedLogs = append(deletedLogs, logs)
+ } else {
+ rebirthLogs = append(rebirthLogs, logs)
}
}
}
+ // mergeLogs returns a merged log slice with specified sort order.
+ mergeLogs = func(logs [][]*types.Log, reverse bool) []*types.Log {
+ var ret []*types.Log
+ if reverse {
+ for i := len(logs) - 1; i >= 0; i-- {
+ ret = append(ret, logs[i]...)
+ }
+ } else {
+ for i := 0; i < len(logs); i++ {
+ ret = append(ret, logs[i]...)
+ }
+ }
+ return ret
+ }
)
// Reduce the longer chain to the same number as the shorter one
if oldBlock.NumberU64() > newBlock.NumberU64() {
@@ -2110,20 +2164,19 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
// taking care of the proper incremental order.
for i := len(newChain) - 1; i >= 1; i-- {
// Insert the block in the canonical way, re-writing history
- bc.insert(newChain[i])
+ bc.writeHeadBlock(newChain[i])
// Collect reborn logs due to chain reorg
collectLogs(newChain[i].Hash(), false)
- // Write lookup entries for hash based transaction/receipt searches
- rawdb.WriteTxLookupEntries(bc.db, newChain[i])
+ // Collect the new added transactions.
addedTxs = append(addedTxs, newChain[i].Transactions()...)
}
- // When transactions get deleted from the database, the receipts that were
- // created in the fork must also be deleted
- batch := bc.db.NewBatch()
+ // Delete useless indexes right now which includes the non-canonical
+ // transaction indexes, canonical chain indexes which above the head.
+ indexesBatch := bc.db.NewBatch()
for _, tx := range types.TxDifference(deletedTxs, addedTxs) {
- rawdb.DeleteTxLookupEntry(batch, tx.Hash())
+ rawdb.DeleteTxLookupEntry(indexesBatch, tx.Hash())
}
// Delete any canonical number assignments above the new head
number := bc.CurrentBlock().NumberU64()
@@ -2132,53 +2185,27 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error {
if hash == (common.Hash{}) {
break
}
- rawdb.DeleteCanonicalHash(batch, i)
+ rawdb.DeleteCanonicalHash(indexesBatch, i)
+ }
+ if err := indexesBatch.Write(); err != nil {
+ log.Crit("Failed to delete useless indexes", "err", err)
}
- batch.Write()
-
// If any logs need to be fired, do it now. In theory we could avoid creating
// this goroutine if there are no events to fire, but realistcally that only
// ever happens if we're reorging empty blocks, which will only happen on idle
// networks where performance is not an issue either way.
- //
- // TODO(karalabe): Can we get rid of the goroutine somehow to guarantee correct
- // event ordering?
- go func() {
- if len(deletedLogs) > 0 {
- bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs})
- }
- if len(rebirthLogs) > 0 {
- bc.logsFeed.Send(rebirthLogs)
- }
- if len(oldChain) > 0 {
- for _, block := range oldChain {
- bc.chainSideFeed.Send(ChainSideEvent{Block: block})
- }
- }
- }()
- return nil
-}
-
-// PostChainEvents iterates over the events generated by a chain insertion and
-// posts them into the event feed.
-// TODO: Should not expose PostChainEvents. The chain events should be posted in WriteBlock.
-func (bc *BlockChain) PostChainEvents(events []interface{}, logs []*types.Log) {
- // post event logs for further processing
- if logs != nil {
- bc.logsFeed.Send(logs)
+ if len(deletedLogs) > 0 {
+ bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)})
}
- for _, event := range events {
- switch ev := event.(type) {
- case ChainEvent:
- bc.chainFeed.Send(ev)
-
- case ChainHeadEvent:
- bc.chainHeadFeed.Send(ev)
-
- case ChainSideEvent:
- bc.chainSideFeed.Send(ev)
+ if len(rebirthLogs) > 0 {
+ bc.logsFeed.Send(mergeLogs(rebirthLogs, false))
+ }
+ if len(oldChain) > 0 {
+ for i := len(oldChain) - 1; i >= 0; i-- {
+ bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]})
}
}
+ return nil
}
func (bc *BlockChain) update() {
diff --git a/core/blockchain_test.go b/core/blockchain_test.go
index c00b5f880e..d64e52b1c6 100644
--- a/core/blockchain_test.go
+++ b/core/blockchain_test.go
@@ -22,6 +22,7 @@ import (
"math/big"
"math/rand"
"os"
+ "reflect"
"sync"
"testing"
"time"
@@ -960,16 +961,20 @@ func TestLogReorgs(t *testing.T) {
}
chain, _ = GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 3, func(i int, gen *BlockGen) {})
+ done := make(chan struct{})
+ go func() {
+ ev := <-rmLogsCh
+ if len(ev.Logs) == 0 {
+ t.Error("expected logs")
+ }
+ close(done)
+ }()
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
-
timeout := time.NewTimer(1 * time.Second)
select {
- case ev := <-rmLogsCh:
- if len(ev.Logs) == 0 {
- t.Error("expected logs")
- }
+ case <-done:
case <-timeout.C:
t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
}
@@ -982,39 +987,47 @@ func TestLogRebirth(t *testing.T) {
db = rawdb.NewMemoryDatabase()
// this code generates a log
- code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
- gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
- genesis = gspec.MustCommit(db)
- signer = types.NewEIP155Signer(gspec.Config.ChainID)
- newLogCh = make(chan bool)
+ code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00")
+ gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}}
+ genesis = gspec.MustCommit(db)
+ signer = types.NewEIP155Signer(gspec.Config.ChainID)
+ newLogCh = make(chan bool)
+ removeLogCh = make(chan bool)
)
- // listenNewLog checks whether the received logs number is equal with expected.
- listenNewLog := func(sink chan []*types.Log, expect int) {
+ // validateLogEvent checks whether the received logs number is equal with expected.
+ validateLogEvent := func(sink interface{}, result chan bool, expect int) {
+ chanval := reflect.ValueOf(sink)
+ chantyp := chanval.Type()
+ if chantyp.Kind() != reflect.Chan || chantyp.ChanDir()&reflect.RecvDir == 0 {
+ t.Fatalf("invalid channel, given type %v", chantyp)
+ }
cnt := 0
+ var recv []reflect.Value
+ timeout := time.After(1 * time.Second)
+ cases := []reflect.SelectCase{{Chan: chanval, Dir: reflect.SelectRecv}, {Chan: reflect.ValueOf(timeout), Dir: reflect.SelectRecv}}
for {
- select {
- case logs := <-sink:
- cnt += len(logs)
- case <-time.NewTimer(5 * time.Second).C:
- // new logs timeout
- newLogCh <- false
+ chose, v, _ := reflect.Select(cases)
+ if chose == 1 {
+				// Not enough events received
+ result <- false
return
}
+ cnt += 1
+ recv = append(recv, v)
if cnt == expect {
break
- } else if cnt > expect {
- // redundant logs received
- newLogCh <- false
- return
}
}
- select {
- case <-sink:
- // redundant logs received
- newLogCh <- false
- case <-time.NewTimer(100 * time.Millisecond).C:
- newLogCh <- true
+ done := time.After(50 * time.Millisecond)
+ cases = cases[:1]
+ cases = append(cases, reflect.SelectCase{Chan: reflect.ValueOf(done), Dir: reflect.SelectRecv})
+ chose, _, _ := reflect.Select(cases)
+		// If chose equals 0, redundant events were received.
+ if chose == 1 {
+ result <- true
+ } else {
+ result <- false
}
}
@@ -1038,12 +1051,12 @@ func TestLogRebirth(t *testing.T) {
})
// Spawn a goroutine to receive log events
- go listenNewLog(logsCh, 1)
+ go validateLogEvent(logsCh, newLogCh, 1)
if _, err := blockchain.InsertChain(chain); err != nil {
t.Fatalf("failed to insert chain: %v", err)
}
if !<-newLogCh {
- t.Fatalf("failed to receive new log event")
+ t.Fatal("failed to receive new log event")
}
// Generate long reorg chain
@@ -1060,40 +1073,31 @@ func TestLogRebirth(t *testing.T) {
})
// Spawn a goroutine to receive log events
- go listenNewLog(logsCh, 1)
+ go validateLogEvent(logsCh, newLogCh, 1)
+ go validateLogEvent(rmLogsCh, removeLogCh, 1)
if _, err := blockchain.InsertChain(forkChain); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
if !<-newLogCh {
- t.Fatalf("failed to receive new log event")
+ t.Fatal("failed to receive new log event")
}
- // Ensure removedLog events received
- select {
- case ev := <-rmLogsCh:
- if len(ev.Logs) == 0 {
- t.Error("expected logs")
- }
- case <-time.NewTimer(1 * time.Second).C:
- t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
+ if !<-removeLogCh {
+ t.Fatal("failed to receive removed log event")
}
newBlocks, _ := GenerateChain(params.TestChainConfig, chain[len(chain)-1], ethash.NewFaker(), db, 1, func(i int, gen *BlockGen) {})
- go listenNewLog(logsCh, 1)
+ go validateLogEvent(logsCh, newLogCh, 1)
+ go validateLogEvent(rmLogsCh, removeLogCh, 1)
if _, err := blockchain.InsertChain(newBlocks); err != nil {
t.Fatalf("failed to insert forked chain: %v", err)
}
- // Ensure removedLog events received
- select {
- case ev := <-rmLogsCh:
- if len(ev.Logs) == 0 {
- t.Error("expected logs")
- }
- case <-time.NewTimer(1 * time.Second).C:
- t.Fatal("Timeout. There is no RemovedLogsEvent has been sent.")
- }
// Rebirth logs should omit a newLogEvent
if !<-newLogCh {
- t.Fatalf("failed to receive new log event")
+ t.Fatal("failed to receive new log event")
+ }
+ // Ensure removedLog events received
+ if !<-removeLogCh {
+ t.Fatal("failed to receive removed log event")
}
}
@@ -1145,7 +1149,6 @@ func TestSideLogRebirth(t *testing.T) {
logsCh := make(chan []*types.Log)
blockchain.SubscribeLogsEvent(logsCh)
-
chain, _ := GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 2, func(i int, gen *BlockGen) {
if i == 1 {
// Higher block difficulty
diff --git a/core/chain_indexer_test.go b/core/chain_indexer_test.go
index abf5b3cc14..ff7548e7bd 100644
--- a/core/chain_indexer_test.go
+++ b/core/chain_indexer_test.go
@@ -203,7 +203,7 @@ func (b *testChainIndexBackend) assertBlocks(headNum, failNum uint64) (uint64, b
}
func (b *testChainIndexBackend) reorg(headNum uint64) uint64 {
- firstChanged := headNum / b.indexer.sectionSize
+ firstChanged := (headNum + 1) / b.indexer.sectionSize
if firstChanged < b.stored {
b.stored = firstChanged
}
diff --git a/core/evm.go b/core/evm.go
index ded12c447b..5ea6ed3e46 100644
--- a/core/evm.go
+++ b/core/evm.go
@@ -86,24 +86,32 @@ func NewMultitenancyAwareEVMContext(ctx context.Context, evmCtx vm.Context) vm.C
// GetHashFn returns a GetHashFunc which retrieves header hashes by number
func GetHashFn(ref *types.Header, chain ChainContext) func(n uint64) common.Hash {
- var cache map[uint64]common.Hash
+ // Cache will initially contain [refHash.parent],
+ // Then fill up with [refHash.p, refHash.pp, refHash.ppp, ...]
+ var cache []common.Hash
return func(n uint64) common.Hash {
// If there's no hash cache yet, make one
- if cache == nil {
- cache = map[uint64]common.Hash{
- ref.Number.Uint64() - 1: ref.ParentHash,
- }
+ if len(cache) == 0 {
+ cache = append(cache, ref.ParentHash)
}
- // Try to fulfill the request from the cache
- if hash, ok := cache[n]; ok {
- return hash
+ if idx := ref.Number.Uint64() - n - 1; idx < uint64(len(cache)) {
+ return cache[idx]
}
- // Not cached, iterate the blocks and cache the hashes
- for header := chain.GetHeader(ref.ParentHash, ref.Number.Uint64()-1); header != nil; header = chain.GetHeader(header.ParentHash, header.Number.Uint64()-1) {
- cache[header.Number.Uint64()-1] = header.ParentHash
- if n == header.Number.Uint64()-1 {
- return header.ParentHash
+ // No luck in the cache, but we can start iterating from the last element we already know
+ lastKnownHash := cache[len(cache)-1]
+ lastKnownNumber := ref.Number.Uint64() - uint64(len(cache))
+
+ for {
+ header := chain.GetHeader(lastKnownHash, lastKnownNumber)
+ if header == nil {
+ break
+ }
+ cache = append(cache, header.ParentHash)
+ lastKnownHash = header.ParentHash
+ lastKnownNumber = header.Number.Uint64() - 1
+ if n == lastKnownNumber {
+ return lastKnownHash
}
}
return common.Hash{}
diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go
index ee201ae9ae..f3364c3d69 100644
--- a/core/forkid/forkid_test.go
+++ b/core/forkid/forkid_test.go
@@ -57,8 +57,10 @@ func TestCreation(t *testing.T) {
{7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block
{7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block
{9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block
- {9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}}, // Today Istanbul block
- {10000000, ID{Hash: checksumToBytes(0x879d6e30), Next: 0}}, // Future Istanbul block
+ {9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block
+ {9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block
+ {9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // First Muir Glacier block
+ {10000000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // Future Muir Glacier block
},
},
// Ropsten test cases
@@ -76,8 +78,10 @@ func TestCreation(t *testing.T) {
{4939393, ID{Hash: checksumToBytes(0x97b544f3), Next: 4939394}}, // Last Constantinople block
{4939394, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // First Petersburg block
{6485845, ID{Hash: checksumToBytes(0xd6e2149b), Next: 6485846}}, // Last Petersburg block
- {6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}}, // First Istanbul block
- {7500000, ID{Hash: checksumToBytes(0x4bc66396), Next: 0}}, // Future Istanbul block
+ {6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // First Istanbul block
+ {7117116, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // Last Istanbul block
+ {7117117, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // First Muir Glacier block
+			{7500000, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}},       // Future Muir Glacier block
},
},
// Rinkeby test cases
@@ -181,11 +185,11 @@ func TestValidation(t *testing.T) {
// Local is mainnet Petersburg, remote is Rinkeby Petersburg.
{7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale},
- // Local is mainnet Istanbul, far in the future. Remote announces Gopherium (non existing fork)
+ // Local is mainnet Muir Glacier, far in the future. Remote announces Gopherium (non existing fork)
// at some future block 88888888, for itself, but past block for local. Local is incompatible.
//
// This case detects non-upgraded nodes with majority hash power (typical Ropsten mess).
- {88888888, ID{Hash: checksumToBytes(0x879d6e30), Next: 88888888}, ErrLocalIncompatibleOrStale},
+ {88888888, ID{Hash: checksumToBytes(0xe029e991), Next: 88888888}, ErrLocalIncompatibleOrStale},
// Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing
// fork) at block 7279999, before Petersburg. Local is incompatible.
diff --git a/core/genesis.go b/core/genesis.go
index c8ed9e3c49..bc5a3e66f2 100644
--- a/core/genesis.go
+++ b/core/genesis.go
@@ -152,10 +152,10 @@ func (e *GenesisMismatchError) Error() string {
//
// The returned chain configuration is never nil.
func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) {
- return SetupGenesisBlockWithOverride(db, genesis, nil)
+ return SetupGenesisBlockWithOverride(db, genesis, nil, nil)
}
-func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul *big.Int) (*params.ChainConfig, common.Hash, error) {
+func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideIstanbul, overrideMuirGlacier *big.Int) (*params.ChainConfig, common.Hash, error) {
if genesis != nil && genesis.Config == nil {
return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig
}
@@ -222,6 +222,9 @@ func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, override
if overrideIstanbul != nil {
newcfg.IstanbulBlock = overrideIstanbul
}
+ if overrideMuirGlacier != nil {
+ newcfg.MuirGlacierBlock = overrideMuirGlacier
+ }
if err := newcfg.CheckConfigForkOrder(); err != nil {
return newcfg, common.Hash{}, err
}
@@ -407,8 +410,7 @@ func DefaultGoerliGenesisBlock() *Genesis {
}
}
-// DeveloperGenesisBlock returns the 'geth --dev' genesis block. Note, this must
-// be seeded with the
+// DeveloperGenesisBlock returns the 'geth --dev' genesis block.
func DeveloperGenesisBlock(period uint64, faucet common.Address) *Genesis {
// Override the default period to the user requested one
config := *params.AllCliqueProtocolChanges
diff --git a/core/headerchain.go b/core/headerchain.go
index 4682069cff..f21dcf537e 100644
--- a/core/headerchain.go
+++ b/core/headerchain.go
@@ -45,6 +45,14 @@ const (
// HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure.
+//
+// HeaderChain is responsible for maintaining the header chain including the
+// header query and updating.
+//
+// The components maintained by headerchain include: (1) total difficulty
+// (2) header (3) block hash -> number mapping (4) canonical number -> hash mapping
+// and (5) head header flag.
+//
// It is not thread safe either, the encapsulating chain structures should do
// the necessary mutex locking/unlocking.
type HeaderChain struct {
@@ -66,10 +74,8 @@ type HeaderChain struct {
engine consensus.Engine
}
-// NewHeaderChain creates a new HeaderChain structure.
-// getValidator should return the parent's validator
-// procInterrupt points to the parent's interrupt semaphore
-// wg points to the parent's shutdown wait group
+// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
+// to the parent's interrupt semaphore.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
headerCache, _ := lru.New(headerCacheLimit)
tdCache, _ := lru.New(tdCacheLimit)
@@ -147,25 +153,33 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
externTd := new(big.Int).Add(header.Difficulty, ptd)
// Irrelevant of the canonical status, write the td and header to the database
- if err := hc.WriteTd(hash, number, externTd); err != nil {
- log.Crit("Failed to write header total difficulty", "err", err)
+ //
+ // Note all the components of header(td, hash->number index and header) should
+ // be written atomically.
+ headerBatch := hc.chainDb.NewBatch()
+ rawdb.WriteTd(headerBatch, hash, number, externTd)
+ rawdb.WriteHeader(headerBatch, header)
+ if err := headerBatch.Write(); err != nil {
+ log.Crit("Failed to write header into disk", "err", err)
}
- rawdb.WriteHeader(hc.chainDb, header)
-
// If the total difficulty is higher than our known, add it to the canonical chain
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) {
+ // If the header can be added into canonical chain, adjust the
+ // header chain markers(canonical indexes and head header flag).
+ //
+ // Note all markers should be written atomically.
+
// Delete any canonical number assignments above the new head
- batch := hc.chainDb.NewBatch()
+ markerBatch := hc.chainDb.NewBatch()
for i := number + 1; ; i++ {
hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
if hash == (common.Hash{}) {
break
}
- rawdb.DeleteCanonicalHash(batch, i)
+ rawdb.DeleteCanonicalHash(markerBatch, i)
}
- batch.Write()
// Overwrite any stale canonical number assignments
var (
@@ -174,16 +188,19 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
headHeader = hc.GetHeader(headHash, headNumber)
)
for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
- rawdb.WriteCanonicalHash(hc.chainDb, headHash, headNumber)
+ rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
headHash = headHeader.ParentHash
headNumber = headHeader.Number.Uint64() - 1
headHeader = hc.GetHeader(headHash, headNumber)
}
// Extend the canonical chain with the new header
- rawdb.WriteCanonicalHash(hc.chainDb, hash, number)
- rawdb.WriteHeadHeaderHash(hc.chainDb, hash)
-
+ rawdb.WriteCanonicalHash(markerBatch, hash, number)
+ rawdb.WriteHeadHeaderHash(markerBatch, hash)
+ if err := markerBatch.Write(); err != nil {
+ log.Crit("Failed to write header markers into disk", "err", err)
+ }
+ // Last step update all in-memory head header markers
hc.currentHeaderHash = hash
hc.currentHeader.Store(types.CopyHeader(header))
headHeaderGauge.Update(header.Number.Int64())
@@ -192,9 +209,9 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
} else {
status = SideStatTy
}
+ hc.tdCache.Add(hash, externTd)
hc.headerCache.Add(hash, header)
hc.numberCache.Add(hash, number)
-
return
}
@@ -396,14 +413,6 @@ func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
return hc.GetTd(hash, *number)
}
-// WriteTd stores a block's total difficulty into the database, also caching it
-// along the way.
-func (hc *HeaderChain) WriteTd(hash common.Hash, number uint64, td *big.Int) error {
- rawdb.WriteTd(hc.chainDb, hash, number, td)
- hc.tdCache.Add(hash, new(big.Int).Set(td))
- return nil
-}
-
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
@@ -431,6 +440,8 @@ func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
}
// HasHeader checks if a block header is present in the database or not.
+// In theory, if header is present in the database, all relative components
+// like td and hash->number should be present too.
func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
return true
@@ -458,10 +469,9 @@ func (hc *HeaderChain) CurrentHeader() *types.Header {
return hc.currentHeader.Load().(*types.Header)
}
-// SetCurrentHeader sets the current head header of the canonical chain.
+// SetCurrentHeader sets the in-memory head header marker of the canonical chain
+// as the given header.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
- rawdb.WriteHeadHeaderHash(hc.chainDb, head.Hash())
-
hc.currentHeader.Store(head)
hc.currentHeaderHash = head.Hash()
headHeaderGauge.Update(head.Number.Int64())
@@ -500,11 +510,18 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
// first then remove the relative data from the database.
//
// Update head first(head fast block, head full block) before deleting the data.
+ markerBatch := hc.chainDb.NewBatch()
if updateFn != nil {
- updateFn(hc.chainDb, parent)
+ updateFn(markerBatch, parent)
}
// Update head header then.
- rawdb.WriteHeadHeaderHash(hc.chainDb, parentHash)
+ rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
+ if err := markerBatch.Write(); err != nil {
+ log.Crit("Failed to update chain markers", "error", err)
+ }
+ hc.currentHeader.Store(parent)
+ hc.currentHeaderHash = parentHash
+ headHeaderGauge.Update(parent.Number.Int64())
// Remove the relative data from the database.
if delFn != nil {
@@ -514,13 +531,11 @@ func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, d
rawdb.DeleteHeader(batch, hash, num)
rawdb.DeleteTd(batch, hash, num)
rawdb.DeleteCanonicalHash(batch, num)
-
- hc.currentHeader.Store(parent)
- hc.currentHeaderHash = parentHash
- headHeaderGauge.Update(parent.Number.Int64())
}
- batch.Write()
-
+ // Flush all accumulated deletions.
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to rewind block", "error", err)
+ }
// Clear out any stale content from the caches
hc.headerCache.Purge()
hc.tdCache.Purge()
diff --git a/core/rawdb/database.go b/core/rawdb/database.go
index 353b7dce62..838c084359 100644
--- a/core/rawdb/database.go
+++ b/core/rawdb/database.go
@@ -150,11 +150,10 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
}
// Database contains only older data than the freezer, this happens if the
// state was wiped and reinited from an existing freezer.
- } else {
- // Key-value store continues where the freezer left off, all is fine. We might
- // have duplicate blocks (crash after freezer write but before kay-value store
- // deletion, but that's fine).
}
+ // Otherwise, key-value store continues where the freezer left off, all is fine.
+ // We might have duplicate blocks (crash after freezer write but before key-value
+ // store deletion, but that's fine).
} else {
// If the freezer is empty, ensure nothing was moved yet from the key-value
// store, otherwise we'll end up missing data. We check block #1 to decide
@@ -167,9 +166,9 @@ func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace st
return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
}
// Block #1 is still in the database, we're allowed to init a new feezer
- } else {
- // The head header is still the genesis, we're allowed to init a new feezer
}
+ // Otherwise, the head header is still the genesis, we're allowed to init a new
+// freezer.
}
}
// Freezer is consistent with the key-value database, permit combining the two
diff --git a/core/rawdb/freezer_reinit.go b/core/rawdb/freezer_reinit.go
index ea4dd33d1d..d6bf9ab1dd 100644
--- a/core/rawdb/freezer_reinit.go
+++ b/core/rawdb/freezer_reinit.go
@@ -55,10 +55,10 @@ func InitDatabaseFromFreezer(db ethdb.Database) error {
if n >= frozen {
return
}
- // Retrieve the block from the freezer (no need for the hash, we pull by
- // number from the freezer). If successful, pre-cache the block hash and
- // the individual transaction hashes for storing into the database.
- block := ReadBlock(db, common.Hash{}, n)
+ // Retrieve the block from the freezer. If successful, pre-cache
+ // the block hash and the individual transaction hashes for storing
+ // into the database.
+ block := ReadBlock(db, ReadCanonicalHash(db, n), n)
if block != nil {
block.Hash()
for _, tx := range block.Transactions() {
diff --git a/core/state/state_object.go b/core/state/state_object.go
index c4bb6ee84e..d0c1382ae5 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -288,10 +288,13 @@ func (s *stateObject) finalise() {
}
// updateTrie writes cached storage modifications into the object's storage trie.
+// It will return nil if the trie has not been loaded and no changes have been made
func (s *stateObject) updateTrie(db Database) Trie {
// Make sure all dirty slots are finalized into the pending storage area
s.finalise()
-
+ if len(s.pendingStorage) == 0 {
+ return s.trie
+ }
// Track the amount of time wasted on updating the storge trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now())
@@ -321,8 +324,10 @@ func (s *stateObject) updateTrie(db Database) Trie {
// UpdateRoot sets the trie root to the current root hash of
func (s *stateObject) updateRoot(db Database) {
- s.updateTrie(db)
-
+ // If nothing changed, don't bother with hashing anything
+ if s.updateTrie(db) == nil {
+ return
+ }
// Track the amount of time wasted on hashing the storge trie
if metrics.EnabledExpensive {
defer func(start time.Time) { s.db.StorageHashes += time.Since(start) }(time.Now())
@@ -333,7 +338,10 @@ func (s *stateObject) updateRoot(db Database) {
// CommitTrie the storage trie of the object to db.
// This updates the trie root.
func (s *stateObject) CommitTrie(db Database) error {
- s.updateTrie(db)
+ // If nothing changed, don't bother with hashing anything
+ if s.updateTrie(db) == nil {
+ return nil
+ }
if s.dbErr != nil {
return s.dbErr
}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index cd3e44996f..ede470f0ed 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -217,7 +217,7 @@ func (s *StateDB) AddRefund(gas uint64) {
func (s *StateDB) SubRefund(gas uint64) {
s.journal.append(refundChange{prev: s.refund})
if gas > s.refund {
- panic("Refund counter below zero")
+ panic(fmt.Sprintf("Refund counter below zero (gas: %d > refund: %d)", gas, s.refund))
}
s.refund -= gas
}
@@ -382,7 +382,8 @@ func (s *StateDB) StorageTrie(addr common.Address) Trie {
return nil
}
cpy := stateObject.deepCopy(s)
- return cpy.updateTrie(s.db)
+ cpy.updateTrie(s.db)
+ return cpy.getTrie(s.db)
}
func (s *StateDB) HasSuicided(addr common.Address) bool {
@@ -856,9 +857,8 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) {
if metrics.EnabledExpensive {
defer func(start time.Time) { s.AccountCommits += time.Since(start) }(time.Now())
}
-
+ var account Account
root, err := s.trie.Commit(func(leaf []byte, parent common.Hash) error {
- var account Account
if err := rlp.DecodeBytes(leaf, &account); err != nil {
return nil
}
diff --git a/core/tx_list.go b/core/tx_list.go
index 75bfdaedac..6b22cbbebe 100644
--- a/core/tx_list.go
+++ b/core/tx_list.go
@@ -494,11 +494,11 @@ func (l *txPricedList) Underpriced(tx *types.Transaction, local *accountSet) boo
// Discard finds a number of most underpriced transactions, removes them from the
// priced list and returns them for further removal from the entire pool.
-func (l *txPricedList) Discard(count int, local *accountSet) types.Transactions {
- drop := make(types.Transactions, 0, count) // Remote underpriced transactions to drop
+func (l *txPricedList) Discard(slots int, local *accountSet) types.Transactions {
+ drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop
save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep
- for len(*l.items) > 0 && count > 0 {
+ for len(*l.items) > 0 && slots > 0 {
// Discard stale transactions if found during cleanup
tx := heap.Pop(l.items).(*types.Transaction)
if l.all.Get(tx.Hash()) == nil {
@@ -510,7 +510,7 @@ func (l *txPricedList) Discard(count int, local *accountSet) types.Transactions
save = append(save, tx)
} else {
drop = append(drop, tx)
- count--
+ slots -= numSlots(tx)
}
}
for _, tx := range save {
diff --git a/core/tx_pool.go b/core/tx_pool.go
index a1a27c3acf..f7ddcdbbac 100644
--- a/core/tx_pool.go
+++ b/core/tx_pool.go
@@ -18,7 +18,6 @@ package core
import (
"errors"
- "fmt"
"math"
"math/big"
"sort"
@@ -39,9 +38,26 @@ import (
const (
// chainHeadChanSize is the size of channel listening to ChainHeadEvent.
chainHeadChanSize = 10
+
+ // txSlotSize is used to calculate how many data slots a single transaction
+ // takes up based on its size. The slots are used as DoS protection, ensuring
+ // that validating a new transaction remains a constant operation (in reality
+ // O(maxslots), where max slots are 4 currently).
+ txSlotSize = 32 * 1024
+
+ // txMaxSize is the maximum size a single transaction can have. This field has
+ // non-trivial consequences: larger transactions are significantly harder and
+ // more expensive to propagate; larger transactions also take more resources
+ // to validate whether they fit into the pool or not.
+ txMaxSize = 2 * txSlotSize // 64KB, don't bump without EIP-2464 support
+ // Quorum - value above is not used. instead, ChainConfig.TransactionSizeLimit is used
)
var (
+ // ErrAlreadyKnown is returned if the transactions is already contained
+ // within the pool.
+ ErrAlreadyKnown = errors.New("already known")
+
// ErrInvalidSender is returned if the transaction contains an invalid signature.
ErrInvalidSender = errors.New("invalid sender")
@@ -112,6 +128,7 @@ var (
pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil)
queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil)
localGauge = metrics.NewRegisteredGauge("txpool/local", nil)
+ slotsGauge = metrics.NewRegisteredGauge("txpool/slots", nil)
)
// TxStatus is the current status of a transaction as seen by the pool.
@@ -604,7 +621,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
if pool.all.Get(hash) != nil {
log.Trace("Discarding already known transaction", "hash", hash)
knownTxMeter.Mark(1)
- return false, fmt.Errorf("known transaction: %x", hash)
+ return false, ErrAlreadyKnown
}
// If the transaction fails basic validation, discard it
if err := pool.validateTx(tx, local); err != nil {
@@ -621,7 +638,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e
return false, ErrUnderpriced
}
// New transaction is better than our worse ones, make room for it
- drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals)
+ drop := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), pool.locals)
for _, tx := range drop {
log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice())
underpricedTxMeter.Mark(1)
@@ -811,7 +828,7 @@ func (pool *TxPool) addTxs(txs []*types.Transaction, local, sync bool) []error {
for i, tx := range txs {
// If the transaction is known, pre-set the error slot
if pool.all.Get(tx.Hash()) != nil {
- errs[i] = fmt.Errorf("known transaction: %x", tx.Hash())
+ errs[i] = ErrAlreadyKnown
knownTxMeter.Mark(1)
continue
}
@@ -889,6 +906,12 @@ func (pool *TxPool) Get(hash common.Hash) *types.Transaction {
return pool.all.Get(hash)
}
+// Has returns an indicator whether txpool has a transaction cached with the
+// given hash.
+func (pool *TxPool) Has(hash common.Hash) bool {
+ return pool.all.Get(hash) != nil
+}
+
// removeTx removes a single transaction from the queue, moving all subsequent
// transactions back to the future queue.
func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) {
@@ -1540,8 +1563,9 @@ func (as *accountSet) merge(other *accountSet) {
// peeking into the pool in TxPool.Get without having to acquire the widely scoped
// TxPool.mu mutex.
type txLookup struct {
- all map[common.Hash]*types.Transaction
- lock sync.RWMutex
+ all map[common.Hash]*types.Transaction
+ slots int
+ lock sync.RWMutex
}
// newTxLookup returns a new txLookup structure.
@@ -1579,11 +1603,22 @@ func (t *txLookup) Count() int {
return len(t.all)
}
+// Slots returns the current number of slots used in the lookup.
+func (t *txLookup) Slots() int {
+ t.lock.RLock()
+ defer t.lock.RUnlock()
+
+ return t.slots
+}
+
// Add adds a transaction to the lookup.
func (t *txLookup) Add(tx *types.Transaction) {
t.lock.Lock()
defer t.lock.Unlock()
+ t.slots += numSlots(tx)
+ slotsGauge.Update(int64(t.slots))
+
t.all[tx.Hash()] = tx
}
@@ -1592,6 +1627,9 @@ func (t *txLookup) Remove(hash common.Hash) {
t.lock.Lock()
defer t.lock.Unlock()
+ t.slots -= numSlots(t.all[hash])
+ slotsGauge.Update(int64(t.slots))
+
delete(t.all, hash)
}
@@ -1599,3 +1637,8 @@ func (t *txLookup) Remove(hash common.Hash) {
func GetChainHeadChannleSize() int {
return chainHeadChanSize
}
+
+// numSlots calculates the number of slots needed for a single transaction.
+func numSlots(tx *types.Transaction) int {
+ return int((tx.Size() + txSlotSize - 1) / txSlotSize)
+}
diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go
index 176dcd5a66..9efb414617 100644
--- a/core/tx_pool_test.go
+++ b/core/tx_pool_test.go
@@ -79,25 +79,30 @@ func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ec
return tx
}
+func pricedDataTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey, bytes uint64) *types.Transaction {
+ data := make([]byte, bytes)
+ rand.Read(data)
+
+ tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(0), gaslimit, gasprice, data), types.HomesteadSigner{}, key)
+ return tx
+}
+
func setupTxPool() (*TxPool, *ecdsa.PrivateKey) {
+ return setupTxPoolWithConfig(params.TestChainConfig)
+}
+
+func setupTxPoolWithConfig(config *params.ChainConfig) (*TxPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()))
- blockchain := &testBlockChain{statedb, statedb, 1000000, new(event.Feed)}
+ blockchain := &testBlockChain{statedb, statedb, 10000000, new(event.Feed)}
key, _ := crypto.GenerateKey()
- pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
+ pool := NewTxPool(testTxPoolConfig, config, blockchain)
return pool, key
}
func setupQuorumTxPool() (*TxPool, *ecdsa.PrivateKey) {
- db := rawdb.NewMemoryDatabase()
- statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
- blockchain := &testBlockChain{statedb, statedb, 1000000, new(event.Feed)}
-
- key, _ := crypto.GenerateKey()
- pool := NewTxPool(testTxPoolConfig, params.QuorumTestChainConfig, blockchain)
-
- return pool, key
+ return setupTxPoolWithConfig(params.QuorumTestChainConfig)
}
// validateTxPoolInternals checks various consistency invariants within the pool.
@@ -581,7 +586,7 @@ func TestTransactionDropping(t *testing.T) {
pool, key := setupTxPool()
defer pool.Stop()
- account, _ := deriveSender(transaction(0, 0, key))
+ account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000))
// Add some pending and some queued transactions
@@ -790,7 +795,7 @@ func TestTransactionGapFilling(t *testing.T) {
pool, key := setupTxPool()
defer pool.Stop()
- account, _ := deriveSender(transaction(0, 0, key))
+ account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))
// Keep track of transaction events to ensure all executables get announced
@@ -844,7 +849,7 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
pool, key := setupTxPool()
defer pool.Stop()
- account, _ := deriveSender(transaction(0, 0, key))
+ account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))
// Keep queuing up transactions and make sure all above a limit are dropped
@@ -1041,7 +1046,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
pool, key := setupTxPool()
defer pool.Stop()
- account, _ := deriveSender(transaction(0, 0, key))
+ account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))
// Keep track of transaction events to ensure all executables get announced
@@ -1120,6 +1125,63 @@ func TestTransactionPendingGlobalLimiting(t *testing.T) {
}
}
+// Test the limit on transaction size is enforced correctly.
+// This test verifies every transaction having allowed size
+// is added to the pool, and longer transactions are rejected.
+func TestTransactionAllowedTxSize(t *testing.T) {
+ t.Parallel()
+
+ // Create a test account and fund it
+ pool, key := setupQuorumTxPool()
+ defer pool.Stop()
+
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ pool.currentState.AddBalance(account, big.NewInt(1000000000))
+
+ // Compute maximal data size for transactions (lower bound).
+ //
+ // It is assumed the fields in the transaction (except of the data) are:
+ // - nonce <= 32 bytes
+ // - gasPrice <= 32 bytes
+ // - gasLimit <= 32 bytes
+ // - recipient == 20 bytes
+ // - value <= 32 bytes
+ // - signature == 65 bytes
+ // All those fields are summed up to at most 213 bytes.
+ baseSize := uint64(213)
+ dataSize := txMaxSize - baseSize
+
+ // Try adding a transaction with maximal allowed size
+ gasPrice := big.NewInt(0)
+ tx := pricedDataTransaction(0, pool.currentMaxGas, gasPrice, key, dataSize)
+ if err := pool.addRemoteSync(tx); err != nil {
+ t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
+ }
+ // Try adding a transaction with random allowed size
+ if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentMaxGas, gasPrice, key, uint64(rand.Intn(int(dataSize))))); err != nil {
+ t.Fatalf("failed to add transaction of random allowed size: %v", err)
+ }
+ // Try adding a transaction of minimal not allowed size
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, gasPrice, key, txMaxSize)); err == nil {
+ t.Fatalf("expected rejection on slightly oversize transaction")
+ }
+ // Try adding a transaction of random not allowed size
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, gasPrice, key, dataSize+1+uint64(rand.Intn(int(10*txMaxSize))))); err == nil {
+ t.Fatalf("expected rejection on oversize transaction")
+ }
+ // Run some sanity checks on the pool internals
+ pending, queued := pool.Stats()
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if err := validateTxPoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
// Tests that if transactions start being capped, transactions are also removed from 'all'
func TestTransactionCapClearsFromAll(t *testing.T) {
t.Parallel()
@@ -1870,6 +1932,24 @@ func TestTransactionStatusCheck(t *testing.T) {
}
}
+// Test the transaction slots consumption is computed correctly
+func TestTransactionSlotCount(t *testing.T) {
+ t.Parallel()
+
+ key, _ := crypto.GenerateKey()
+
+ // Check that an empty transaction consumes a single slot
+ smallTx := pricedDataTransaction(0, 0, big.NewInt(0), key, 0)
+ if slots := numSlots(smallTx); slots != 1 {
+ t.Fatalf("small transactions slot count mismatch: have %d want %d", slots, 1)
+ }
+ // Check that a large transaction consumes the correct number of slots
+ bigTx := pricedDataTransaction(0, 0, big.NewInt(0), key, uint64(10*txSlotSize))
+ if slots := numSlots(bigTx); slots != 11 {
+ t.Fatalf("big transactions slot count mismatch: have %d want %d", slots, 11)
+ }
+}
+
// Benchmarks the speed of validating the contents of the pending queue of the
// transaction pool.
func BenchmarkPendingDemotion100(b *testing.B) { benchmarkPendingDemotion(b, 100) }
@@ -1881,7 +1961,7 @@ func benchmarkPendingDemotion(b *testing.B, size int) {
pool, key := setupTxPool()
defer pool.Stop()
- account, _ := deriveSender(transaction(0, 0, key))
+ account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))
for i := 0; i < size; i++ {
@@ -1906,7 +1986,7 @@ func benchmarkFuturePromotion(b *testing.B, size int) {
pool, key := setupTxPool()
defer pool.Stop()
- account, _ := deriveSender(transaction(0, 0, key))
+ account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))
for i := 0; i < size; i++ {
@@ -1930,7 +2010,7 @@ func benchmarkPoolBatchInsert(b *testing.B, size int) {
pool, key := setupTxPool()
defer pool.Stop()
- account, _ := deriveSender(transaction(0, 0, key))
+ account := crypto.PubkeyToAddress(key.PublicKey)
pool.currentState.AddBalance(account, big.NewInt(1000000))
batches := make([]types.Transactions, b.N)
diff --git a/core/vm/contracts.go b/core/vm/contracts.go
index 9b0ba09ed1..7d304613ce 100644
--- a/core/vm/contracts.go
+++ b/core/vm/contracts.go
@@ -28,6 +28,8 @@ import (
"github.com/ethereum/go-ethereum/crypto/blake2b"
"github.com/ethereum/go-ethereum/crypto/bn256"
"github.com/ethereum/go-ethereum/params"
+
+ //lint:ignore SA1019 Needed for precompile
"golang.org/x/crypto/ripemd160"
)
diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go
index b4a0c07dca..be003a60c9 100644
--- a/core/vm/contracts_test.go
+++ b/core/vm/contracts_test.go
@@ -29,7 +29,6 @@ import (
// precompiledTest defines the input/output pairs for precompiled contract tests.
type precompiledTest struct {
input, expected string
- gas uint64
name string
noBenchmark bool // Benchmark primarily the worst-cases
}
@@ -418,6 +417,24 @@ func testPrecompiled(addr string, test precompiledTest, t *testing.T) {
})
}
+func testPrecompiledOOG(addr string, test precompiledTest, t *testing.T) {
+ p := PrecompiledContractsIstanbul[common.HexToAddress(addr)]
+ in := common.Hex2Bytes(test.input)
+ contract := NewContract(AccountRef(common.HexToAddress("1337")),
+ nil, new(big.Int), p.RequiredGas(in)-1)
+ t.Run(fmt.Sprintf("%s-Gas=%d", test.name, contract.Gas), func(t *testing.T) {
+ _, err := RunPrecompiledContract(p, in, contract)
+ if err.Error() != "out of gas" {
+ t.Errorf("Expected error [out of gas], got [%v]", err)
+ }
+ // Verify that the precompile did not touch the input buffer
+ exp := common.Hex2Bytes(test.input)
+ if !bytes.Equal(in, exp) {
+ t.Errorf("Precompiled %v modified input data", addr)
+ }
+ })
+}
+
func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing.T) {
p := PrecompiledContractsIstanbul[common.HexToAddress(addr)]
in := common.Hex2Bytes(test.input)
@@ -541,6 +558,13 @@ func BenchmarkPrecompiledBn256Add(bench *testing.B) {
}
}
+// Tests OOG
+func TestPrecompiledModExpOOG(t *testing.T) {
+ for _, test := range modexpTests {
+ testPrecompiledOOG("05", test, t)
+ }
+}
+
// Tests the sample inputs from the elliptic curve scalar multiplication EIP 213.
func TestPrecompiledBn256ScalarMul(t *testing.T) {
for _, test := range bn256ScalarMulTests {
diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go
index fe99522978..30bd23e9fe 100644
--- a/core/vm/runtime/env.go
+++ b/core/vm/runtime/env.go
@@ -17,7 +17,6 @@
package runtime
import (
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/vm"
)
@@ -26,8 +25,7 @@ func NewEnv(cfg *Config) *vm.EVM {
context := vm.Context{
CanTransfer: core.CanTransfer,
Transfer: core.Transfer,
- GetHash: func(uint64) common.Hash { return common.Hash{} },
-
+ GetHash: cfg.GetHashFn,
Origin: cfg.Origin,
Coinbase: cfg.Coinbase,
BlockNumber: cfg.BlockNumber,
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index db1f6f3822..dd5dba66f0 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -90,8 +90,8 @@ func setDefaults(cfg *Config) {
// Execute executes the code using the input as call data during the execution.
// It returns the EVM's return value, the new state and an error if it failed.
//
-// Executes sets up a in memory, temporarily, environment for the execution of
-// the given code. It makes sure that it's restored to it's original state afterwards.
+// Execute sets up an in-memory, temporary, environment for the execution of
+// the given code. It makes sure that it's restored to its original state afterwards.
func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) {
if cfg == nil {
cfg = new(Config)
diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go
index 15f545ddca..25d6464da0 100644
--- a/core/vm/runtime/runtime_test.go
+++ b/core/vm/runtime/runtime_test.go
@@ -17,14 +17,20 @@
package runtime
import (
+ "context"
"math/big"
"strings"
"testing"
+ "github.com/jpmorganchase/quorum-security-plugin-sdk-go/proto"
+
"github.com/ethereum/go-ethereum/accounts/abi"
"github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/consensus"
+ "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/core/vm"
"github.com/ethereum/go-ethereum/params"
)
@@ -203,3 +209,117 @@ func BenchmarkEVM_CREATE2_1200(bench *testing.B) {
// initcode size 1200K, repeatedly calls CREATE2 and then modifies the mem contents
benchmarkEVM_Create(bench, "5b5862124f80600080f5600152600056")
}
+
+func fakeHeader(n uint64, parentHash common.Hash) *types.Header {
+ header := types.Header{
+ Coinbase: common.HexToAddress("0x00000000000000000000000000000000deadbeef"),
+ Number: big.NewInt(int64(n)),
+ ParentHash: parentHash,
+ Time: 1000,
+ Nonce: types.BlockNonce{0x1},
+ Extra: []byte{},
+ Difficulty: big.NewInt(0),
+ GasLimit: 100000,
+ }
+ return &header
+}
+
+type dummyChain struct {
+ counter int
+}
+
+// Engine retrieves the chain's consensus engine.
+func (d *dummyChain) Engine() consensus.Engine {
+ return nil
+}
+
+// GetHeader returns the hash corresponding to their hash.
+func (d *dummyChain) GetHeader(h common.Hash, n uint64) *types.Header {
+ d.counter++
+ parentHash := common.Hash{}
+ s := common.LeftPadBytes(big.NewInt(int64(n-1)).Bytes(), 32)
+ copy(parentHash[:], s)
+
+ //parentHash := common.Hash{byte(n - 1)}
+ //fmt.Printf("GetHeader(%x, %d) => header with parent %x\n", h, n, parentHash)
+ return fakeHeader(n, parentHash)
+}
+
+func (d *dummyChain) SupportsMultitenancy(context.Context) (*proto.PreAuthenticatedAuthenticationToken, bool) {
+ return nil, false
+}
+
+// TestBlockhash tests the blockhash operation. It's a bit special, since it internally
+// requires access to a chain reader.
+func TestBlockhash(t *testing.T) {
+ // Current head
+ n := uint64(1000)
+ parentHash := common.Hash{}
+ s := common.LeftPadBytes(big.NewInt(int64(n-1)).Bytes(), 32)
+ copy(parentHash[:], s)
+ header := fakeHeader(n, parentHash)
+
+ // This is the contract we're using. It requests the blockhash for current num (should be all zeroes),
+ // then iteratively fetches all blockhashes back to n-260.
+ // It returns
+ // 1. the first (should be zero)
+ // 2. the second (should be the parent hash)
+ // 3. the last non-zero hash
+ // By making the chain reader return hashes which correlate to the number, we can
+ // verify that it obtained the right hashes where it should
+
+ /*
+
+ pragma solidity ^0.5.3;
+ contract Hasher{
+
+ function test() public view returns (bytes32, bytes32, bytes32){
+ uint256 x = block.number;
+ bytes32 first;
+ bytes32 last;
+ bytes32 zero;
+ zero = blockhash(x); // Should be zeroes
+ first = blockhash(x-1);
+ for(uint256 i = 2 ; i < 260; i++){
+ bytes32 hash = blockhash(x - i);
+ if (uint256(hash) != 0){
+ last = hash;
+ }
+ }
+ return (zero, first, last);
+ }
+ }
+
+ */
+ // The contract above
+ data := common.Hex2Bytes("6080604052348015600f57600080fd5b50600436106045576000357c010000000000000000000000000000000000000000000000000000000090048063f8a8fd6d14604a575b600080fd5b60506074565b60405180848152602001838152602001828152602001935050505060405180910390f35b600080600080439050600080600083409050600184034092506000600290505b61010481101560c35760008186034090506000816001900414151560b6578093505b5080806001019150506094565b508083839650965096505050505090919256fea165627a7a72305820462d71b510c1725ff35946c20b415b0d50b468ea157c8c77dff9466c9cb85f560029")
+ // The method call to 'test()'
+ input := common.Hex2Bytes("f8a8fd6d")
+ chain := &dummyChain{}
+ ret, _, err := Execute(data, input, &Config{
+ GetHashFn: core.GetHashFn(header, chain),
+ BlockNumber: new(big.Int).Set(header.Number),
+ })
+ if err != nil {
+ t.Fatalf("expected no error, got %v", err)
+ }
+ if len(ret) != 96 {
+ t.Fatalf("expected returndata to be 96 bytes, got %d", len(ret))
+ }
+
+ zero := new(big.Int).SetBytes(ret[0:32])
+ first := new(big.Int).SetBytes(ret[32:64])
+ last := new(big.Int).SetBytes(ret[64:96])
+ if zero.BitLen() != 0 {
+ t.Fatalf("expected zeroes, got %x", ret[0:32])
+ }
+ if first.Uint64() != 999 {
+ t.Fatalf("second block should be 999, got %d (%x)", first, ret[32:64])
+ }
+ if last.Uint64() != 744 {
+ t.Fatalf("last block should be 744, got %d (%x)", last, ret[64:96])
+ }
+ if exp, got := 255, chain.counter; exp != got {
+ t.Errorf("suboptimal; too much chain iteration, expected %d, got %d", exp, got)
+ }
+}
diff --git a/core/vm/stack.go b/core/vm/stack.go
index 4c1b9e8037..c9c3d07f4b 100644
--- a/core/vm/stack.go
+++ b/core/vm/stack.go
@@ -74,13 +74,6 @@ func (st *Stack) Back(n int) *big.Int {
return st.data[st.len()-n-1]
}
-func (st *Stack) require(n int) error {
- if st.len() < n {
- return fmt.Errorf("stack underflow (%d <=> %d)", len(st.data), n)
- }
- return nil
-}
-
// Print dumps the content of the stack
func (st *Stack) Print() {
fmt.Println("### stack ###")
diff --git a/eth/api.go b/eth/api.go
index f82d685629..f67fc517a6 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -195,8 +195,16 @@ func NewPrivateAdminAPI(eth *Ethereum) *PrivateAdminAPI {
return &PrivateAdminAPI{eth: eth}
}
-// ExportChain exports the current blockchain into a local file.
-func (api *PrivateAdminAPI) ExportChain(file string) (bool, error) {
+// ExportChain exports the current blockchain into a local file,
+// or a range of blocks if first and last are non-nil
+func (api *PrivateAdminAPI) ExportChain(file string, first *uint64, last *uint64) (bool, error) {
+ if first == nil && last != nil {
+ return false, errors.New("last cannot be specified without first")
+ }
+ if first != nil && last == nil {
+ head := api.eth.BlockChain().CurrentHeader().Number.Uint64()
+ last = &head
+ }
if _, err := os.Stat(file); err == nil {
// File already exists. Allowing overwrite could be a DoS vecotor,
// since the 'file' may point to arbitrary paths on the drive
@@ -216,7 +224,11 @@ func (api *PrivateAdminAPI) ExportChain(file string) (bool, error) {
}
// Export the blockchain
- if err := api.eth.BlockChain().Export(writer); err != nil {
+ if first != nil {
+ if err := api.eth.BlockChain().ExportN(writer, *first, *last); err != nil {
+ return false, err
+ }
+ } else if err := api.eth.BlockChain().Export(writer); err != nil {
return false, err
}
return true, nil
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 1e36a70ef4..c4330c1c4b 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -249,6 +249,10 @@ func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEven
return b.eth.BlockChain().SubscribeRemovedLogsEvent(ch)
}
+func (b *EthAPIBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ return b.eth.miner.SubscribePendingLogs(ch)
+}
+
func (b *EthAPIBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return b.eth.BlockChain().SubscribeChainEvent(ch)
}
diff --git a/eth/backend.go b/eth/backend.go
index ad468a3aa3..7a8ac949bf 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -51,6 +51,7 @@ import (
"github.com/ethereum/go-ethereum/multitenancy"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rlp"
@@ -78,6 +79,7 @@ type Ethereum struct {
blockchain *core.BlockChain
protocolManager *ProtocolManager
lesServer LesServer
+ dialCandiates enode.Iterator
// DB interfaces
chainDb ethdb.Database // Block chain database
@@ -150,7 +152,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul, config.OverrideMuirGlacier)
if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok {
return nil, genesisErr
}
@@ -264,6 +266,11 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
}
eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams)
+ eth.dialCandiates, err = eth.setupDiscovery(&ctx.Config.P2P)
+ if err != nil {
+ return nil, err
+ }
+
return eth, nil
}
@@ -567,6 +574,7 @@ func (s *Ethereum) Protocols() []p2p.Protocol {
for i, vsn := range ProtocolVersions {
protos[i] = s.protocolManager.makeProtocol(vsn)
protos[i].Attributes = []enr.Entry{s.currentEthEntry()}
+ protos[i].DialCandidates = s.dialCandiates
}
if s.lesServer != nil {
protos = append(protos, s.lesServer.Protocols()...)
diff --git a/eth/config.go b/eth/config.go
index ecdcae9058..18221a5d69 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -98,6 +98,10 @@ type Config struct {
NetworkId uint64 // Network ID to use for selecting peers to connect to
SyncMode downloader.SyncMode
+ // This can be set to list of enrtree:// URLs which will be queried for
+ // for nodes to connect to.
+ DiscoveryURLs []string
+
NoPruning bool // Whether to disable pruning and flush everything to disk
NoPrefetch bool // Whether to disable prefetching and only load state on demand
@@ -169,5 +173,8 @@ type Config struct {
// timeout value for call
EVMCallTimeOut time.Duration
+ // MuirGlacier block override (TODO: remove after the fork)
+ OverrideMuirGlacier *big.Int `toml:",omitempty"`
+
EnableMultitenancy bool
}
diff --git a/eth/enr_entry.go b/eth/discovery.go
similarity index 79%
rename from eth/enr_entry.go
rename to eth/discovery.go
index d9e7b95784..97d6322ca1 100644
--- a/eth/enr_entry.go
+++ b/eth/discovery.go
@@ -19,6 +19,8 @@ package eth
import (
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/forkid"
+ "github.com/ethereum/go-ethereum/p2p"
+ "github.com/ethereum/go-ethereum/p2p/dnsdisc"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/rlp"
)
@@ -37,6 +39,7 @@ func (e ethEntry) ENRKey() string {
return "eth"
}
+// startEthEntryUpdate starts the ENR updater loop.
func (eth *Ethereum) startEthEntryUpdate(ln *enode.LocalNode) {
var newHead = make(chan core.ChainHeadEvent, 10)
sub := eth.blockchain.SubscribeChainHeadEvent(newHead)
@@ -59,3 +62,12 @@ func (eth *Ethereum) startEthEntryUpdate(ln *enode.LocalNode) {
func (eth *Ethereum) currentEthEntry() *ethEntry {
return ðEntry{ForkID: forkid.NewID(eth.blockchain)}
}
+
+// setupDiscovery creates the node discovery source for the eth protocol.
+func (eth *Ethereum) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error) {
+ if cfg.NoDiscovery || len(eth.config.DiscoveryURLs) == 0 {
+ return nil, nil
+ }
+ client := dnsdisc.NewClient(dnsdisc.Config{})
+ return client.NewIterator(eth.config.DiscoveryURLs...)
+}
diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go
index b4d7391a20..6aece04fd4 100644
--- a/eth/downloader/peer.go
+++ b/eth/downloader/peer.go
@@ -471,7 +471,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.headerThroughput
}
- return ps.idlePeers(62, 64, idle, throughput)
+ return ps.idlePeers(62, 65, idle, throughput)
}
// BodyIdlePeers retrieves a flat list of all the currently body-idle peers within
@@ -485,7 +485,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.blockThroughput
}
- return ps.idlePeers(62, 64, idle, throughput)
+ return ps.idlePeers(62, 65, idle, throughput)
}
// ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers
@@ -499,7 +499,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.receiptThroughput
}
- return ps.idlePeers(63, 64, idle, throughput)
+ return ps.idlePeers(63, 65, idle, throughput)
}
// NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle
@@ -513,7 +513,7 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) {
defer p.lock.RUnlock()
return p.stateThroughput
}
- return ps.idlePeers(63, 64, idle, throughput)
+ return ps.idlePeers(63, 65, idle, throughput)
}
// idlePeers retrieves a flat list of all currently idle peers satisfying the
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/block_fetcher.go
similarity index 80%
rename from eth/fetcher/fetcher.go
rename to eth/fetcher/block_fetcher.go
index 28c532d9bd..b6cab05deb 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/block_fetcher.go
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-// Package fetcher contains the block announcement based synchronisation.
+// Package fetcher contains the announcement based blocks or transaction synchronisation.
package fetcher
import (
@@ -27,16 +27,40 @@ import (
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
)
const (
- arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
+ arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
gatherSlack = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
- fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block
- maxUncleDist = 7 // Maximum allowed backward distance from the chain head
- maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
- hashLimit = 256 // Maximum number of unique blocks a peer may have announced
- blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
+ fetchTimeout = 5 * time.Second // Maximum allotted time to return an explicitly requested block/transaction
+)
+
+const (
+ maxUncleDist = 7 // Maximum allowed backward distance from the chain head
+ maxQueueDist = 32 // Maximum allowed distance from the chain head to queue
+ hashLimit = 256 // Maximum number of unique blocks a peer may have announced
+ blockLimit = 64 // Maximum number of unique blocks a peer may have delivered
+)
+
+var (
+ blockAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
+ blockAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
+ blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
+ blockAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)
+
+ blockBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
+ blockBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
+ blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
+ blockBroadcastDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil)
+
+ headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
+ bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)
+
+ headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
+ headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
+ bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
+ bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
)
var (
@@ -67,9 +91,9 @@ type chainInsertFn func(types.Blocks) (int, error)
// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)
-// announce is the hash notification of the availability of a new block in the
+// blockAnnounce is the hash notification of the availability of a new block in the
// network.
-type announce struct {
+type blockAnnounce struct {
hash common.Hash // Hash of the block being announced
number uint64 // Number of the block being announced (0 = unknown | old protocol)
header *types.Header // Header of the block partially reassembled (new protocol)
@@ -97,18 +121,18 @@ type bodyFilterTask struct {
time time.Time // Arrival time of the blocks' contents
}
-// inject represents a schedules import operation.
-type inject struct {
+// blockInject represents a schedules import operation.
+type blockInject struct {
origin string
block *types.Block
}
-// Fetcher is responsible for accumulating block announcements from various peers
+// BlockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
-type Fetcher struct {
+type BlockFetcher struct {
// Various event channels
- notify chan *announce
- inject chan *inject
+ notify chan *blockAnnounce
+ inject chan *blockInject
headerFilter chan chan *headerFilterTask
bodyFilter chan chan *bodyFilterTask
@@ -117,16 +141,16 @@ type Fetcher struct {
quit chan struct{}
// Announce states
- announces map[string]int // Per peer announce counts to prevent memory exhaustion
- announced map[common.Hash][]*announce // Announced blocks, scheduled for fetching
- fetching map[common.Hash]*announce // Announced blocks, currently fetching
- fetched map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
- completing map[common.Hash]*announce // Blocks with headers, currently body-completing
+ announces map[string]int // Per peer blockAnnounce counts to prevent memory exhaustion
+ announced map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
+ fetching map[common.Hash]*blockAnnounce // Announced blocks, currently fetching
+ fetched map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
+ completing map[common.Hash]*blockAnnounce // Blocks with headers, currently body-completing
// Block cache
- queue *prque.Prque // Queue containing the import operations (block number sorted)
- queues map[string]int // Per peer block counts to prevent memory exhaustion
- queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)
+ queue *prque.Prque // Queue containing the import operations (block number sorted)
+ queues map[string]int // Per peer block counts to prevent memory exhaustion
+ queued map[common.Hash]*blockInject // Set of already queued blocks (to dedupe imports)
// Callbacks
getBlock blockRetrievalFn // Retrieves a block from the local chain
@@ -137,30 +161,30 @@ type Fetcher struct {
dropPeer peerDropFn // Drops a peer for misbehaving
// Testing hooks
- announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
+ announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
queueChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
fetchingHook func([]common.Hash) // Method to call upon starting a block (eth/61) or header (eth/62) fetch
completingHook func([]common.Hash) // Method to call upon starting a block body fetch (eth/62)
importedHook func(*types.Block) // Method to call upon successful block import (both eth/61 and eth/62)
}
-// New creates a block fetcher to retrieve blocks based on hash announcements.
-func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
- return &Fetcher{
- notify: make(chan *announce),
- inject: make(chan *inject),
+// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
+func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
+ return &BlockFetcher{
+ notify: make(chan *blockAnnounce),
+ inject: make(chan *blockInject),
headerFilter: make(chan chan *headerFilterTask),
bodyFilter: make(chan chan *bodyFilterTask),
done: make(chan common.Hash),
quit: make(chan struct{}),
announces: make(map[string]int),
- announced: make(map[common.Hash][]*announce),
- fetching: make(map[common.Hash]*announce),
- fetched: make(map[common.Hash][]*announce),
- completing: make(map[common.Hash]*announce),
+ announced: make(map[common.Hash][]*blockAnnounce),
+ fetching: make(map[common.Hash]*blockAnnounce),
+ fetched: make(map[common.Hash][]*blockAnnounce),
+ completing: make(map[common.Hash]*blockAnnounce),
queue: prque.New(nil),
queues: make(map[string]int),
- queued: make(map[common.Hash]*inject),
+ queued: make(map[common.Hash]*blockInject),
getBlock: getBlock,
verifyHeader: verifyHeader,
broadcastBlock: broadcastBlock,
@@ -172,21 +196,21 @@ func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBloc
// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested.
-func (f *Fetcher) Start() {
+func (f *BlockFetcher) Start() {
go f.loop()
}
// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
-func (f *Fetcher) Stop() {
+func (f *BlockFetcher) Stop() {
close(f.quit)
}
// Notify announces the fetcher of the potential availability of a new block in
// the network.
-func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
+func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
- block := &announce{
+ block := &blockAnnounce{
hash: hash,
number: number,
time: time,
@@ -203,8 +227,8 @@ func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time
}
// Enqueue tries to fill gaps the fetcher's future import queue.
-func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
- op := &inject{
+func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
+ op := &blockInject{
origin: peer,
block: block,
}
@@ -218,7 +242,7 @@ func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
-func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
+func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
// Send the filter channel to the fetcher
@@ -246,7 +270,7 @@ func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.
// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
-func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
+func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))
// Send the filter channel to the fetcher
@@ -274,7 +298,7 @@ func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction,
// Loop is the main fetcher loop, checking and processing various notification
// events.
-func (f *Fetcher) loop() {
+func (f *BlockFetcher) loop() {
// Iterate the block fetching until a quit is requested
fetchTimer := time.NewTimer(0)
completeTimer := time.NewTimer(0)
@@ -289,7 +313,7 @@ func (f *Fetcher) loop() {
// Import any queued blocks that could potentially fit
height := f.chainHeight()
for !f.queue.Empty() {
- op := f.queue.PopItem().(*inject)
+ op := f.queue.PopItem().(*blockInject)
hash := op.block.Hash()
if f.queueChangeHook != nil {
f.queueChangeHook(hash, false)
@@ -313,24 +337,24 @@ func (f *Fetcher) loop() {
// Wait for an outside event to occur
select {
case <-f.quit:
- // Fetcher terminating, abort all operations
+ // BlockFetcher terminating, abort all operations
return
case notification := <-f.notify:
// A block was announced, make sure the peer isn't DOSing us
- propAnnounceInMeter.Mark(1)
+ blockAnnounceInMeter.Mark(1)
count := f.announces[notification.origin] + 1
if count > hashLimit {
log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
- propAnnounceDOSMeter.Mark(1)
+ blockAnnounceDOSMeter.Mark(1)
break
}
// If we have a valid block number, check that it's potentially useful
if notification.number > 0 {
if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
- propAnnounceDropMeter.Mark(1)
+ blockAnnounceDropMeter.Mark(1)
break
}
}
@@ -352,7 +376,7 @@ func (f *Fetcher) loop() {
case op := <-f.inject:
// A direct block insertion was requested, try and fill any pending gaps
- propBroadcastInMeter.Mark(1)
+ blockBroadcastInMeter.Mark(1)
f.enqueue(op.origin, op.block)
case hash := <-f.done:
@@ -439,7 +463,7 @@ func (f *Fetcher) loop() {
// Split the batch of headers into unknown ones (to return to the caller),
// known incomplete ones (requiring body retrievals) and completed blocks.
- unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
+ unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}
for _, header := range task.headers {
hash := header.Hash()
@@ -475,7 +499,7 @@ func (f *Fetcher) loop() {
f.forgetHash(hash)
}
} else {
- // Fetcher doesn't know about it, add to the return list
+ // BlockFetcher doesn't know about it, add to the return list
unknown = append(unknown, header)
}
}
@@ -562,8 +586,8 @@ func (f *Fetcher) loop() {
}
}
-// rescheduleFetch resets the specified fetch timer to the next announce timeout.
-func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
+// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
+func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
// Short circuit if no blocks are announced
if len(f.announced) == 0 {
return
@@ -579,7 +603,7 @@ func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
}
// rescheduleComplete resets the specified completion timer to the next fetch timeout.
-func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
+func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
// Short circuit if no headers are fetched
if len(f.fetched) == 0 {
return
@@ -596,27 +620,27 @@ func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
-func (f *Fetcher) enqueue(peer string, block *types.Block) {
+func (f *BlockFetcher) enqueue(peer string, block *types.Block) {
hash := block.Hash()
// Ensure the peer isn't DOSing us
count := f.queues[peer] + 1
if count > blockLimit {
log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
- propBroadcastDOSMeter.Mark(1)
+ blockBroadcastDOSMeter.Mark(1)
f.forgetHash(hash)
return
}
// Discard any past or too distant blocks
if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
- propBroadcastDropMeter.Mark(1)
+ blockBroadcastDropMeter.Mark(1)
f.forgetHash(hash)
return
}
// Schedule the block for future importing
if _, ok := f.queued[hash]; !ok {
- op := &inject{
+ op := &blockInject{
origin: peer,
block: block,
}
@@ -633,7 +657,7 @@ func (f *Fetcher) enqueue(peer string, block *types.Block) {
// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
-func (f *Fetcher) insert(peer string, block *types.Block) {
+func (f *BlockFetcher) insert(peer string, block *types.Block) {
hash := block.Hash()
// Run the import on a new thread
@@ -651,7 +675,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
switch err := f.verifyHeader(block.Header()); err {
case nil:
// All ok, quickly propagate to our peers
- propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
+ blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, true)
case consensus.ErrFutureBlock:
@@ -669,7 +693,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
return
}
// If import succeeded, broadcast the block
- propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
+ blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
go f.broadcastBlock(block, false)
// Invoke the testing hook if needed
@@ -681,7 +705,7 @@ func (f *Fetcher) insert(peer string, block *types.Block) {
// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.
-func (f *Fetcher) forgetHash(hash common.Hash) {
+func (f *BlockFetcher) forgetHash(hash common.Hash) {
// Remove all pending announces and decrement DOS counters
for _, announce := range f.announced[hash] {
f.announces[announce.origin]--
@@ -723,7 +747,7 @@ func (f *Fetcher) forgetHash(hash common.Hash) {
// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
-func (f *Fetcher) forgetBlock(hash common.Hash) {
+func (f *BlockFetcher) forgetBlock(hash common.Hash) {
if insert := f.queued[hash]; insert != nil {
f.queues[insert.origin]--
if f.queues[insert.origin] == 0 {
diff --git a/eth/fetcher/fetcher_test.go b/eth/fetcher/block_fetcher_test.go
similarity index 99%
rename from eth/fetcher/fetcher_test.go
rename to eth/fetcher/block_fetcher_test.go
index 83172c5348..038ead12e7 100644
--- a/eth/fetcher/fetcher_test.go
+++ b/eth/fetcher/block_fetcher_test.go
@@ -76,7 +76,7 @@ func makeChain(n int, seed byte, parent *types.Block) ([]common.Hash, map[common
// fetcherTester is a test simulator for mocking out local block chain.
type fetcherTester struct {
- fetcher *Fetcher
+ fetcher *BlockFetcher
hashes []common.Hash // Hash chain belonging to the tester
blocks map[common.Hash]*types.Block // Blocks belonging to the tester
@@ -92,7 +92,7 @@ func newTester() *fetcherTester {
blocks: map[common.Hash]*types.Block{genesis.Hash(): genesis},
drops: make(map[string]bool),
}
- tester.fetcher = New(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
+ tester.fetcher = NewBlockFetcher(tester.getBlock, tester.verifyHeader, tester.broadcastBlock, tester.chainHeight, tester.insertChain, tester.dropPeer)
tester.fetcher.Start()
return tester
diff --git a/eth/fetcher/metrics.go b/eth/fetcher/metrics.go
deleted file mode 100644
index d68d12f000..0000000000
--- a/eth/fetcher/metrics.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-// Contains the metrics collected by the fetcher.
-
-package fetcher
-
-import (
- "github.com/ethereum/go-ethereum/metrics"
-)
-
-var (
- propAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/in", nil)
- propAnnounceOutTimer = metrics.NewRegisteredTimer("eth/fetcher/prop/announces/out", nil)
- propAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/drop", nil)
- propAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/announces/dos", nil)
-
- propBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/in", nil)
- propBroadcastOutTimer = metrics.NewRegisteredTimer("eth/fetcher/prop/broadcasts/out", nil)
- propBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/drop", nil)
- propBroadcastDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/prop/broadcasts/dos", nil)
-
- headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/fetch/headers", nil)
- bodyFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/fetch/bodies", nil)
-
- headerFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/in", nil)
- headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/headers/out", nil)
- bodyFilterInMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/in", nil)
- bodyFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/filter/bodies/out", nil)
-)
diff --git a/eth/fetcher/tx_fetcher.go b/eth/fetcher/tx_fetcher.go
new file mode 100644
index 0000000000..c497cebb45
--- /dev/null
+++ b/eth/fetcher/tx_fetcher.go
@@ -0,0 +1,894 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package fetcher
+
+import (
+ "bytes"
+ "fmt"
+ mrand "math/rand"
+ "sort"
+ "time"
+
+ mapset "github.com/deckarep/golang-set"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+)
+
+const (
+ // maxTxAnnounces is the maximum number of unique transaction a peer
+ // can announce in a short time.
+ maxTxAnnounces = 4096
+
+ // maxTxRetrievals is the maximum transaction number can be fetched in one
+ // request. The rationale to pick 256 is:
+ // - In eth protocol, the softResponseLimit is 2MB. Nowadays according to
+ // Etherscan the average transaction size is around 200B, so in theory
+ // we can include lots of transaction in a single protocol packet.
+ // - However the maximum size of a single transaction is raised to 128KB,
+ // so pick a middle value here to ensure we can maximize the efficiency
+ // of the retrieval and response size overflow won't happen in most cases.
+ maxTxRetrievals = 256
+
+ // maxTxUnderpricedSetSize is the size of the underpriced transaction set that
+ // is used to track recent transactions that have been dropped so we don't
+ // re-request them.
+ maxTxUnderpricedSetSize = 32768
+
+ // txArriveTimeout is the time allowance before an announced transaction is
+ // explicitly requested.
+ txArriveTimeout = 500 * time.Millisecond
+
+ // txGatherSlack is the interval used to collate almost-expired announces
+ // with network fetches.
+ txGatherSlack = 100 * time.Millisecond
+)
+
+var (
+ // txFetchTimeout is the maximum allotted time to return an explicitly
+ // requested transaction.
+ txFetchTimeout = 5 * time.Second
+)
+
+var (
+ txAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
+ txAnnounceKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
+ txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
+ txAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)
+
+ txBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)
+ txBroadcastKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil)
+ txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil)
+ txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil)
+
+ txRequestOutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil)
+ txRequestFailMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil)
+ txRequestDoneMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil)
+ txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil)
+
+ txReplyInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil)
+ txReplyKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil)
+ txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil)
+ txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil)
+
+ txFetcherWaitingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil)
+ txFetcherWaitingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil)
+ txFetcherQueueingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil)
+ txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil)
+ txFetcherFetchingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil)
+ txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil)
+)
+
+// txAnnounce is the notification of the availability of a batch
+// of new transactions in the network.
+type txAnnounce struct {
+ origin string // Identifier of the peer originating the notification
+ hashes []common.Hash // Batch of transaction hashes being announced
+}
+
+// txRequest represents an in-flight transaction retrieval request destined to
+// a specific peer.
+type txRequest struct {
+ hashes []common.Hash // Transactions having been requested
+ stolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request)
+ time mclock.AbsTime // Timestamp of the request
+}
+
+// txDelivery is the notification that a batch of transactions have been added
+// to the pool and should be untracked.
+type txDelivery struct {
+ origin string // Identifier of the peer originating the notification
+ hashes []common.Hash // Batch of transaction hashes having been delivered
+ direct bool // Whether this is a direct reply or a broadcast
+}
+
+// txDrop is the notification that a peer has disconnected.
+type txDrop struct {
+ peer string
+}
+
+// TxFetcher is responsible for retrieving new transactions based on announcements.
+//
+// The fetcher operates in 3 stages:
+// - Transactions that are newly discovered are moved into a wait list.
+// - After ~500ms passes, transactions from the wait list that have not been
+// broadcast to us in whole are moved into a queueing area.
+// - When a connected peer doesn't have in-flight retrieval requests, any
+// transaction queued up (and announced by the peer) are allocated to the
+// peer and moved into a fetching status until it's fulfilled or fails.
+//
+// The invariants of the fetcher are:
+// - Each tracked transaction (hash) must only be present in one of the
+// three stages. This ensures that the fetcher operates akin to a finite
+// state automata and there's no data leak.
+// - Each peer that announced transactions may be scheduled retrievals, but
+// only ever one concurrently. This ensures we can immediately know what is
+// missing from a reply and reschedule it.
+type TxFetcher struct {
+ notify chan *txAnnounce
+ cleanup chan *txDelivery
+ drop chan *txDrop
+ quit chan struct{}
+
+ underpriced mapset.Set // Transactions discarded as too cheap (don't re-fetch)
+
+ // Stage 1: Waiting lists for newly discovered transactions that might be
+ // broadcast without needing explicit request/reply round trips.
+ waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast
+ waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist
+ waitslots map[string]map[common.Hash]struct{} // Waiting announcement sgroupped by peer (DoS protection)
+
+ // Stage 2: Queue of transactions that waiting to be allocated to some peer
+ // to be retrieved directly.
+ announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
+ announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
+
+ // Stage 3: Set of transactions currently being retrieved, some which may be
+ // fulfilled and some rescheduled. Note, this step shares 'announces' from the
+ // previous stage to avoid having to duplicate (need it for DoS checks).
+ fetching map[common.Hash]string // Transaction set currently being retrieved
+ requests map[string]*txRequest // In-flight transaction retrievals
+ alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails
+
+ // Callbacks
+ hasTx func(common.Hash) bool // Retrieves a tx from the local txpool
+ addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
+ fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer
+
+ step chan struct{} // Notification channel when the fetcher loop iterates
+ clock mclock.Clock // Time wrapper to simulate in tests
+ rand *mrand.Rand // Randomizer to use in tests instead of map range loops (soft-random)
+}
+
+// NewTxFetcher creates a transaction fetcher to retrieve transactions
+// based on hash announcements.
+func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
+ return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
+}
+
+// NewTxFetcherForTests is a testing method to mock out the realtime clock with
+// a simulated version and the internal randomness with a deterministic one.
+func NewTxFetcherForTests(
+ hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
+ clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
+ return &TxFetcher{
+ notify: make(chan *txAnnounce),
+ cleanup: make(chan *txDelivery),
+ drop: make(chan *txDrop),
+ quit: make(chan struct{}),
+ waitlist: make(map[common.Hash]map[string]struct{}),
+ waittime: make(map[common.Hash]mclock.AbsTime),
+ waitslots: make(map[string]map[common.Hash]struct{}),
+ announces: make(map[string]map[common.Hash]struct{}),
+ announced: make(map[common.Hash]map[string]struct{}),
+ fetching: make(map[common.Hash]string),
+ requests: make(map[string]*txRequest),
+ alternates: make(map[common.Hash]map[string]struct{}),
+ underpriced: mapset.NewSet(),
+ hasTx: hasTx,
+ addTxs: addTxs,
+ fetchTxs: fetchTxs,
+ clock: clock,
+ rand: rand,
+ }
+}
+
+// Notify announces the fetcher of the potential availability of a new batch of
+// transactions in the network.
+func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
+ // Keep track of all the announced transactions
+ txAnnounceInMeter.Mark(int64(len(hashes)))
+
+ // Skip any transaction announcements that we already know of, or that we've
+ // previously marked as cheap and discarded. This check is of course racey,
+ // because multiple concurrent notifies will still manage to pass it, but it's
+ // still valuable to check here because it runs concurrent to the internal
+ // loop, so anything caught here is time saved internally.
+ var (
+ unknowns = make([]common.Hash, 0, len(hashes))
+ duplicate, underpriced int64
+ )
+ for _, hash := range hashes {
+ switch {
+ case f.hasTx(hash):
+ duplicate++
+
+ case f.underpriced.Contains(hash):
+ underpriced++
+
+ default:
+ unknowns = append(unknowns, hash)
+ }
+ }
+ txAnnounceKnownMeter.Mark(duplicate)
+ txAnnounceUnderpricedMeter.Mark(underpriced)
+
+ // If anything's left to announce, push it into the internal loop
+ if len(unknowns) == 0 {
+ return nil
+ }
+ announce := &txAnnounce{
+ origin: peer,
+ hashes: unknowns,
+ }
+ select {
+ case f.notify <- announce:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// Enqueue imports a batch of received transaction into the transaction pool
+// and the fetcher. This method may be called by both transaction broadcasts and
+// direct request replies. The differentiation is important so the fetcher can
+// re-schedule missing transactions as soon as possible.
+func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
+ // Keep track of all the propagated transactions
+ if direct {
+ txReplyInMeter.Mark(int64(len(txs)))
+ } else {
+ txBroadcastInMeter.Mark(int64(len(txs)))
+ }
+ // Push all the transactions into the pool, tracking underpriced ones to avoid
+ // re-requesting them and dropping the peer in case of malicious transfers.
+ var (
+ added = make([]common.Hash, 0, len(txs))
+ duplicate int64
+ underpriced int64
+ otherreject int64
+ )
+ errs := f.addTxs(txs)
+ for i, err := range errs {
+ if err != nil {
+ // Track the transaction hash if the price is too low for us.
+ // Avoid re-request this transaction when we receive another
+ // announcement.
+ if err == core.ErrUnderpriced || err == core.ErrReplaceUnderpriced {
+ for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
+ f.underpriced.Pop()
+ }
+ f.underpriced.Add(txs[i].Hash())
+ }
+ // Track a few interesting failure types
+ switch err {
+ case nil: // Noop, but need to handle to not count these
+
+ case core.ErrAlreadyKnown:
+ duplicate++
+
+ case core.ErrUnderpriced, core.ErrReplaceUnderpriced:
+ underpriced++
+
+ default:
+ otherreject++
+ }
+ }
+ added = append(added, txs[i].Hash())
+ }
+ if direct {
+ txReplyKnownMeter.Mark(duplicate)
+ txReplyUnderpricedMeter.Mark(underpriced)
+ txReplyOtherRejectMeter.Mark(otherreject)
+ } else {
+ txBroadcastKnownMeter.Mark(duplicate)
+ txBroadcastUnderpricedMeter.Mark(underpriced)
+ txBroadcastOtherRejectMeter.Mark(otherreject)
+ }
+ select {
+ case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// Drop should be called when a peer disconnects. It cleans up all the internal
+// data structures of the given node.
+func (f *TxFetcher) Drop(peer string) error {
+ select {
+ case f.drop <- &txDrop{peer: peer}:
+ return nil
+ case <-f.quit:
+ return errTerminated
+ }
+}
+
+// Start boots up the announcement based synchroniser, accepting and processing
+// hash notifications and block fetches until termination requested.
+func (f *TxFetcher) Start() {
+ go f.loop()
+}
+
+// Stop terminates the announcement based synchroniser, canceling all pending
+// operations.
+func (f *TxFetcher) Stop() {
+ close(f.quit)
+}
+
+func (f *TxFetcher) loop() {
+ var (
+ waitTimer = new(mclock.Timer)
+ timeoutTimer = new(mclock.Timer)
+
+ waitTrigger = make(chan struct{}, 1)
+ timeoutTrigger = make(chan struct{}, 1)
+ )
+ for {
+ select {
+ case ann := <-f.notify:
+ // Drop part of the new announcements if there are too many accumulated.
+ // Note, we could but do not filter already known transactions here as
+ // the probability of something arriving between this call and the pre-
+ // filter outside is essentially zero.
+ used := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin])
+ if used >= maxTxAnnounces {
+ // This can happen if a set of transactions are requested but not
+ // all fulfilled, so the remainder are rescheduled without the cap
+ // check. Should be fine as the limit is in the thousands and the
+ // request size in the hundreds.
+ txAnnounceDOSMeter.Mark(int64(len(ann.hashes)))
+ break
+ }
+ want := used + len(ann.hashes)
+ if want > maxTxAnnounces {
+ txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
+ ann.hashes = ann.hashes[:want-maxTxAnnounces]
+ }
+ // All is well, schedule the remainder of the transactions
+ idleWait := len(f.waittime) == 0
+ _, oldPeer := f.announces[ann.origin]
+
+ for _, hash := range ann.hashes {
+ // If the transaction is already downloading, add it to the list
+ // of possible alternates (in case the current retrieval fails) and
+ // also account it for the peer.
+ if f.alternates[hash] != nil {
+ f.alternates[hash][ann.origin] = struct{}{}
+
+ // Stage 2 and 3 share the set of origins per tx
+ if announces := f.announces[ann.origin]; announces != nil {
+ announces[hash] = struct{}{}
+ } else {
+ f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ continue
+ }
+ // If the transaction is not downloading, but is already queued
+ // from a different peer, track it for the new peer too.
+ if f.announced[hash] != nil {
+ f.announced[hash][ann.origin] = struct{}{}
+
+ // Stage 2 and 3 share the set of origins per tx
+ if announces := f.announces[ann.origin]; announces != nil {
+ announces[hash] = struct{}{}
+ } else {
+ f.announces[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ continue
+ }
+ // If the transaction is already known to the fetcher, but not
+ // yet downloading, add the peer as an alternate origin in the
+ // waiting list.
+ if f.waitlist[hash] != nil {
+ f.waitlist[hash][ann.origin] = struct{}{}
+
+ if waitslots := f.waitslots[ann.origin]; waitslots != nil {
+ waitslots[hash] = struct{}{}
+ } else {
+ f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ continue
+ }
+ // Transaction unknown to the fetcher, insert it into the waiting list
+ f.waitlist[hash] = map[string]struct{}{ann.origin: struct{}{}}
+ f.waittime[hash] = f.clock.Now()
+
+ if waitslots := f.waitslots[ann.origin]; waitslots != nil {
+ waitslots[hash] = struct{}{}
+ } else {
+ f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ }
+ // If a new item was added to the waitlist, schedule it into the fetcher
+ if idleWait && len(f.waittime) > 0 {
+ f.rescheduleWait(waitTimer, waitTrigger)
+ }
+ // If this peer is new and announced something already queued, maybe
+ // request transactions from them
+ if !oldPeer && len(f.announces[ann.origin]) > 0 {
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: struct{}{}})
+ }
+
+ case <-waitTrigger:
+ // At least one transaction's waiting time ran out, push all expired
+ // ones into the retrieval queues
+ actives := make(map[string]struct{})
+ for hash, instance := range f.waittime {
+ if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {
+ // Transaction expired without propagation, schedule for retrieval
+ if f.announced[hash] != nil {
+ panic("announce tracker already contains waitlist item")
+ }
+ f.announced[hash] = f.waitlist[hash]
+ for peer := range f.waitlist[hash] {
+ if announces := f.announces[peer]; announces != nil {
+ announces[hash] = struct{}{}
+ } else {
+ f.announces[peer] = map[common.Hash]struct{}{hash: struct{}{}}
+ }
+ delete(f.waitslots[peer], hash)
+ if len(f.waitslots[peer]) == 0 {
+ delete(f.waitslots, peer)
+ }
+ actives[peer] = struct{}{}
+ }
+ delete(f.waittime, hash)
+ delete(f.waitlist, hash)
+ }
+ }
+ // If transactions are still waiting for propagation, reschedule the wait timer
+ if len(f.waittime) > 0 {
+ f.rescheduleWait(waitTimer, waitTrigger)
+ }
+ // If any peers became active and are idle, request transactions from them
+ if len(actives) > 0 {
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, actives)
+ }
+
+ case <-timeoutTrigger:
+ // Clean up any expired retrievals and avoid re-requesting them from the
+ // same peer (either overloaded or malicious, useless in both cases). We
+ // could also penalize (Drop), but there's nothing to gain, and if could
+ // possibly further increase the load on it.
+ for peer, req := range f.requests {
+ if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {
+ txRequestTimeoutMeter.Mark(int64(len(req.hashes)))
+
+ // Reschedule all the not-yet-delivered fetches to alternate peers
+ for _, hash := range req.hashes {
+ // Skip rescheduling hashes already delivered by someone else
+ if req.stolen != nil {
+ if _, ok := req.stolen[hash]; ok {
+ continue
+ }
+ }
+ // Move the delivery back from fetching to queued
+ if _, ok := f.announced[hash]; ok {
+ panic("announced tracker already contains alternate item")
+ }
+ if f.alternates[hash] != nil { // nil if tx was broadcast during fetch
+ f.announced[hash] = f.alternates[hash]
+ }
+ delete(f.announced[hash], peer)
+ if len(f.announced[hash]) == 0 {
+ delete(f.announced, hash)
+ }
+ delete(f.announces[peer], hash)
+ delete(f.alternates, hash)
+ delete(f.fetching, hash)
+ }
+ if len(f.announces[peer]) == 0 {
+ delete(f.announces, peer)
+ }
+ // Keep track of the request as dangling, but never expire
+ f.requests[peer].hashes = nil
+ }
+ }
+ // Schedule a new transaction retrieval
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
+
+ // No idea if we sheduled something or not, trigger the timer if needed
+ // TODO(karalabe): this is kind of lame, can't we dump it into scheduleFetches somehow?
+ f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
+
+ case delivery := <-f.cleanup:
+ // Independent if the delivery was direct or broadcast, remove all
+ // traces of the hash from internal trackers
+ for _, hash := range delivery.hashes {
+ if _, ok := f.waitlist[hash]; ok {
+ for peer, txset := range f.waitslots {
+ delete(txset, hash)
+ if len(txset) == 0 {
+ delete(f.waitslots, peer)
+ }
+ }
+ delete(f.waitlist, hash)
+ delete(f.waittime, hash)
+ } else {
+ for peer, txset := range f.announces {
+ delete(txset, hash)
+ if len(txset) == 0 {
+ delete(f.announces, peer)
+ }
+ }
+ delete(f.announced, hash)
+ delete(f.alternates, hash)
+
+ // If a transaction currently being fetched from a different
+ // origin was delivered (delivery stolen), mark it so the
+ // actual delivery won't double schedule it.
+ if origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) {
+ stolen := f.requests[origin].stolen
+ if stolen == nil {
+ f.requests[origin].stolen = make(map[common.Hash]struct{})
+ stolen = f.requests[origin].stolen
+ }
+ stolen[hash] = struct{}{}
+ }
+ delete(f.fetching, hash)
+ }
+ }
+ // In case of a direct delivery, also reschedule anything missing
+ // from the original query
+ if delivery.direct {
+ // Mark the reqesting successful (independent of individual status)
+ txRequestDoneMeter.Mark(int64(len(delivery.hashes)))
+
+ // Make sure something was pending, nuke it
+ req := f.requests[delivery.origin]
+ if req == nil {
+ log.Warn("Unexpected transaction delivery", "peer", delivery.origin)
+ break
+ }
+ delete(f.requests, delivery.origin)
+
+ // Anything not delivered should be re-scheduled (with or without
+ // this peer, depending on the response cutoff)
+ delivered := make(map[common.Hash]struct{})
+ for _, hash := range delivery.hashes {
+ delivered[hash] = struct{}{}
+ }
+ cutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!!
+ for i, hash := range req.hashes {
+ if _, ok := delivered[hash]; ok {
+ cutoff = i
+ }
+ }
+ // Reschedule missing hashes from alternates, not-fulfilled from alt+self
+ for i, hash := range req.hashes {
+ // Skip rescheduling hashes already delivered by someone else
+ if req.stolen != nil {
+ if _, ok := req.stolen[hash]; ok {
+ continue
+ }
+ }
+ if _, ok := delivered[hash]; !ok {
+ if i < cutoff {
+ delete(f.alternates[hash], delivery.origin)
+ delete(f.announces[delivery.origin], hash)
+ if len(f.announces[delivery.origin]) == 0 {
+ delete(f.announces, delivery.origin)
+ }
+ }
+ if len(f.alternates[hash]) > 0 {
+ if _, ok := f.announced[hash]; ok {
+ panic(fmt.Sprintf("announced tracker already contains alternate item: %v", f.announced[hash]))
+ }
+ f.announced[hash] = f.alternates[hash]
+ }
+ }
+ delete(f.alternates, hash)
+ delete(f.fetching, hash)
+ }
+ // Something was delivered, try to rechedule requests
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too
+ }
+
+ case drop := <-f.drop:
+ // A peer was dropped, remove all traces of it
+ if _, ok := f.waitslots[drop.peer]; ok {
+ for hash := range f.waitslots[drop.peer] {
+ delete(f.waitlist[hash], drop.peer)
+ if len(f.waitlist[hash]) == 0 {
+ delete(f.waitlist, hash)
+ delete(f.waittime, hash)
+ }
+ }
+ delete(f.waitslots, drop.peer)
+ if len(f.waitlist) > 0 {
+ f.rescheduleWait(waitTimer, waitTrigger)
+ }
+ }
+ // Clean up any active requests
+ var request *txRequest
+ if request = f.requests[drop.peer]; request != nil {
+ for _, hash := range request.hashes {
+ // Skip rescheduling hashes already delivered by someone else
+ if request.stolen != nil {
+ if _, ok := request.stolen[hash]; ok {
+ continue
+ }
+ }
+ // Undelivered hash, reschedule if there's an alternative origin available
+ delete(f.alternates[hash], drop.peer)
+ if len(f.alternates[hash]) == 0 {
+ delete(f.alternates, hash)
+ } else {
+ f.announced[hash] = f.alternates[hash]
+ delete(f.alternates, hash)
+ }
+ delete(f.fetching, hash)
+ }
+ delete(f.requests, drop.peer)
+ }
+ // Clean up general announcement tracking
+ if _, ok := f.announces[drop.peer]; ok {
+ for hash := range f.announces[drop.peer] {
+ delete(f.announced[hash], drop.peer)
+ if len(f.announced[hash]) == 0 {
+ delete(f.announced, hash)
+ }
+ }
+ delete(f.announces, drop.peer)
+ }
+ // If a request was cancelled, check if anything needs to be rescheduled
+ if request != nil {
+ f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
+ f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
+ }
+
+ case <-f.quit:
+ return
+ }
+ // No idea what happened, but bump some sanity metrics
+ txFetcherWaitingPeers.Update(int64(len(f.waitslots)))
+ txFetcherWaitingHashes.Update(int64(len(f.waitlist)))
+ txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))
+ txFetcherQueueingHashes.Update(int64(len(f.announced)))
+ txFetcherFetchingPeers.Update(int64(len(f.requests)))
+ txFetcherFetchingHashes.Update(int64(len(f.fetching)))
+
+ // Loop did something, ping the step notifier if needed (tests)
+ if f.step != nil {
+ f.step <- struct{}{}
+ }
+ }
+}
+
+// rescheduleWait iterates over all the transactions currently in the waitlist
+// and schedules the movement into the fetcher for the earliest.
+//
+// The method has a granularity of 'gatherSlack', since there's not much point in
+// spinning over all the transactions just to maybe find one that should trigger
+// a few ms earlier.
+func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
+ if *timer != nil {
+ (*timer).Stop()
+ }
+ now := f.clock.Now()
+
+ earliest := now
+ for _, instance := range f.waittime {
+ if earliest > instance {
+ earliest = instance
+ if txArriveTimeout-time.Duration(now-earliest) < gatherSlack {
+ break
+ }
+ }
+ }
+ *timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {
+ trigger <- struct{}{}
+ })
+}
+
+// rescheduleTimeout iterates over all the transactions currently in flight and
+// schedules a cleanup run when the first would trigger.
+//
+// The method has a granularity of 'gatherSlack', since there's not much point in
+// spinning over all the transactions just to maybe find one that should trigger
+// a few ms earlier.
+//
+// This method is a bit "flaky" "by design". In theory the timeout timer only ever
+// should be rescheduled if some request is pending. In practice, a timeout will
+// cause the timer to be rescheduled every 5 secs (until the peer comes through or
+// disconnects). This is a limitation of the fetcher code because we don't track
+// pending requests and timed out requests separately. Without double tracking, if
+// we simply didn't reschedule the timer on all-timeout then the timer would never
+// be set again since len(request) > 0 => something's running.
+func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) {
+ if *timer != nil {
+ (*timer).Stop()
+ }
+ now := f.clock.Now()
+
+ earliest := now
+ for _, req := range f.requests {
+ // If this request already timed out, skip it altogether
+ if req.hashes == nil {
+ continue
+ }
+ if earliest > req.time {
+ earliest = req.time
+ if txFetchTimeout-time.Duration(now-earliest) < gatherSlack {
+ break
+ }
+ }
+ }
+ *timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() {
+ trigger <- struct{}{}
+ })
+}
+
+// scheduleFetches starts a batch of retrievals for all available idle peers.
+func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) {
+ // Gather the set of peers we want to retrieve from (default to all)
+ actives := whitelist
+ if actives == nil {
+ actives = make(map[string]struct{})
+ for peer := range f.announces {
+ actives[peer] = struct{}{}
+ }
+ }
+ if len(actives) == 0 {
+ return
+ }
+ // For each active peer, try to schedule some transaction fetches
+ idle := len(f.requests) == 0
+
+ f.forEachPeer(actives, func(peer string) {
+ if f.requests[peer] != nil {
+ return // continue in the for-each
+ }
+ if len(f.announces[peer]) == 0 {
+ return // continue in the for-each
+ }
+ hashes := make([]common.Hash, 0, maxTxRetrievals)
+ f.forEachHash(f.announces[peer], func(hash common.Hash) bool {
+ if _, ok := f.fetching[hash]; !ok {
+ // Mark the hash as fetching and stash away possible alternates
+ f.fetching[hash] = peer
+
+ if _, ok := f.alternates[hash]; ok {
+ panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
+ }
+ f.alternates[hash] = f.announced[hash]
+ delete(f.announced, hash)
+
+ // Accumulate the hash and stop if the limit was reached
+ hashes = append(hashes, hash)
+ if len(hashes) >= maxTxRetrievals {
+ return false // break in the for-each
+ }
+ }
+ return true // continue in the for-each
+ })
+ // If any hashes were allocated, request them from the peer
+ if len(hashes) > 0 {
+ f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
+ txRequestOutMeter.Mark(int64(len(hashes)))
+
+ go func(peer string, hashes []common.Hash) {
+ // Try to fetch the transactions, but in case of a request
+ // failure (e.g. peer disconnected), reschedule the hashes.
+ if err := f.fetchTxs(peer, hashes); err != nil {
+ txRequestFailMeter.Mark(int64(len(hashes)))
+ f.Drop(peer)
+ }
+ }(peer, hashes)
+ }
+ })
+ // If a new request was fired, schedule a timeout timer
+ if idle && len(f.requests) > 0 {
+ f.rescheduleTimeout(timer, timeout)
+ }
+}
+
+// forEachPeer does a range loop over a map of peers in production, but during
+// testing it does a deterministic sorted random to allow reproducing issues.
+func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) {
+ // If we're running production, use whatever Go's map gives us
+ if f.rand == nil {
+ for peer := range peers {
+ do(peer)
+ }
+ return
+ }
+ // We're running the test suite, make iteration deterministic
+ list := make([]string, 0, len(peers))
+ for peer := range peers {
+ list = append(list, peer)
+ }
+ sort.Strings(list)
+ rotateStrings(list, f.rand.Intn(len(list)))
+ for _, peer := range list {
+ do(peer)
+ }
+}
+
+// forEachHash does a range loop over a map of hashes in production, but during
+// testing it does a deterministic sorted random to allow reproducing issues.
+func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
+ // If we're running production, use whatever Go's map gives us
+ if f.rand == nil {
+ for hash := range hashes {
+ if !do(hash) {
+ return
+ }
+ }
+ return
+ }
+ // We're running the test suite, make iteration deterministic
+ list := make([]common.Hash, 0, len(hashes))
+ for hash := range hashes {
+ list = append(list, hash)
+ }
+ sortHashes(list)
+ rotateHashes(list, f.rand.Intn(len(list)))
+ for _, hash := range list {
+ if !do(hash) {
+ return
+ }
+ }
+}
+
+// rotateStrings rotates the contents of a slice by n steps. This method is only
+// used in tests to simulate random map iteration but keep it deterministic.
+func rotateStrings(slice []string, n int) {
+ orig := make([]string, len(slice))
+ copy(orig, slice)
+
+ for i := 0; i < len(orig); i++ {
+ slice[i] = orig[(i+n)%len(orig)]
+ }
+}
+
+// sortHashes sorts a slice of hashes. This method is only used in tests in order
+// to simulate random map iteration but keep it deterministic.
+func sortHashes(slice []common.Hash) {
+ for i := 0; i < len(slice); i++ {
+ for j := i + 1; j < len(slice); j++ {
+ if bytes.Compare(slice[i][:], slice[j][:]) > 0 {
+ slice[i], slice[j] = slice[j], slice[i]
+ }
+ }
+ }
+}
+
+// rotateHashes rotates the contents of a slice by n steps. This method is only
+// used in tests to simulate random map iteration but keep it deterministic.
+func rotateHashes(slice []common.Hash, n int) {
+ orig := make([]common.Hash, len(slice))
+ copy(orig, slice)
+
+ for i := 0; i < len(orig); i++ {
+ slice[i] = orig[(i+n)%len(orig)]
+ }
+}
diff --git a/eth/fetcher/tx_fetcher_test.go b/eth/fetcher/tx_fetcher_test.go
new file mode 100644
index 0000000000..c5c198da88
--- /dev/null
+++ b/eth/fetcher/tx_fetcher_test.go
@@ -0,0 +1,1528 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package fetcher
+
+import (
+ "errors"
+ "math/big"
+ "math/rand"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+)
+
+var (
+ // testTxs is a set of transactions to use during testing that have meaningful hashes.
+ testTxs = []*types.Transaction{
+ types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil),
+ types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil),
+ types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil),
+ types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil),
+ }
+ // testTxsHashes is the hashes of the test transactions above
+ testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
+)
+
+type doTxNotify struct {
+ peer string
+ hashes []common.Hash
+}
+type doTxEnqueue struct {
+ peer string
+ txs []*types.Transaction
+ direct bool
+}
+type doWait struct {
+ time time.Duration
+ step bool
+}
+type doDrop string
+type doFunc func()
+
+type isWaiting map[string][]common.Hash
+type isScheduled struct {
+ tracking map[string][]common.Hash
+ fetching map[string][]common.Hash
+ dangling map[string][]common.Hash
+}
+type isUnderpriced int
+
+// txFetcherTest represents a test scenario that can be executed by the test
+// runner.
+type txFetcherTest struct {
+ init func() *TxFetcher
+ steps []interface{}
+}
+
+// Tests that transaction announcements are added to a waitlist, and none
+// of them are scheduled for retrieval until the wait expires.
+func TestTransactionFetcherWaiting(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Initial announcement to get something into the waitlist
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ }),
+ // Announce from a new peer to check that no overwrite happens
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ "B": {{0x03}, {0x04}},
+ }),
+ // Announce clashing hashes but unique new peer
+ doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ "B": {{0x03}, {0x04}},
+ "C": {{0x01}, {0x04}},
+ }),
+ // Announce existing and clashing hashes from existing peer
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}, {0x02}, {0x03}, {0x05}},
+ "B": {{0x03}, {0x04}},
+ "C": {{0x01}, {0x04}},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ // Wait for the arrival timeout which should move all expired items
+ // from the wait list to the scheduler
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}, {0x03}, {0x05}},
+ "B": {{0x03}, {0x04}},
+ "C": {{0x01}, {0x04}},
+ },
+ fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
+ "A": {{0x02}, {0x03}, {0x05}},
+ "C": {{0x01}, {0x04}},
+ },
+ },
+ // Queue up a non-fetchable transaction and then trigger it with a new
+ // peer (weird case to test 1 line in the fetcher)
+ doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}},
+ isWaiting(map[string][]common.Hash{
+ "C": {{0x06}, {0x07}},
+ }),
+ doWait{time: txArriveTimeout, step: true},
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}, {0x03}, {0x05}},
+ "B": {{0x03}, {0x04}},
+ "C": {{0x01}, {0x04}, {0x06}, {0x07}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x02}, {0x03}, {0x05}},
+ "C": {{0x01}, {0x04}},
+ },
+ },
+ doTxNotify{peer: "D", hashes: []common.Hash{{0x06}, {0x07}}},
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}, {0x03}, {0x05}},
+ "B": {{0x03}, {0x04}},
+ "C": {{0x01}, {0x04}, {0x06}, {0x07}},
+ "D": {{0x06}, {0x07}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x02}, {0x03}, {0x05}},
+ "C": {{0x01}, {0x04}},
+ "D": {{0x06}, {0x07}},
+ },
+ },
+ },
+ })
+}
+
+// Tests that transaction announcements skip the waiting list if they are
+// already scheduled.
+func TestTransactionFetcherSkipWaiting(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Push an initial announcement through to the scheduled stage
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ },
+ // Announce overlaps from the same peer, ensure the new ones end up
+ // in stage one, and clashing ones don't get double tracked
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x02}, {0x03}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x03}},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ },
+ // Announce overlaps from a new peer, ensure new transactions end up
+ // in stage one and clashing ones get tracked for the new peer
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x04}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x03}},
+ "B": {{0x03}, {0x04}},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ "B": {{0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ },
+ },
+ })
+}
+
+// Tests that only a single transaction request gets scheduled to a peer
+// and subsequent announces block or get allotted to someone else.
+func TestTransactionFetcherSingletonRequesting(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Push an initial announcement through to the scheduled stage
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ },
+ // Announce a new set of transactions from the same peer and ensure
+ // they do not start fetching since the peer is already busy
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x03}, {0x04}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x03}, {0x04}},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ },
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}, {0x03}, {0x04}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ },
+ // Announce a duplicate set of transactions from a new peer and ensure
+ // uniquely new ones start downloading, even if clashing.
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x05}, {0x06}}},
+ isWaiting(map[string][]common.Hash{
+ "B": {{0x05}, {0x06}},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}, {0x03}, {0x04}},
+ "B": {{0x02}, {0x03}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ "B": {{0x03}},
+ },
+ },
+ },
+ })
+}
+
+// Tests that if a transaction retrieval fails, all the transactions get
+// instantly scheduled back to someone else or the announcements dropped
+// if no alternate source is available.
+func TestTransactionFetcherFailedRescheduling(t *testing.T) {
+ // Create a channel to control when tx requests can fail
+ proceed := make(chan struct{})
+
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(origin string, hashes []common.Hash) error {
+ <-proceed
+ return errors.New("peer disconnected")
+ },
+ )
+ },
+ steps: []interface{}{
+ // Push an initial announcement through to the scheduled stage
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ },
+ // While the original peer is stuck in the request, push in an second
+ // data source.
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ "B": {{0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ },
+ // Wait until the original request fails and check that transactions
+ // are either rescheduled or dropped
+ doFunc(func() {
+ proceed <- struct{}{} // Allow peer A to return the failure
+ }),
+ doWait{time: 0, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "B": {{0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "B": {{0x02}},
+ },
+ },
+ doFunc(func() {
+ proceed <- struct{}{} // Allow peer B to return the failure
+ }),
+ doWait{time: 0, step: true},
+ isWaiting(nil),
+ isScheduled{nil, nil, nil},
+ },
+ })
+}
+
+// Tests that if a transaction retrieval succeeds, all alternate origins
+// are cleaned up.
+func TestTransactionFetcherCleanup(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Push an initial announcement through to the scheduled stage
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ // Request should be delivered
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
+ isScheduled{nil, nil, nil},
+ },
+ })
+}
+
+// Tests that if a transaction retrieval succeeds, but the response is empty (no
+// transactions available), then all are nuked instead of being rescheduled (yes,
+// this was a bug).
+func TestTransactionFetcherCleanupEmpty(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Push an initial announcement through to the scheduled stage
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ // Deliver an empty response and ensure the transaction is cleared, not rescheduled
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{}, direct: true},
+ isScheduled{nil, nil, nil},
+ },
+ })
+}
+
+// Tests that non-returned transactions are either re-scheduled from a
+// different peer, or self if they are after the cutoff point.
+func TestTransactionFetcherMissingRescheduling(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Push an initial announcement through to the scheduled stage
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}},
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
+ },
+ },
+ // Deliver the middle transaction requested, the one before which
+ // should be dropped and the one after re-requested.
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true}, // This depends on the deterministic random
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[2]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[2]},
+ },
+ },
+ },
+ })
+}
+
+// Tests that out of two transactions, if one is missing and the last is
+// delivered, the peer gets properly cleaned out from the internal state.
+func TestTransactionFetcherMissingCleanup(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Push an initial announcement through to the scheduled stage
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[0], testTxsHashes[1]},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0], testTxsHashes[1]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0], testTxsHashes[1]},
+ },
+ },
+ // Deliver the middle transaction requested, the one before which
+ // should be dropped and the one after re-requested.
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true}, // This depends on the deterministic random
+ isScheduled{nil, nil, nil},
+ },
+ })
+}
+
+// Tests that transaction broadcasts properly clean up announcements.
+func TestTransactionFetcherBroadcasts(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Set up three transactions to be in different states: waiting, queued and fetching
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},
+
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[2]},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0], testTxsHashes[1]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ // Broadcast all the transactions and ensure everything gets cleaned
+ // up, but the dangling request is left alone to avoid doing multiple
+ // concurrent requests.
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: false},
+ isWaiting(nil),
+ isScheduled{
+ tracking: nil,
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ // Deliver the requested hashes
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: true},
+ isScheduled{nil, nil, nil},
+ },
+ })
+}
+
+// Tests that the waiting list timers properly reset and reschedule.
+func TestTransactionFetcherWaitTimerResets(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}},
+ }),
+ isScheduled{nil, nil, nil},
+ doWait{time: txArriveTimeout / 2, step: false},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}},
+ }),
+ isScheduled{nil, nil, nil},
+
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ }),
+ isScheduled{nil, nil, nil},
+ doWait{time: txArriveTimeout / 2, step: true},
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x02}},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}},
+ },
+ },
+
+ doWait{time: txArriveTimeout / 2, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}},
+ },
+ },
+ },
+ })
+}
+
+// Tests that if a transaction request is not replied to, it will time
+// out and be re-scheduled for someone else.
+func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Push an initial announcement through to the scheduled stage
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ }),
+ isScheduled{tracking: nil, fetching: nil},
+
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ // Wait until the delivery times out, everything should be cleaned up
+ doWait{time: txFetchTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: nil,
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {},
+ },
+ },
+ // Ensure that followup announcements don't get scheduled
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
+ doWait{time: txArriveTimeout, step: true},
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[1]},
+ },
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {},
+ },
+ },
+ // If the dangling request arrives a bit later, do not choke
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[1]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[1]},
+ },
+ },
+ },
+ })
+}
+
+// Tests that the fetching timeout timers properly reset and reschedule.
+func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
+ doWait{time: txArriveTimeout, step: true},
+
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}},
+ "B": {{0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}},
+ "B": {{0x02}},
+ },
+ },
+ doWait{time: txFetchTimeout - txArriveTimeout, step: true},
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "B": {{0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "B": {{0x02}},
+ },
+ dangling: map[string][]common.Hash{
+ "A": {},
+ },
+ },
+ doWait{time: txArriveTimeout, step: true},
+ isScheduled{
+ tracking: nil,
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {},
+ "B": {},
+ },
+ },
+ },
+ })
+}
+
+// Tests that if thousands of transactions are announced, only a small
+// number of them will be requested at a time.
+func TestTransactionFetcherRateLimiting(t *testing.T) {
+ // Create a slew of transactions to announce
+ var hashes []common.Hash
+ for i := 0; i < maxTxAnnounces; i++ {
+ hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
+ }
+
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Announce all the transactions, wait a bit and ensure only a small
+ // percentage gets requested
+ doTxNotify{peer: "A", hashes: hashes},
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": hashes,
+ },
+ fetching: map[string][]common.Hash{
+ "A": hashes[1643 : 1643+maxTxRetrievals],
+ },
+ },
+ },
+ })
+}
+
+// Tests that the number of transactions a peer is allowed to announce and/or
+// request at the same time is hard capped.
+func TestTransactionFetcherDoSProtection(t *testing.T) {
+ // Create a slew of transactions to announce
+ var hashesA []common.Hash
+ for i := 0; i < maxTxAnnounces+1; i++ {
+ hashesA = append(hashesA, common.Hash{0x01, byte(i / 256), byte(i % 256)})
+ }
+ var hashesB []common.Hash
+ for i := 0; i < maxTxAnnounces+1; i++ {
+ hashesB = append(hashesB, common.Hash{0x02, byte(i / 256), byte(i % 256)})
+ }
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ nil,
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Announce half of the transaction and wait for them to be scheduled
+ doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2]},
+ doTxNotify{peer: "B", hashes: hashesB[:maxTxAnnounces/2-1]},
+ doWait{time: txArriveTimeout, step: true},
+
+ // Announce the second half and keep them in the wait list
+ doTxNotify{peer: "A", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces]},
+ doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1]},
+
+ // Ensure the hashes are split half and half
+ isWaiting(map[string][]common.Hash{
+ "A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
+ "B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1],
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": hashesA[:maxTxAnnounces/2],
+ "B": hashesB[:maxTxAnnounces/2-1],
+ },
+ fetching: map[string][]common.Hash{
+ "A": hashesA[1643 : 1643+maxTxRetrievals],
+ "B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
+ },
+ },
+ // Ensure that adding even one more hash results in dropping the hash
+ doTxNotify{peer: "A", hashes: []common.Hash{hashesA[maxTxAnnounces]}},
+ doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces-1 : maxTxAnnounces+1]},
+
+ isWaiting(map[string][]common.Hash{
+ "A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
+ "B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces],
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": hashesA[:maxTxAnnounces/2],
+ "B": hashesB[:maxTxAnnounces/2-1],
+ },
+ fetching: map[string][]common.Hash{
+ "A": hashesA[1643 : 1643+maxTxRetrievals],
+ "B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
+ },
+ },
+ },
+ })
+}
+
+// Tests that underpriced transactions don't get rescheduled after being rejected.
+func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ errs := make([]error, len(txs))
+ for i := 0; i < len(errs); i++ {
+ if i%2 == 0 {
+ errs[i] = core.ErrUnderpriced
+ } else {
+ errs[i] = core.ErrReplaceUnderpriced
+ }
+ }
+ return errs
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Deliver a transaction through the fetcher, but reject as underpriced
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true},
+ isScheduled{nil, nil, nil},
+
+ // Try to announce the transaction again, ensure it's not scheduled back
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}}, // [2] is needed to force a step in the fetcher
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[2]},
+ }),
+ isScheduled{nil, nil, nil},
+ },
+ })
+}
+
+// Tests that underpriced transactions don't get rescheduled after being rejected,
+// but at the same time there's a hard cap on the number of transactions that are
+// tracked.
+func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
+ // Temporarily disable fetch timeouts as they massively mess up the simulated clock
+ defer func(timeout time.Duration) { txFetchTimeout = timeout }(txFetchTimeout)
+ txFetchTimeout = 24 * time.Hour
+
+ // Create a slew of transactions to max out the underpriced set
+ var txs []*types.Transaction
+ for i := 0; i < maxTxUnderpricedSetSize+1; i++ {
+ txs = append(txs, types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil))
+ }
+ hashes := make([]common.Hash, len(txs))
+ for i, tx := range txs {
+ hashes[i] = tx.Hash()
+ }
+ // Generate a set of steps to announce and deliver the entire set of transactions
+ var steps []interface{}
+ for i := 0; i < maxTxUnderpricedSetSize/maxTxRetrievals; i++ {
+ steps = append(steps, doTxNotify{peer: "A", hashes: hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals]})
+ steps = append(steps, isWaiting(map[string][]common.Hash{
+ "A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
+ }))
+ steps = append(steps, doWait{time: txArriveTimeout, step: true})
+ steps = append(steps, isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
+ },
+ fetching: map[string][]common.Hash{
+ "A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
+ },
+ })
+ steps = append(steps, doTxEnqueue{peer: "A", txs: txs[i*maxTxRetrievals : (i+1)*maxTxRetrievals], direct: true})
+ steps = append(steps, isWaiting(nil))
+ steps = append(steps, isScheduled{nil, nil, nil})
+ steps = append(steps, isUnderpriced((i+1)*maxTxRetrievals))
+ }
+ testTransactionFetcher(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ errs := make([]error, len(txs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = core.ErrUnderpriced
+ }
+ return errs
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: append(steps, []interface{}{
+ // The preparation of the test has already been done in `steps`, add the last check
+ doTxNotify{peer: "A", hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true},
+ isUnderpriced(maxTxUnderpricedSetSize),
+ }...),
+ })
+}
+
+// Tests that unexpected deliveries don't corrupt the internal state.
+func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Deliver something out of the blue
+ isWaiting(nil),
+ isScheduled{nil, nil, nil},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: false},
+ isWaiting(nil),
+ isScheduled{nil, nil, nil},
+
+ // Set up a few hashes into various stages
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},
+
+ isWaiting(map[string][]common.Hash{
+ "A": {testTxsHashes[2]},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0], testTxsHashes[1]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ // Deliver everything and more out of the blue
+ doTxEnqueue{peer: "B", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, direct: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: nil,
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ },
+ })
+}
+
+// Tests that dropping a peer cleans out all internal data structures in all the
+// live or dangling stages.
+func TestTransactionFetcherDrop(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Set up a few hashes into various stages
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x03}}},
+
+ isWaiting(map[string][]common.Hash{
+ "A": {{0x03}},
+ }),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}, {0x02}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}},
+ },
+ },
+ // Drop the peer and ensure everything's cleaned out
+ doDrop("A"),
+ isWaiting(nil),
+ isScheduled{nil, nil, nil},
+
+ // Push the node into a dangling (timeout) state
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {testTxsHashes[0]},
+ },
+ },
+ doWait{time: txFetchTimeout, step: true},
+ isWaiting(nil),
+ isScheduled{
+ tracking: nil,
+ fetching: nil,
+ dangling: map[string][]common.Hash{
+ "A": {},
+ },
+ },
+ // Drop the peer and ensure everything's cleaned out
+ doDrop("A"),
+ isWaiting(nil),
+ isScheduled{nil, nil, nil},
+ },
+ })
+}
+
+// Tests that dropping a peer instantly reschedules failed announcements to any
+// available peer.
+func TestTransactionFetcherDropRescheduling(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Set up a few hashes into various stages
+ doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxNotify{peer: "B", hashes: []common.Hash{{0x01}}},
+
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "A": {{0x01}},
+ "B": {{0x01}},
+ },
+ fetching: map[string][]common.Hash{
+ "A": {{0x01}},
+ },
+ },
+ // Drop the peer and ensure everything's cleaned out
+ doDrop("A"),
+ isWaiting(nil),
+ isScheduled{
+ tracking: map[string][]common.Hash{
+ "B": {{0x01}},
+ },
+ fetching: map[string][]common.Hash{
+ "B": {{0x01}},
+ },
+ },
+ },
+ })
+}
+
+// This test reproduces a crash caught by the fuzzer. The root cause was a
+// dangling transaction timing out and clashing on readd with a concurrently
+// announced one.
+func TestTransactionFetcherFuzzCrash01(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Get a transaction into fetching mode and make it dangling with a broadcast
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
+
+ // Notify the dangling transaction once more and crash via a timeout
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txFetchTimeout, step: true},
+ },
+ })
+}
+
+// This test reproduces a crash caught by the fuzzer. The root cause was a
+// dangling transaction getting peer-dropped and clashing on readd with a
+// concurrently announced one.
+func TestTransactionFetcherFuzzCrash02(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Get a transaction into fetching mode and make it dangling with a broadcast
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
+
+ // Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout
+ doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doDrop("A"),
+ doWait{time: txFetchTimeout, step: true},
+ },
+ })
+}
+
+// This test reproduces a crash caught by the fuzzer. The root cause was a
+// dangling transaction getting rescheduled via a partial delivery, clashing
+// with a concurrent notify.
+func TestTransactionFetcherFuzzCrash03(t *testing.T) {
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ )
+ },
+ steps: []interface{}{
+ // Get a transaction into fetching mode and make it dangling with a broadcast
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
+ doWait{time: txFetchTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},
+
+ // Notify the dangling transaction once more, partially deliver, clash&crash with a timeout
+ doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
+ doWait{time: txFetchTimeout, step: true},
+ },
+ })
+}
+
+// This test reproduces a crash caught by the fuzzer. The root cause was a
+// dangling transaction getting rescheduled via a disconnect, clashing with
+// a concurrent notify.
+func TestTransactionFetcherFuzzCrash04(t *testing.T) {
+ // Create a channel to control when tx requests can fail
+ proceed := make(chan struct{})
+
+ testTransactionFetcherParallel(t, txFetcherTest{
+ init: func() *TxFetcher {
+ return NewTxFetcher(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error {
+ <-proceed
+ return errors.New("peer disconnected")
+ },
+ )
+ },
+ steps: []interface{}{
+ // Get a transaction into fetching mode and make it dangling with a broadcast
+ doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
+
+ // Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect
+ doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
+ doWait{time: txArriveTimeout, step: true},
+ doFunc(func() {
+ proceed <- struct{}{} // Allow peer A to return the failure
+ }),
+ doWait{time: 0, step: true},
+ doWait{time: txFetchTimeout, step: true},
+ },
+ })
+}
+
+func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
+ t.Parallel()
+ testTransactionFetcher(t, tt)
+}
+
+func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
+ // Create a fetcher and hook into its simulated fields
+ clock := new(mclock.Simulated)
+ wait := make(chan struct{})
+
+ fetcher := tt.init()
+ fetcher.clock = clock
+ fetcher.step = wait
+ fetcher.rand = rand.New(rand.NewSource(0x3a29))
+
+ fetcher.Start()
+ defer fetcher.Stop()
+
+ // Crunch through all the test steps and execute them
+ for i, step := range tt.steps {
+ switch step := step.(type) {
+ case doTxNotify:
+ if err := fetcher.Notify(step.peer, step.hashes); err != nil {
+ t.Errorf("step %d: %v", i, err)
+ }
+ <-wait // Fetcher needs to process this, wait until it's done
+ select {
+ case <-wait:
+ panic("wtf")
+ case <-time.After(time.Millisecond):
+ }
+
+ case doTxEnqueue:
+ if err := fetcher.Enqueue(step.peer, step.txs, step.direct); err != nil {
+ t.Errorf("step %d: %v", i, err)
+ }
+ <-wait // Fetcher needs to process this, wait until it's done
+
+ case doWait:
+ clock.Run(step.time)
+ if step.step {
+ <-wait // Fetcher supposed to do something, wait until it's done
+ }
+
+ case doDrop:
+ if err := fetcher.Drop(string(step)); err != nil {
+ t.Errorf("step %d: %v", i, err)
+ }
+ <-wait // Fetcher needs to process this, wait until it's done
+
+ case doFunc:
+ step()
+
+ case isWaiting:
+ // We need to check that the waiting list (stage 1) internals
+ // match with the expected set. Check the peer->hash mappings
+ // first.
+ for peer, hashes := range step {
+ waiting := fetcher.waitslots[peer]
+ if waiting == nil {
+ t.Errorf("step %d: peer %s missing from waitslots", i, peer)
+ continue
+ }
+ for _, hash := range hashes {
+ if _, ok := waiting[hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, hash)
+ }
+ }
+ for hash := range waiting {
+ if !containsHash(hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x extra in waitslots", i, peer, hash)
+ }
+ }
+ }
+ for peer := range fetcher.waitslots {
+ if _, ok := step[peer]; !ok {
+ t.Errorf("step %d: peer %s extra in waitslots", i, peer)
+ }
+ }
+ // Peer->hash sets correct, check the hash->peer and timeout sets
+ for peer, hashes := range step {
+ for _, hash := range hashes {
+ if _, ok := fetcher.waitlist[hash][peer]; !ok {
+ t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, hash, peer)
+ }
+ if _, ok := fetcher.waittime[hash]; !ok {
+ t.Errorf("step %d: hash %x missing from waittime", i, hash)
+ }
+ }
+ }
+ for hash, peers := range fetcher.waitlist {
+ if len(peers) == 0 {
+ t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash)
+ }
+ for peer := range peers {
+ if !containsHash(step[peer], hash) {
+ t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer)
+ }
+ }
+ }
+ for hash := range fetcher.waittime {
+ var found bool
+ for _, hashes := range step {
+ if containsHash(hashes, hash) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("step %d,: hash %x extra in waittime", i, hash)
+ }
+ }
+
+ case isScheduled:
+ // Check that all scheduled announces are accounted for and no
+ // extra ones are present.
+ for peer, hashes := range step.tracking {
+ scheduled := fetcher.announces[peer]
+ if scheduled == nil {
+ t.Errorf("step %d: peer %s missing from announces", i, peer)
+ continue
+ }
+ for _, hash := range hashes {
+ if _, ok := scheduled[hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, hash)
+ }
+ }
+ for hash := range scheduled {
+ if !containsHash(hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x extra in announces", i, peer, hash)
+ }
+ }
+ }
+ for peer := range fetcher.announces {
+ if _, ok := step.tracking[peer]; !ok {
+ t.Errorf("step %d: peer %s extra in announces", i, peer)
+ }
+ }
+ // Check that all announces required to be fetching are in the
+ // appropriate sets
+ for peer, hashes := range step.fetching {
+ request := fetcher.requests[peer]
+ if request == nil {
+ t.Errorf("step %d: peer %s missing from requests", i, peer)
+ continue
+ }
+ for _, hash := range hashes {
+ if !containsHash(request.hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
+ }
+ }
+ for _, hash := range request.hashes {
+ if !containsHash(hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
+ }
+ }
+ }
+ for peer := range fetcher.requests {
+ if _, ok := step.fetching[peer]; !ok {
+ if _, ok := step.dangling[peer]; !ok {
+ t.Errorf("step %d: peer %s extra in requests", i, peer)
+ }
+ }
+ }
+ for peer, hashes := range step.fetching {
+ for _, hash := range hashes {
+ if _, ok := fetcher.fetching[hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x missing from fetching", i, peer, hash)
+ }
+ }
+ }
+ for hash := range fetcher.fetching {
+ var found bool
+ for _, req := range fetcher.requests {
+ if containsHash(req.hashes, hash) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("step %d: hash %x extra in fetching", i, hash)
+ }
+ }
+ for _, hashes := range step.fetching {
+ for _, hash := range hashes {
+ alternates := fetcher.alternates[hash]
+ if alternates == nil {
+ t.Errorf("step %d: hash %x missing from alternates", i, hash)
+ continue
+ }
+ for peer := range alternates {
+ if _, ok := fetcher.announces[peer]; !ok {
+ t.Errorf("step %d: peer %s extra in alternates", i, peer)
+ continue
+ }
+ if _, ok := fetcher.announces[peer][hash]; !ok {
+ t.Errorf("step %d, peer %s: hash %x extra in alternates", i, hash, peer)
+ continue
+ }
+ }
+ for p := range fetcher.announced[hash] {
+ if _, ok := alternates[p]; !ok {
+ t.Errorf("step %d, hash %x: peer %s missing from alternates", i, hash, p)
+ continue
+ }
+ }
+ }
+ }
+ for peer, hashes := range step.dangling {
+ request := fetcher.requests[peer]
+ if request == nil {
+ t.Errorf("step %d: peer %s missing from requests", i, peer)
+ continue
+ }
+ for _, hash := range hashes {
+ if !containsHash(request.hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
+ }
+ }
+ for _, hash := range request.hashes {
+ if !containsHash(hashes, hash) {
+ t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
+ }
+ }
+ }
+ // Check that all transaction announces that are scheduled for
+ // retrieval but not actively being downloaded are tracked only
+ // in the stage 2 `announced` map.
+ var queued []common.Hash
+ for _, hashes := range step.tracking {
+ for _, hash := range hashes {
+ var found bool
+ for _, hs := range step.fetching {
+ if containsHash(hs, hash) {
+ found = true
+ break
+ }
+ }
+ if !found {
+ queued = append(queued, hash)
+ }
+ }
+ }
+ for _, hash := range queued {
+ if _, ok := fetcher.announced[hash]; !ok {
+ t.Errorf("step %d: hash %x missing from announced", i, hash)
+ }
+ }
+ for hash := range fetcher.announced {
+ if !containsHash(queued, hash) {
+ t.Errorf("step %d: hash %x extra in announced", i, hash)
+ }
+ }
+
+ case isUnderpriced:
+ if fetcher.underpriced.Cardinality() != int(step) {
+ t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Cardinality(), step)
+ }
+
+ default:
+ t.Fatalf("step %d: unknown step type %T", i, step)
+ }
+ // After every step, cross validate the internal uniqueness invariants
+ // between stage one and stage two.
+ for hash := range fetcher.waittime {
+ if _, ok := fetcher.announced[hash]; ok {
+ t.Errorf("step %d: hash %s present in both stage 1 and 2", i, hash)
+ }
+ }
+ }
+}
+
+// containsHash returns whether a hash is contained within a hash slice.
+func containsHash(slice []common.Hash, hash common.Hash) bool {
+ for _, have := range slice {
+ if have == hash {
+ return true
+ }
+ }
+ return false
+}
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 75b8174da3..0a9e4bac91 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -66,9 +66,8 @@ type PublicFilterAPI struct {
func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI {
api := &PublicFilterAPI{
backend: backend,
- mux: backend.EventMux(),
chainDb: backend.ChainDb(),
- events: NewEventSystem(backend.EventMux(), backend, lightMode),
+ events: NewEventSystem(backend, lightMode),
filters: make(map[rpc.ID]*filter),
}
go api.timeoutLoop()
@@ -437,7 +436,7 @@ func (api *PublicFilterAPI) GetFilterChanges(ctx context.Context, id rpc.ID) (in
hashes := f.hashes
f.hashes = nil
return returnHashes(hashes), nil
- case LogsSubscription:
+ case LogsSubscription, MinedAndPendingLogsSubscription:
logs := f.logs
f.logs = nil
authLogs, err := api.filterUnAuthorized(ctx, logs)
diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go
index fc7e6f5273..584ea23d02 100644
--- a/eth/filters/bench_test.go
+++ b/eth/filters/bench_test.go
@@ -28,7 +28,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethdb"
- "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/node"
)
@@ -122,14 +121,13 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) {
b.Log("Running filter benchmarks...")
start = time.Now()
- mux := new(event.TypeMux)
var backend *testBackend
for i := 0; i < benchFilterCnt; i++ {
if i%20 == 0 {
db.Close()
db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "")
- backend = &testBackend{mux, db, cnt, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
+ backend = &testBackend{db: db, sections: cnt}
}
var addr common.Address
addr[0] = byte(i)
@@ -173,8 +171,7 @@ func BenchmarkNoBloomBits(b *testing.B) {
b.Log("Running filter benchmarks...")
start := time.Now()
- mux := new(event.TypeMux)
- backend := &testBackend{mux, db, 0, new(event.Feed), new(event.Feed), new(event.Feed), new(event.Feed)}
+ backend := &testBackend{db: db}
filter := NewRangeFilter(backend, 0, int64(*headNum), []common.Address{{}}, nil)
filter.Logs(context.Background())
d := time.Since(start)
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 8fdcf754ed..84c1735959 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -37,7 +37,6 @@ type Backend interface {
multitenancy.AuthorizationProvider
ChainDb() ethdb.Database
- EventMux() *event.TypeMux
HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error)
HeaderByHash(ctx context.Context, blockHash common.Hash) (*types.Header, error)
GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
@@ -47,6 +46,7 @@ type Backend interface {
SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
+ SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription
BloomStatus() (uint64, uint64)
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index 70139c1a96..a105ec51c3 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -20,7 +20,6 @@ package filters
import (
"context"
- "errors"
"fmt"
"sync"
"time"
@@ -58,7 +57,6 @@ const (
)
const (
-
// txChanSize is the size of channel listening to NewTxsEvent.
// The number is referenced from the size of tx pool.
txChanSize = 4096
@@ -70,10 +68,6 @@ const (
chainEvChanSize = 10
)
-var (
- ErrInvalidSubscriptionID = errors.New("invalid id")
-)
-
type subscription struct {
id rpc.ID
typ Type
@@ -89,25 +83,25 @@ type subscription struct {
// EventSystem creates subscriptions, processes events and broadcasts them to the
// subscription which match the subscription criteria.
type EventSystem struct {
- mux *event.TypeMux
backend Backend
lightMode bool
lastHead *types.Header
// Subscriptions
- txsSub event.Subscription // Subscription for new transaction event
- logsSub event.Subscription // Subscription for new log event
- rmLogsSub event.Subscription // Subscription for removed log event
- chainSub event.Subscription // Subscription for new chain event
- pendingLogSub *event.TypeMuxSubscription // Subscription for pending log event
+ txsSub event.Subscription // Subscription for new transaction event
+ logsSub event.Subscription // Subscription for new log event
+ rmLogsSub event.Subscription // Subscription for removed log event
+ pendingLogsSub event.Subscription // Subscription for pending log event
+ chainSub event.Subscription // Subscription for new chain event
// Channels
- install chan *subscription // install filter for event notification
- uninstall chan *subscription // remove filter for event notification
- txsCh chan core.NewTxsEvent // Channel to receive new transactions event
- logsCh chan []*types.Log // Channel to receive new log event
- rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event
- chainCh chan core.ChainEvent // Channel to receive new chain event
+ install chan *subscription // install filter for event notification
+ uninstall chan *subscription // remove filter for event notification
+ txsCh chan core.NewTxsEvent // Channel to receive new transactions event
+ logsCh chan []*types.Log // Channel to receive new log event
+ pendingLogsCh chan []*types.Log // Channel to receive new log event
+ rmLogsCh chan core.RemovedLogsEvent // Channel to receive removed log event
+ chainCh chan core.ChainEvent // Channel to receive new chain event
}
// NewEventSystem creates a new manager that listens for event on the given mux,
@@ -116,17 +110,17 @@ type EventSystem struct {
//
// The returned manager has a loop that needs to be stopped with the Stop function
// or by stopping the given mux.
-func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventSystem {
+func NewEventSystem(backend Backend, lightMode bool) *EventSystem {
m := &EventSystem{
- mux: mux,
- backend: backend,
- lightMode: lightMode,
- install: make(chan *subscription),
- uninstall: make(chan *subscription),
- txsCh: make(chan core.NewTxsEvent, txChanSize),
- logsCh: make(chan []*types.Log, logsChanSize),
- rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize),
- chainCh: make(chan core.ChainEvent, chainEvChanSize),
+ backend: backend,
+ lightMode: lightMode,
+ install: make(chan *subscription),
+ uninstall: make(chan *subscription),
+ txsCh: make(chan core.NewTxsEvent, txChanSize),
+ logsCh: make(chan []*types.Log, logsChanSize),
+ rmLogsCh: make(chan core.RemovedLogsEvent, rmLogsChanSize),
+ pendingLogsCh: make(chan []*types.Log, logsChanSize),
+ chainCh: make(chan core.ChainEvent, chainEvChanSize),
}
// Subscribe events
@@ -134,12 +128,10 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS
m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)
m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)
m.chainSub = m.backend.SubscribeChainEvent(m.chainCh)
- // TODO(rjl493456442): use feed to subscribe pending log event
- m.pendingLogSub = m.mux.Subscribe(core.PendingLogsEvent{})
+ m.pendingLogsSub = m.backend.SubscribePendingLogsEvent(m.pendingLogsCh)
// Make sure none of the subscriptions are empty
- if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil ||
- m.pendingLogSub.Closed() {
+ if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil || m.pendingLogsSub == nil {
log.Crit("Subscribe for event system failed")
}
@@ -316,58 +308,61 @@ func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscript
type filterIndex map[Type]map[rpc.ID]*subscription
-// broadcast event to filters that match criteria.
-func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) {
- if ev == nil {
+func (es *EventSystem) handleLogs(filters filterIndex, ev []*types.Log) {
+ if len(ev) == 0 {
return
}
+ for _, f := range filters[LogsSubscription] {
+ matchedLogs := filterLogs(ev, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
+ if len(matchedLogs) > 0 {
+ f.logs <- matchedLogs
+ }
+ }
+}
- switch e := ev.(type) {
- case []*types.Log:
- if len(e) > 0 {
- for _, f := range filters[LogsSubscription] {
- if matchedLogs := filterLogs(e, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
- }
+func (es *EventSystem) handlePendingLogs(filters filterIndex, ev []*types.Log) {
+ if len(ev) == 0 {
+ return
+ }
+ for _, f := range filters[PendingLogsSubscription] {
+ matchedLogs := filterLogs(ev, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
+ if len(matchedLogs) > 0 {
+ f.logs <- matchedLogs
}
- case core.RemovedLogsEvent:
- for _, f := range filters[LogsSubscription] {
- if matchedLogs := filterLogs(e.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
+ }
+}
+
+func (es *EventSystem) handleRemovedLogs(filters filterIndex, ev core.RemovedLogsEvent) {
+ for _, f := range filters[LogsSubscription] {
+ matchedLogs := filterLogs(ev.Logs, f.logsCrit.FromBlock, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics)
+ if len(matchedLogs) > 0 {
+ f.logs <- matchedLogs
}
- case *event.TypeMuxEvent:
- if muxe, ok := e.Data.(core.PendingLogsEvent); ok {
- for _, f := range filters[PendingLogsSubscription] {
- if e.Time.After(f.created) {
- if matchedLogs := filterLogs(muxe.Logs, nil, f.logsCrit.ToBlock, f.logsCrit.Addresses, f.logsCrit.Topics); len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
+ }
+}
+
+func (es *EventSystem) handleTxsEvent(filters filterIndex, ev core.NewTxsEvent) {
+ hashes := make([]common.Hash, 0, len(ev.Txs))
+ for _, tx := range ev.Txs {
+ hashes = append(hashes, tx.Hash())
+ }
+ for _, f := range filters[PendingTransactionsSubscription] {
+ f.hashes <- hashes
+ }
+}
+
+func (es *EventSystem) handleChainEvent(filters filterIndex, ev core.ChainEvent) {
+ for _, f := range filters[BlocksSubscription] {
+ f.headers <- ev.Block.Header()
+ }
+ if es.lightMode && len(filters[LogsSubscription]) > 0 {
+ es.lightFilterNewHead(ev.Block.Header(), func(header *types.Header, remove bool) {
+ for _, f := range filters[LogsSubscription] {
+ if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
+ f.logs <- matchedLogs
}
}
- }
- case core.NewTxsEvent:
- hashes := make([]common.Hash, 0, len(e.Txs))
- for _, tx := range e.Txs {
- hashes = append(hashes, tx.Hash())
- }
- for _, f := range filters[PendingTransactionsSubscription] {
- f.hashes <- hashes
- }
- case core.ChainEvent:
- for _, f := range filters[BlocksSubscription] {
- f.headers <- e.Block.Header()
- }
- if es.lightMode && len(filters[LogsSubscription]) > 0 {
- es.lightFilterNewHead(e.Block.Header(), func(header *types.Header, remove bool) {
- for _, f := range filters[LogsSubscription] {
- if matchedLogs := es.lightFilterLogs(header, f.logsCrit.Addresses, f.logsCrit.Topics, remove); len(matchedLogs) > 0 {
- f.logs <- matchedLogs
- }
- }
- })
- }
+ })
}
}
@@ -448,10 +443,10 @@ func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common.
func (es *EventSystem) eventLoop() {
// Ensure all subscriptions get cleaned up
defer func() {
- es.pendingLogSub.Unsubscribe()
es.txsSub.Unsubscribe()
es.logsSub.Unsubscribe()
es.rmLogsSub.Unsubscribe()
+ es.pendingLogsSub.Unsubscribe()
es.chainSub.Unsubscribe()
}()
@@ -462,20 +457,16 @@ func (es *EventSystem) eventLoop() {
for {
select {
- // Handle subscribed events
case ev := <-es.txsCh:
- es.broadcast(index, ev)
+ es.handleTxsEvent(index, ev)
case ev := <-es.logsCh:
- es.broadcast(index, ev)
+ es.handleLogs(index, ev)
case ev := <-es.rmLogsCh:
- es.broadcast(index, ev)
+ es.handleRemovedLogs(index, ev)
+ case ev := <-es.pendingLogsCh:
+ es.handlePendingLogs(index, ev)
case ev := <-es.chainCh:
- es.broadcast(index, ev)
- case ev, active := <-es.pendingLogSub.Chan():
- if !active { // system stopped
- return
- }
- es.broadcast(index, ev)
+ es.handleChainEvent(index, ev)
case f := <-es.install:
if f.typ == MinedAndPendingLogsSubscription {
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index 44bc75ed9e..3e9dd58601 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -42,23 +42,20 @@ import (
)
type testBackend struct {
- mux *event.TypeMux
- db ethdb.Database
- sections uint64
- txFeed *event.Feed
- rmLogsFeed *event.Feed
- logsFeed *event.Feed
- chainFeed *event.Feed
+ mux *event.TypeMux
+ db ethdb.Database
+ sections uint64
+ txFeed event.Feed
+ logsFeed event.Feed
+ rmLogsFeed event.Feed
+ pendingLogsFeed event.Feed
+ chainFeed event.Feed
}
func (b *testBackend) ChainDb() ethdb.Database {
return b.db
}
-func (b *testBackend) EventMux() *event.TypeMux {
- return b.mux
-}
-
func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) {
var (
hash common.Hash
@@ -119,6 +116,10 @@ func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscript
return b.logsFeed.Subscribe(ch)
}
+func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ return b.pendingLogsFeed.Subscribe(ch)
+}
+
func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription {
return b.chainFeed.Subscribe(ch)
}
@@ -175,13 +176,8 @@ func TestBlockSubscription(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
+ backend = &testBackend{db: db}
api = NewPublicFilterAPI(backend, false)
genesis = new(core.Genesis).MustCommit(db)
chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {})
@@ -220,7 +216,7 @@ func TestBlockSubscription(t *testing.T) {
time.Sleep(1 * time.Second)
for _, e := range chainEvents {
- chainFeed.Send(e)
+ backend.chainFeed.Send(e)
}
<-sub0.Err()
@@ -232,14 +228,9 @@ func TestPendingTxFilter(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ backend = &testBackend{db: db}
+ api = NewPublicFilterAPI(backend, false)
transactions = []*types.Transaction{
types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil),
@@ -255,7 +246,7 @@ func TestPendingTxFilter(t *testing.T) {
fid0 := api.NewPendingTransactionFilter()
time.Sleep(1 * time.Second)
- txFeed.Send(core.NewTxsEvent{Txs: transactions})
+ backend.txFeed.Send(core.NewTxsEvent{Txs: transactions})
timeout := time.Now().Add(1 * time.Second)
for {
@@ -292,14 +283,9 @@ func TestPendingTxFilter(t *testing.T) {
// If not it must return an error.
func TestLogFilterCreation(t *testing.T) {
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ backend = &testBackend{db: db}
+ api = NewPublicFilterAPI(backend, false)
testCases = []struct {
crit FilterCriteria
@@ -341,14 +327,9 @@ func TestInvalidLogFilterCreation(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ backend = &testBackend{db: db}
+ api = NewPublicFilterAPI(backend, false)
)
// different situations where log filter creation should fail.
@@ -368,15 +349,10 @@ func TestInvalidLogFilterCreation(t *testing.T) {
func TestInvalidGetLogsRequest(t *testing.T) {
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
- blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
+ db = rawdb.NewMemoryDatabase()
+ backend = &testBackend{db: db}
+ api = NewPublicFilterAPI(backend, false)
+ blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")
)
// Reason: Cannot specify both BlockHash and FromBlock/ToBlock)
@@ -398,14 +374,9 @@ func TestLogFilter(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ backend = &testBackend{db: db}
+ api = NewPublicFilterAPI(backend, false)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -415,7 +386,7 @@ func TestLogFilter(t *testing.T) {
secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")
notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
- // posted twice, once as vm.Logs and once as core.PendingLogsEvent
+ // posted twice, once as regular logs and once as pending logs.
allLogs = []*types.Log{
{Address: firstAddr},
{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1},
@@ -468,11 +439,11 @@ func TestLogFilter(t *testing.T) {
// raise events
time.Sleep(1 * time.Second)
- if nsend := logsFeed.Send(allLogs); nsend == 0 {
- t.Fatal("Shoud have at least one subscription")
+ if nsend := backend.logsFeed.Send(allLogs); nsend == 0 {
+ t.Fatal("Logs event not delivered")
}
- if err := mux.Post(core.PendingLogsEvent{Logs: allLogs}); err != nil {
- t.Fatal(err)
+ if nsend := backend.pendingLogsFeed.Send(allLogs); nsend == 0 {
+ t.Fatal("Pending logs event not delivered")
}
for i, tt := range testCases {
@@ -517,14 +488,9 @@ func TestPendingLogsSubscription(t *testing.T) {
t.Parallel()
var (
- mux = new(event.TypeMux)
- db = rawdb.NewMemoryDatabase()
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- api = NewPublicFilterAPI(backend, false)
+ db = rawdb.NewMemoryDatabase()
+ backend = &testBackend{db: db}
+ api = NewPublicFilterAPI(backend, false)
firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111")
secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222")
@@ -536,26 +502,18 @@ func TestPendingLogsSubscription(t *testing.T) {
fourthTopic = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444")
notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999")
- allLogs = []core.PendingLogsEvent{
- {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}},
- {Logs: []*types.Log{{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}},
- {Logs: []*types.Log{{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}},
- {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}},
- {Logs: []*types.Log{{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}},
- {Logs: []*types.Log{
+ allLogs = [][]*types.Log{
+ {{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}},
+ {{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}},
+ {{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}},
+ {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}},
+ {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}},
+ {
{Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
{Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5},
{Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5},
{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5},
- }},
- }
-
- convertLogs = func(pl []core.PendingLogsEvent) []*types.Log {
- var logs []*types.Log
- for _, l := range pl {
- logs = append(logs, l.Logs...)
- }
- return logs
+ },
}
testCases = []struct {
@@ -565,21 +523,52 @@ func TestPendingLogsSubscription(t *testing.T) {
sub *Subscription
}{
// match all
- {ethereum.FilterQuery{}, convertLogs(allLogs), nil, nil},
+ {
+ ethereum.FilterQuery{}, flattenLogs(allLogs),
+ nil, nil,
+ },
// match none due to no matching addresses
- {ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}},
+ nil,
+ nil, nil,
+ },
// match logs based on addresses, ignore topics
- {ethereum.FilterQuery{Addresses: []common.Address{firstAddr}}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{firstAddr}},
+ append(flattenLogs(allLogs[:2]), allLogs[5][3]),
+ nil, nil,
+ },
// match none due to no matching topics (match with address)
- {ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}},
+ nil, nil, nil,
+ },
// match logs based on addresses and topics
- {ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[3:5]), allLogs[5].Logs[0]), nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},
+ append(flattenLogs(allLogs[3:5]), allLogs[5][0]),
+ nil, nil,
+ },
// match logs based on multiple addresses and "or" topics
- {ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, append(convertLogs(allLogs[2:5]), allLogs[5].Logs[0]), nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}},
+ append(flattenLogs(allLogs[2:5]), allLogs[5][0]),
+ nil,
+ nil,
+ },
// block numbers are ignored for filters created with New***Filter, these return all logs that match the given criteria when the state changes
- {ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)}, append(convertLogs(allLogs[:2]), allLogs[5].Logs[3]), nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(2), ToBlock: big.NewInt(3)},
+ append(flattenLogs(allLogs[:2]), allLogs[5][3]),
+ nil, nil,
+ },
// multiple pending logs, should match only 2 topics from the logs in block 5
- {ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}}, []*types.Log{allLogs[5].Logs[0], allLogs[5].Logs[2]}, nil, nil},
+ {
+ ethereum.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}},
+ []*types.Log{allLogs[5][0], allLogs[5][2]},
+ nil, nil,
+ },
}
)
@@ -622,10 +611,15 @@ func TestPendingLogsSubscription(t *testing.T) {
// raise events
time.Sleep(1 * time.Second)
- // allLogs are type of core.PendingLogsEvent
- for _, l := range allLogs {
- if err := mux.Post(l); err != nil {
- t.Fatal(err)
- }
+ for _, ev := range allLogs {
+ backend.pendingLogsFeed.Send(ev)
+ }
+}
+
+func flattenLogs(pl [][]*types.Log) []*types.Log {
+ var logs []*types.Log
+ for _, l := range pl {
+ logs = append(logs, l...)
}
+ return logs
}
diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go
index ad095b068a..a3e47fa59d 100644
--- a/eth/filters/filter_test.go
+++ b/eth/filters/filter_test.go
@@ -29,7 +29,6 @@ import (
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/params"
)
@@ -50,18 +49,13 @@ func BenchmarkFilters(b *testing.B) {
defer os.RemoveAll(dir)
var (
- db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
- mux = new(event.TypeMux)
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr1 = crypto.PubkeyToAddress(key1.PublicKey)
- addr2 = common.BytesToAddress([]byte("jeff"))
- addr3 = common.BytesToAddress([]byte("ethereum"))
- addr4 = common.BytesToAddress([]byte("random addresses please"))
+ db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
+ backend = &testBackend{db: db}
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr1 = crypto.PubkeyToAddress(key1.PublicKey)
+ addr2 = common.BytesToAddress([]byte("jeff"))
+ addr3 = common.BytesToAddress([]byte("ethereum"))
+ addr4 = common.BytesToAddress([]byte("random addresses please"))
)
defer db.Close()
@@ -109,15 +103,10 @@ func TestFilters(t *testing.T) {
defer os.RemoveAll(dir)
var (
- db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
- mux = new(event.TypeMux)
- txFeed = new(event.Feed)
- rmLogsFeed = new(event.Feed)
- logsFeed = new(event.Feed)
- chainFeed = new(event.Feed)
- backend = &testBackend{mux, db, 0, txFeed, rmLogsFeed, logsFeed, chainFeed}
- key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
- addr = crypto.PubkeyToAddress(key1.PublicKey)
+ db, _ = rawdb.NewLevelDBDatabase(dir, 0, 0, "")
+ backend = &testBackend{db: db}
+ key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr = crypto.PubkeyToAddress(key1.PublicKey)
hash1 = common.BytesToHash([]byte("topic1"))
hash2 = common.BytesToHash([]byte("topic2"))
diff --git a/eth/gen_config.go b/eth/gen_config.go
index af572f8ac7..4d6ebeae37 100644
--- a/eth/gen_config.go
+++ b/eth/gen_config.go
@@ -22,6 +22,7 @@ func (c Config) MarshalTOML() (interface{}, error) {
Genesis *core.Genesis `toml:",omitempty"`
NetworkId uint64
SyncMode downloader.SyncMode
+ DiscoveryURLs []string
NoPruning bool
NoPrefetch bool
Whitelist map[uint64]common.Hash `toml:"-"`
@@ -51,11 +52,14 @@ func (c Config) MarshalTOML() (interface{}, error) {
RPCGasCap *big.Int `toml:",omitempty"`
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
+ OverrideIstanbul *big.Int `toml:",omitempty"`
+ OverrideMuirGlacier *big.Int `toml:",omitempty"`
}
var enc Config
enc.Genesis = c.Genesis
enc.NetworkId = c.NetworkId
enc.SyncMode = c.SyncMode
+ enc.DiscoveryURLs = c.DiscoveryURLs
enc.NoPruning = c.NoPruning
enc.NoPrefetch = c.NoPrefetch
enc.Whitelist = c.Whitelist
@@ -85,6 +89,8 @@ func (c Config) MarshalTOML() (interface{}, error) {
enc.RPCGasCap = c.RPCGasCap
enc.Checkpoint = c.Checkpoint
enc.CheckpointOracle = c.CheckpointOracle
+ enc.OverrideIstanbul = c.OverrideIstanbul
+ enc.OverrideMuirGlacier = c.OverrideMuirGlacier
return &enc, nil
}
@@ -94,6 +100,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
Genesis *core.Genesis `toml:",omitempty"`
NetworkId *uint64
SyncMode *downloader.SyncMode
+ DiscoveryURLs []string
NoPruning *bool
NoPrefetch *bool
Whitelist map[uint64]common.Hash `toml:"-"`
@@ -123,6 +130,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
RPCGasCap *big.Int `toml:",omitempty"`
Checkpoint *params.TrustedCheckpoint `toml:",omitempty"`
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"`
+ OverrideIstanbul *big.Int `toml:",omitempty"`
+ OverrideMuirGlacier *big.Int `toml:",omitempty"`
}
var dec Config
if err := unmarshal(&dec); err != nil {
@@ -137,6 +146,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.SyncMode != nil {
c.SyncMode = *dec.SyncMode
}
+ if dec.DiscoveryURLs != nil {
+ c.DiscoveryURLs = dec.DiscoveryURLs
+ }
if dec.NoPruning != nil {
c.NoPruning = *dec.NoPruning
}
@@ -224,5 +236,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error {
if dec.CheckpointOracle != nil {
c.CheckpointOracle = dec.CheckpointOracle
}
+ if dec.OverrideIstanbul != nil {
+ c.OverrideIstanbul = dec.OverrideIstanbul
+ }
+ if dec.OverrideMuirGlacier != nil {
+ c.OverrideMuirGlacier = dec.OverrideMuirGlacier
+ }
return nil
}
diff --git a/eth/handler.go b/eth/handler.go
index 87b828c507..e763eb3bbe 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -53,9 +53,6 @@ const (
// txChanSize is the size of channel listening to NewTxsEvent.
// The number is referenced from the size of tx pool.
txChanSize = 4096
-
- // minimim number of peers to broadcast new blocks to
- minBroadcastPeers = 4
)
var (
@@ -80,9 +77,10 @@ type ProtocolManager struct {
blockchain *core.BlockChain
maxPeers int
- downloader *downloader.Downloader
- fetcher *fetcher.Fetcher
- peers *peerSet
+ downloader *downloader.Downloader
+ blockFetcher *fetcher.BlockFetcher
+ txFetcher *fetcher.TxFetcher
+ peers *peerSet
eventMux *event.TypeMux
txsCh chan core.NewTxsEvent
@@ -104,6 +102,9 @@ type ProtocolManager struct {
// Quorum
raftMode bool
engine consensus.Engine
+
+ // Test fields or hooks
+ broadcastTxAnnouncesOnly bool // Testing field, disable transaction propagation
}
// NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable
@@ -203,7 +204,16 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh
}
return n, err
}
- manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
+ manager.blockFetcher = fetcher.NewBlockFetcher(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
+
+ fetchTx := func(peer string, hashes []common.Hash) error {
+ p := manager.peers.Peer(peer)
+ if p == nil {
+ return errors.New("unknown peer")
+ }
+ return p.RequestTxs(hashes)
+ }
+ manager.txFetcher = fetcher.NewTxFetcher(txpool.Has, txpool.AddRemotes, fetchTx)
return manager, nil
}
@@ -220,7 +230,7 @@ func (pm *ProtocolManager) makeProtocol(version uint) p2p.Protocol {
Version: version,
Length: length,
Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
- peer := pm.newPeer(int(version), p, rw)
+ peer := pm.newPeer(int(version), p, rw, pm.txpool.Get)
select {
case pm.newPeerCh <- peer:
pm.wg.Add(1)
@@ -252,6 +262,8 @@ func (pm *ProtocolManager) removePeer(id string) {
// Unregister the peer from the downloader and Ethereum peer set
pm.downloader.UnregisterPeer(id)
+ pm.txFetcher.Drop(id)
+
if err := pm.peers.Unregister(id); err != nil {
log.Error("Peer removal failed", "peer", id, "err", err)
}
@@ -284,7 +296,7 @@ func (pm *ProtocolManager) Start(maxPeers int) {
// start sync handlers
go pm.syncer()
- go pm.txsyncLoop()
+ go pm.txsyncLoop64() // TODO(karalabe): Legacy initial tx echange, drop with eth/64.
}
func (pm *ProtocolManager) Stop() {
@@ -314,8 +326,8 @@ func (pm *ProtocolManager) Stop() {
log.Info("Ethereum protocol stopped")
}
-func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
- return newPeer(pv, p, newMeteredMsgWriter(rw))
+func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
+ return newPeer(pv, p, rw, getPooledTx)
}
// handle is the callback invoked to manage the life cycle of an eth peer. When
@@ -339,9 +351,6 @@ func (pm *ProtocolManager) handle(p *peer) error {
p.Log().Debug("Ethereum handshake failed", "err", err)
return err
}
- if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
- rw.Init(p.version)
- }
// Register the peer locally
if err := pm.peers.Register(p); err != nil {
p.Log().Error("Ethereum peer registration failed", "err", err)
@@ -406,12 +415,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Quorum
if pm.raftMode {
- if msg.Code != TxMsg &&
- msg.Code != GetBlockHeadersMsg && msg.Code != BlockHeadersMsg &&
- msg.Code != GetBlockBodiesMsg && msg.Code != BlockBodiesMsg {
-
+ switch msg.Code {
+ case TransactionMsg, PooledTransactionsMsg,
+ GetPooledTransactionsMsg, NewPooledTransactionHashesMsg,
+ GetBlockHeadersMsg, BlockHeadersMsg,
+ GetBlockBodiesMsg, BlockBodiesMsg:
+ // supported by Raft
+ default:
log.Info("raft: ignoring message", "code", msg.Code)
-
return nil
}
} else if handler, ok := pm.engine.(consensus.Handler); ok {
@@ -562,7 +573,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want)
}
// Irrelevant of the fork checks, send the header to the fetcher just in case
- headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
+ headers = pm.blockFetcher.FilterHeaders(p.id, headers, time.Now())
}
if len(headers) > 0 || !filter {
err := pm.downloader.DeliverHeaders(p.id, headers)
@@ -615,7 +626,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Filter out any explicitly requested bodies, deliver the rest to the downloader
filter := len(transactions) > 0 || len(uncles) > 0
if filter {
- transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
+ transactions, uncles = pm.blockFetcher.FilterBodies(p.id, transactions, uncles, time.Now())
}
if len(transactions) > 0 || len(uncles) > 0 || !filter {
err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
@@ -726,7 +737,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
}
for _, block := range unknown {
- pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
+ pm.blockFetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
}
case msg.Code == NewBlockMsg:
@@ -735,6 +746,14 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
if err := msg.Decode(&request); err != nil {
return errResp(ErrDecode, "%v: %v", msg, err)
}
+ if hash := types.CalcUncleHash(request.Block.Uncles()); hash != request.Block.UncleHash() {
+ log.Warn("Propagated block has invalid uncles", "have", hash, "exp", request.Block.UncleHash())
+ break // TODO(karalabe): return error eventually, but wait a few releases
+ }
+ if hash := types.DeriveSha(request.Block.Transactions()); hash != request.Block.TxHash() {
+ log.Warn("Propagated block has invalid body", "have", hash, "exp", request.Block.TxHash())
+ break // TODO(karalabe): return error eventually, but wait a few releases
+ }
if err := request.sanityCheck(); err != nil {
return err
}
@@ -743,7 +762,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Mark the peer as owning the block and schedule it for import
p.MarkBlock(request.Block.Hash())
- pm.fetcher.Enqueue(p.id, request.Block)
+ pm.blockFetcher.Enqueue(p.id, request.Block)
// Assuming the block is importable by the peer, but possibly not yet done so,
// calculate the head hash and TD that the peer truly must have.
@@ -764,7 +783,59 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
}
- case msg.Code == TxMsg:
+ case msg.Code == NewPooledTransactionHashesMsg && p.version >= eth65:
+ // New transaction announcement arrived, make sure we have
+ // a valid and fresh chain to handle them
+ if atomic.LoadUint32(&pm.acceptTxs) == 0 {
+ break
+ }
+ var hashes []common.Hash
+ if err := msg.Decode(&hashes); err != nil {
+ return errResp(ErrDecode, "msg %v: %v", msg, err)
+ }
+ // Schedule all the unknown hashes for retrieval
+ for _, hash := range hashes {
+ p.MarkTransaction(hash)
+ }
+ pm.txFetcher.Notify(p.id, hashes)
+
+ case msg.Code == GetPooledTransactionsMsg && p.version >= eth65:
+ // Decode the retrieval message
+ msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
+ if _, err := msgStream.List(); err != nil {
+ return err
+ }
+ // Gather transactions until the fetch or network limits is reached
+ var (
+ hash common.Hash
+ bytes int
+ hashes []common.Hash
+ txs []rlp.RawValue
+ )
+ for bytes < softResponseLimit {
+ // Retrieve the hash of the next block
+ if err := msgStream.Decode(&hash); err == rlp.EOL {
+ break
+ } else if err != nil {
+ return errResp(ErrDecode, "msg %v: %v", msg, err)
+ }
+ // Retrieve the requested transaction, skipping if unknown to us
+ tx := pm.txpool.Get(hash)
+ if tx == nil {
+ continue
+ }
+ // If known, encode and queue for response packet
+ if encoded, err := rlp.EncodeToBytes(tx); err != nil {
+ log.Error("Failed to encode transaction", "err", err)
+ } else {
+ hashes = append(hashes, hash)
+ txs = append(txs, encoded)
+ bytes += len(encoded)
+ }
+ }
+ return p.SendPooledTransactionsRLP(hashes, txs)
+
+ case msg.Code == TransactionMsg || (msg.Code == PooledTransactionsMsg && p.version >= eth65):
// Transactions arrived, make sure we have a valid and fresh chain to handle them
if atomic.LoadUint32(&pm.acceptTxs) == 0 {
break
@@ -781,7 +852,7 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
}
p.MarkTransaction(tx.Hash())
}
- pm.txpool.AddRemotes(txs)
+ pm.txFetcher.Enqueue(p.id, txs, msg.Code == PooledTransactionsMsg)
default:
return errResp(ErrInvalidMsgCode, "%v", msg.Code)
@@ -791,11 +862,11 @@ func (pm *ProtocolManager) handleMsg(p *peer) error {
// Quorum
func (pm *ProtocolManager) Enqueue(id string, block *types.Block) {
- pm.fetcher.Enqueue(id, block)
+ pm.blockFetcher.Enqueue(id, block)
}
-// BroadcastBlock will either propagate a block to a subset of it's peers, or
-// will only announce it's availability (depending what's requested).
+// BroadcastBlock will either propagate a block to a subset of its peers, or
+// will only announce its availability (depending what's requested).
func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
hash := block.Hash()
peers := pm.peers.PeersWithoutBlock(hash)
@@ -811,14 +882,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
return
}
// Send the block to a subset of our peers
- transferLen := int(math.Sqrt(float64(len(peers))))
- if transferLen < minBroadcastPeers {
- transferLen = minBroadcastPeers
- }
- if transferLen > len(peers) {
- transferLen = len(peers)
- }
- transfer := peers[:transferLen]
+ transfer := peers[:int(math.Sqrt(float64(len(peers))))]
for _, peer := range transfer {
peer.AsyncSendNewBlock(block, td)
}
@@ -834,11 +898,13 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
}
}
-// BroadcastTxs will propagate a batch of transactions to all peers which are not known to
+// BroadcastTransactions will propagate a batch of transactions to all peers which are not known to
// already have the given transaction.
-func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
- var txset = make(map[*peer]types.Transactions)
-
+func (pm *ProtocolManager) BroadcastTransactions(txs types.Transactions, propagate bool) {
+ var (
+ txset = make(map[*peer][]common.Hash)
+ annos = make(map[*peer][]common.Hash)
+ )
// Broadcast transactions to a batch of peers not knowing about it
// NOTE: Raft-based consensus currently assumes that geth broadcasts
// transactions to all peers in the network. A previous comment here
@@ -846,16 +912,35 @@ func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) {
// subset of peers. If this change occurs upstream, a merge conflict should
// arise here, and we should add logic to send to *all* peers in raft mode.
+ if propagate {
+ for _, tx := range txs {
+ peers := pm.peers.PeersWithoutTx(tx.Hash())
+
+ // Send the block to a subset of our peers
+ transfer := peers[:int(math.Sqrt(float64(len(peers))))]
+ for _, peer := range transfer {
+ txset[peer] = append(txset[peer], tx.Hash())
+ }
+ log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
+ }
+ for peer, hashes := range txset {
+ peer.AsyncSendTransactions(hashes)
+ }
+ return
+ }
+ // Otherwise only broadcast the announcement to peers
for _, tx := range txs {
peers := pm.peers.PeersWithoutTx(tx.Hash())
for _, peer := range peers {
- txset[peer] = append(txset[peer], tx)
+ annos[peer] = append(annos[peer], tx.Hash())
}
- log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers))
}
- // FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
- for peer, txs := range txset {
- peer.AsyncSendTransactions(txs)
+ for peer, hashes := range annos {
+ if peer.version >= eth65 {
+ peer.AsyncSendPooledTransactionHashes(hashes)
+ } else {
+ peer.AsyncSendTransactions(hashes)
+ }
}
}
@@ -874,7 +959,13 @@ func (pm *ProtocolManager) txBroadcastLoop() {
for {
select {
case event := <-pm.txsCh:
- pm.BroadcastTxs(event.Txs)
+ // For testing purpose only, disable propagation
+ if pm.broadcastTxAnnouncesOnly {
+ pm.BroadcastTransactions(event.Txs, false)
+ continue
+ }
+ pm.BroadcastTransactions(event.Txs, true) // First propagate transactions to peers
+ pm.BroadcastTransactions(event.Txs, false) // Only then announce to the rest
// Err() channel will be closed when unsubscribing.
case <-pm.txsSub.Err():
diff --git a/eth/handler_test.go b/eth/handler_test.go
index 81b35add8a..05a1b5f254 100644
--- a/eth/handler_test.go
+++ b/eth/handler_test.go
@@ -535,7 +535,7 @@ func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpo
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
- pm, err := NewProtocolManager(config, cht, syncmode, DefaultConfig.NetworkId, new(event.TypeMux), new(testTxPool), ethash.NewFaker(), blockchain, db, 1, nil, false)
+ pm, err := NewProtocolManager(config, cht, syncmode, DefaultConfig.NetworkId, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, ethash.NewFaker(), blockchain, db, 1, nil, false)
if err != nil {
t.Fatalf("failed to start test protocol manager: %v", err)
}
@@ -594,12 +594,12 @@ func TestBroadcastBlock(t *testing.T) {
broadcastExpected int
}{
{1, 1},
- {2, 2},
- {3, 3},
- {4, 4},
- {5, 4},
- {9, 4},
- {12, 4},
+ {2, 1},
+ {3, 1},
+ {4, 2},
+ {5, 2},
+ {9, 3},
+ {12, 3},
{16, 4},
{26, 5},
{100, 10},
@@ -622,7 +622,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
if err != nil {
t.Fatalf("failed to create new blockchain: %v", err)
}
- pm, err := NewProtocolManager(config, nil, downloader.FullSync, DefaultConfig.NetworkId, evmux, new(testTxPool), pow, blockchain, db, 1, nil, false)
+ pm, err := NewProtocolManager(config, nil, downloader.FullSync, DefaultConfig.NetworkId, evmux, &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, pow, blockchain, db, 1, nil, false)
if err != nil {
t.Fatalf("failed to start test protocol manager: %v", err)
}
@@ -632,6 +632,7 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
for i := 0; i < totalPeers; i++ {
peer, _ := newTestPeer(fmt.Sprintf("peer %d", i), eth63, pm, true)
defer peer.close()
+
peers = append(peers, peer)
}
chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) {})
@@ -648,29 +649,83 @@ func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) {
}
}(peer)
}
- timeout := time.After(300 * time.Millisecond)
- var receivedCount int
-outer:
+ var received int
for {
select {
- case err = <-errCh:
- break outer
case <-doneCh:
- receivedCount++
- if receivedCount == totalPeers {
- break outer
+ received++
+
+ case <-time.After(100 * time.Millisecond):
+ if received != broadcastExpected {
+ t.Errorf("broadcast count mismatch: have %d, want %d", received, broadcastExpected)
}
- case <-timeout:
- break outer
+ return
+
+ case err = <-errCh:
+ t.Fatalf("broadcast failed: %v", err)
}
}
- for _, peer := range peers {
- peer.app.Close()
+
+}
+
+// Tests that a propagated malformed block (uncles or transactions don't match
+// with the hashes in the header) gets discarded and not broadcast forward.
+func TestBroadcastMalformedBlock(t *testing.T) {
+ // Create a live node to test propagation with
+ var (
+ engine = ethash.NewFaker()
+ db = rawdb.NewMemoryDatabase()
+ config = ¶ms.ChainConfig{}
+ gspec = &core.Genesis{Config: config}
+ genesis = gspec.MustCommit(db)
+ )
+ blockchain, err := core.NewBlockChain(db, nil, config, engine, vm.Config{}, nil)
+ if err != nil {
+ t.Fatalf("failed to create new blockchain: %v", err)
}
+ pm, err := NewProtocolManager(config, nil, downloader.FullSync, DefaultConfig.NetworkId, new(event.TypeMux), new(testTxPool), engine, blockchain, db, 1, nil, false)
if err != nil {
- t.Errorf("error matching block by peer: %v", err)
+ t.Fatalf("failed to start test protocol manager: %v", err)
}
- if receivedCount != broadcastExpected {
- t.Errorf("block broadcast to %d peers, expected %d", receivedCount, broadcastExpected)
+ pm.Start(2)
+ defer pm.Stop()
+
+ // Create two peers, one to send the malformed block with and one to check
+ // propagation
+ source, _ := newTestPeer("source", eth63, pm, true)
+ defer source.close()
+
+ sink, _ := newTestPeer("sink", eth63, pm, true)
+ defer sink.close()
+
+ // Create various combinations of malformed blocks
+ chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) {})
+
+ malformedUncles := chain[0].Header()
+ malformedUncles.UncleHash[0]++
+ malformedTransactions := chain[0].Header()
+ malformedTransactions.TxHash[0]++
+ malformedEverything := chain[0].Header()
+ malformedEverything.UncleHash[0]++
+ malformedEverything.TxHash[0]++
+
+ // Keep listening to broadcasts and notify if any arrives
+ notify := make(chan struct{}, 1)
+ go func() {
+ if _, err := sink.app.ReadMsg(); err == nil {
+ notify <- struct{}{}
+ }
+ }()
+ // Try to broadcast all malformations and ensure they all get discarded
+ for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} {
+ block := types.NewBlockWithHeader(header).WithBody(chain[0].Transactions(), chain[0].Uncles())
+ if err := p2p.Send(source.app, NewBlockMsg, []interface{}{block, big.NewInt(131136)}); err != nil {
+ t.Fatalf("failed to broadcast block: %v", err)
+ }
+ select {
+ case <-notify:
+ t.Fatalf("malformed block forwarded")
+ case <-time.After(100 * time.Millisecond):
+ }
}
}
diff --git a/eth/helper_test.go b/eth/helper_test.go
index f7aaf31339..67d525aa59 100644
--- a/eth/helper_test.go
+++ b/eth/helper_test.go
@@ -73,7 +73,7 @@ func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func
if _, err := blockchain.InsertChain(chain); err != nil {
panic(err)
}
- pm, err := NewProtocolManager(gspec.Config, nil, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx}, engine, blockchain, db, 1, nil, false)
+ pm, err := NewProtocolManager(gspec.Config, nil, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx, pool: make(map[common.Hash]*types.Transaction)}, engine, blockchain, db, 1, nil, false)
if err != nil {
return nil, nil, err
}
@@ -148,22 +148,43 @@ func newTestProtocolManagerMust(t *testing.T, mode downloader.SyncMode, blocks i
// testTxPool is a fake, helper transaction pool for testing purposes
type testTxPool struct {
txFeed event.Feed
- pool []*types.Transaction // Collection of all transactions
- added chan<- []*types.Transaction // Notification channel for new transactions
+ pool map[common.Hash]*types.Transaction // Hash map of collected transactions
+ added chan<- []*types.Transaction // Notification channel for new transactions
lock sync.RWMutex // Protects the transaction pool
}
+// Has returns an indicator whether txpool has a transaction
+// cached with the given hash.
+func (p *testTxPool) Has(hash common.Hash) bool {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ return p.pool[hash] != nil
+}
+
+// Get retrieves the transaction from local txpool with given
+// tx hash.
+func (p *testTxPool) Get(hash common.Hash) *types.Transaction {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ return p.pool[hash]
+}
+
// AddRemotes appends a batch of transactions to the pool, and notifies any
// listeners if the addition channel is non nil
func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error {
p.lock.Lock()
defer p.lock.Unlock()
- p.pool = append(p.pool, txs...)
+ for _, tx := range txs {
+ p.pool[tx.Hash()] = tx
+ }
if p.added != nil {
p.added <- txs
}
+ p.txFeed.Send(core.NewTxsEvent{Txs: txs})
return make([]error, len(txs))
}
@@ -210,7 +231,7 @@ func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*te
var id enode.ID
rand.Read(id[:])
- peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net)
+ peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net, pm.txpool.Get)
// Start the peer on a new thread
errc := make(chan error, 1)
@@ -248,7 +269,7 @@ func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesi
CurrentBlock: head,
GenesisBlock: genesis,
}
- case p.version == eth64:
+ case p.version >= eth64:
msg = &statusData{
ProtocolVersion: uint32(p.version),
NetworkID: DefaultConfig.NetworkId,
diff --git a/eth/metrics.go b/eth/metrics.go
deleted file mode 100644
index 0533a2a875..0000000000
--- a/eth/metrics.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2015 The go-ethereum Authors
-// This file is part of the go-ethereum library.
-//
-// The go-ethereum library is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Lesser General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// The go-ethereum library is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Lesser General Public License for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with the go-ethereum library. If not, see .
-
-package eth
-
-import (
- "github.com/ethereum/go-ethereum/metrics"
- "github.com/ethereum/go-ethereum/p2p"
-)
-
-var (
- propTxnInPacketsMeter = metrics.NewRegisteredMeter("eth/prop/txns/in/packets", nil)
- propTxnInTrafficMeter = metrics.NewRegisteredMeter("eth/prop/txns/in/traffic", nil)
- propTxnOutPacketsMeter = metrics.NewRegisteredMeter("eth/prop/txns/out/packets", nil)
- propTxnOutTrafficMeter = metrics.NewRegisteredMeter("eth/prop/txns/out/traffic", nil)
- propHashInPacketsMeter = metrics.NewRegisteredMeter("eth/prop/hashes/in/packets", nil)
- propHashInTrafficMeter = metrics.NewRegisteredMeter("eth/prop/hashes/in/traffic", nil)
- propHashOutPacketsMeter = metrics.NewRegisteredMeter("eth/prop/hashes/out/packets", nil)
- propHashOutTrafficMeter = metrics.NewRegisteredMeter("eth/prop/hashes/out/traffic", nil)
- propBlockInPacketsMeter = metrics.NewRegisteredMeter("eth/prop/blocks/in/packets", nil)
- propBlockInTrafficMeter = metrics.NewRegisteredMeter("eth/prop/blocks/in/traffic", nil)
- propBlockOutPacketsMeter = metrics.NewRegisteredMeter("eth/prop/blocks/out/packets", nil)
- propBlockOutTrafficMeter = metrics.NewRegisteredMeter("eth/prop/blocks/out/traffic", nil)
- reqHeaderInPacketsMeter = metrics.NewRegisteredMeter("eth/req/headers/in/packets", nil)
- reqHeaderInTrafficMeter = metrics.NewRegisteredMeter("eth/req/headers/in/traffic", nil)
- reqHeaderOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/headers/out/packets", nil)
- reqHeaderOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/headers/out/traffic", nil)
- reqBodyInPacketsMeter = metrics.NewRegisteredMeter("eth/req/bodies/in/packets", nil)
- reqBodyInTrafficMeter = metrics.NewRegisteredMeter("eth/req/bodies/in/traffic", nil)
- reqBodyOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/bodies/out/packets", nil)
- reqBodyOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/bodies/out/traffic", nil)
- reqStateInPacketsMeter = metrics.NewRegisteredMeter("eth/req/states/in/packets", nil)
- reqStateInTrafficMeter = metrics.NewRegisteredMeter("eth/req/states/in/traffic", nil)
- reqStateOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/states/out/packets", nil)
- reqStateOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/states/out/traffic", nil)
- reqReceiptInPacketsMeter = metrics.NewRegisteredMeter("eth/req/receipts/in/packets", nil)
- reqReceiptInTrafficMeter = metrics.NewRegisteredMeter("eth/req/receipts/in/traffic", nil)
- reqReceiptOutPacketsMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/packets", nil)
- reqReceiptOutTrafficMeter = metrics.NewRegisteredMeter("eth/req/receipts/out/traffic", nil)
- miscInPacketsMeter = metrics.NewRegisteredMeter("eth/misc/in/packets", nil)
- miscInTrafficMeter = metrics.NewRegisteredMeter("eth/misc/in/traffic", nil)
- miscOutPacketsMeter = metrics.NewRegisteredMeter("eth/misc/out/packets", nil)
- miscOutTrafficMeter = metrics.NewRegisteredMeter("eth/misc/out/traffic", nil)
-)
-
-// meteredMsgReadWriter is a wrapper around a p2p.MsgReadWriter, capable of
-// accumulating the above defined metrics based on the data stream contents.
-type meteredMsgReadWriter struct {
- p2p.MsgReadWriter // Wrapped message stream to meter
- version int // Protocol version to select correct meters
-}
-
-// newMeteredMsgWriter wraps a p2p MsgReadWriter with metering support. If the
-// metrics system is disabled, this function returns the original object.
-func newMeteredMsgWriter(rw p2p.MsgReadWriter) p2p.MsgReadWriter {
- if !metrics.Enabled {
- return rw
- }
- return &meteredMsgReadWriter{MsgReadWriter: rw}
-}
-
-// Init sets the protocol version used by the stream to know which meters to
-// increment in case of overlapping message ids between protocol versions.
-func (rw *meteredMsgReadWriter) Init(version int) {
- rw.version = version
-}
-
-func (rw *meteredMsgReadWriter) ReadMsg() (p2p.Msg, error) {
- // Read the message and short circuit in case of an error
- msg, err := rw.MsgReadWriter.ReadMsg()
- if err != nil {
- return msg, err
- }
- // Account for the data traffic
- packets, traffic := miscInPacketsMeter, miscInTrafficMeter
- switch {
- case msg.Code == BlockHeadersMsg:
- packets, traffic = reqHeaderInPacketsMeter, reqHeaderInTrafficMeter
- case msg.Code == BlockBodiesMsg:
- packets, traffic = reqBodyInPacketsMeter, reqBodyInTrafficMeter
-
- case rw.version >= eth63 && msg.Code == NodeDataMsg:
- packets, traffic = reqStateInPacketsMeter, reqStateInTrafficMeter
- case rw.version >= eth63 && msg.Code == ReceiptsMsg:
- packets, traffic = reqReceiptInPacketsMeter, reqReceiptInTrafficMeter
-
- case msg.Code == NewBlockHashesMsg:
- packets, traffic = propHashInPacketsMeter, propHashInTrafficMeter
- case msg.Code == NewBlockMsg:
- packets, traffic = propBlockInPacketsMeter, propBlockInTrafficMeter
- case msg.Code == TxMsg:
- packets, traffic = propTxnInPacketsMeter, propTxnInTrafficMeter
- }
- packets.Mark(1)
- traffic.Mark(int64(msg.Size))
-
- return msg, err
-}
-
-func (rw *meteredMsgReadWriter) WriteMsg(msg p2p.Msg) error {
- // Account for the data traffic
- packets, traffic := miscOutPacketsMeter, miscOutTrafficMeter
- switch {
- case msg.Code == BlockHeadersMsg:
- packets, traffic = reqHeaderOutPacketsMeter, reqHeaderOutTrafficMeter
- case msg.Code == BlockBodiesMsg:
- packets, traffic = reqBodyOutPacketsMeter, reqBodyOutTrafficMeter
-
- case rw.version >= eth63 && msg.Code == NodeDataMsg:
- packets, traffic = reqStateOutPacketsMeter, reqStateOutTrafficMeter
- case rw.version >= eth63 && msg.Code == ReceiptsMsg:
- packets, traffic = reqReceiptOutPacketsMeter, reqReceiptOutTrafficMeter
-
- case msg.Code == NewBlockHashesMsg:
- packets, traffic = propHashOutPacketsMeter, propHashOutTrafficMeter
- case msg.Code == NewBlockMsg:
- packets, traffic = propBlockOutPacketsMeter, propBlockOutTrafficMeter
- case msg.Code == TxMsg:
- packets, traffic = propTxnOutPacketsMeter, propTxnOutTrafficMeter
- }
- packets.Mark(1)
- traffic.Mark(int64(msg.Size))
-
- // Send the packet to the p2p layer
- return rw.MsgReadWriter.WriteMsg(msg)
-}
diff --git a/eth/peer.go b/eth/peer.go
index 28946ace2c..865b932584 100644
--- a/eth/peer.go
+++ b/eth/peer.go
@@ -42,24 +42,35 @@ const (
maxKnownTxs = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS)
maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS)
- // maxQueuedTxs is the maximum number of transaction lists to queue up before
- // dropping broadcasts. This is a sensitive number as a transaction list might
- // contain a single transaction, or thousands.
- maxQueuedTxs = 128
+ // maxQueuedTxs is the maximum number of transactions to queue up before dropping
+ // older broadcasts.
+ maxQueuedTxs = 4096
- // maxQueuedProps is the maximum number of block propagations to queue up before
+ // maxQueuedTxAnns is the maximum number of transaction announcements to queue up
+ // before dropping older announcements.
+ maxQueuedTxAnns = 4096
+
+ // maxQueuedBlocks is the maximum number of block propagations to queue up before
// dropping broadcasts. There's not much point in queueing stale blocks, so a few
// that might cover uncles should be enough.
- maxQueuedProps = 4
+ maxQueuedBlocks = 4
- // maxQueuedAnns is the maximum number of block announcements to queue up before
+ // maxQueuedBlockAnns is the maximum number of block announcements to queue up before
// dropping broadcasts. Similarly to block propagations, there's no point to queue
// above some healthy uncle limit, so use that.
- maxQueuedAnns = 4
+ maxQueuedBlockAnns = 4
handshakeTimeout = 5 * time.Second
)
+// max is a helper function which returns the larger of the two given integers.
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
// PeerInfo represents a short summary of the Ethereum sub-protocol metadata known
// about a connected peer.
type PeerInfo struct {
@@ -87,48 +98,48 @@ type peer struct {
td *big.Int
lock sync.RWMutex
- knownTxs mapset.Set // Set of transaction hashes known to be known by this peer
- knownBlocks mapset.Set // Set of block hashes known to be known by this peer
- queuedTxs chan []*types.Transaction // Queue of transactions to broadcast to the peer
- queuedProps chan *propEvent // Queue of blocks to broadcast to the peer
- queuedAnns chan *types.Block // Queue of blocks to announce to the peer
- term chan struct{} // Termination channel to stop the broadcaster
+ knownBlocks mapset.Set // Set of block hashes known to be known by this peer
+ queuedBlocks chan *propEvent // Queue of blocks to broadcast to the peer
+ queuedBlockAnns chan *types.Block // Queue of blocks to announce to the peer
+
+ knownTxs mapset.Set // Set of transaction hashes known to be known by this peer
+ txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
+ txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests
+ getPooledTx func(common.Hash) *types.Transaction // Callback used to retrieve transaction from txpool
+
+ term chan struct{} // Termination channel to stop the broadcaster
}
-func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
+func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer {
return &peer{
- Peer: p,
- rw: rw,
- version: version,
- id: fmt.Sprintf("%x", p.ID().Bytes()[:8]),
- knownTxs: mapset.NewSet(),
- knownBlocks: mapset.NewSet(),
- queuedTxs: make(chan []*types.Transaction, maxQueuedTxs),
- queuedProps: make(chan *propEvent, maxQueuedProps),
- queuedAnns: make(chan *types.Block, maxQueuedAnns),
- term: make(chan struct{}),
- }
-}
-
-// broadcast is a write loop that multiplexes block propagations, announcements
-// and transaction broadcasts into the remote peer. The goal is to have an async
-// writer that does not lock up node internals.
-func (p *peer) broadcast() {
+ Peer: p,
+ rw: rw,
+ version: version,
+ id: fmt.Sprintf("%x", p.ID().Bytes()[:8]),
+ knownTxs: mapset.NewSet(),
+ knownBlocks: mapset.NewSet(),
+ queuedBlocks: make(chan *propEvent, maxQueuedBlocks),
+ queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
+ txBroadcast: make(chan []common.Hash),
+ txAnnounce: make(chan []common.Hash),
+ getPooledTx: getPooledTx,
+ term: make(chan struct{}),
+ }
+}
+
+// broadcastBlocks is a write loop that multiplexes blocks and block announcements
+// to the remote peer. The goal is to have an async writer that does not lock up
+// node internals and at the same time rate limits queued data.
+func (p *peer) broadcastBlocks() {
for {
select {
- case txs := <-p.queuedTxs:
- if err := p.SendTransactions(txs); err != nil {
- return
- }
- p.Log().Trace("Broadcast transactions", "count", len(txs))
-
- case prop := <-p.queuedProps:
+ case prop := <-p.queuedBlocks:
if err := p.SendNewBlock(prop.block, prop.td); err != nil {
return
}
p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td)
- case block := <-p.queuedAnns:
+ case block := <-p.queuedBlockAnns:
if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil {
return
}
@@ -140,6 +151,130 @@ func (p *peer) broadcast() {
}
}
+// broadcastTransactions is a write loop that schedules transaction broadcasts
+// to the remote peer. The goal is to have an async writer that does not lock up
+// node internals and at the same time rate limits queued data.
+func (p *peer) broadcastTransactions() {
+ var (
+ queue []common.Hash // Queue of hashes to broadcast as full transactions
+ done chan struct{} // Non-nil if background broadcaster is running
+ fail = make(chan error) // Channel used to receive network error
+ )
+ for {
+ // If there's no in-flight broadcast running, check if a new one is needed
+ if done == nil && len(queue) > 0 {
+ // Pile transaction until we reach our allowed network limit
+ var (
+ hashes []common.Hash
+ txs []*types.Transaction
+ size common.StorageSize
+ )
+ for i := 0; i < len(queue) && size < txsyncPackSize; i++ {
+ if tx := p.getPooledTx(queue[i]); tx != nil {
+ txs = append(txs, tx)
+ size += tx.Size()
+ }
+ hashes = append(hashes, queue[i])
+ }
+ queue = queue[:copy(queue, queue[len(hashes):])]
+
+ // If there's anything available to transfer, fire up an async writer
+ if len(txs) > 0 {
+ done = make(chan struct{})
+ go func() {
+ if err := p.sendTransactions(txs); err != nil {
+ fail <- err
+ return
+ }
+ close(done)
+ p.Log().Trace("Sent transactions", "count", len(txs))
+ }()
+ }
+ }
+ // Transfer goroutine may or may not have been started, listen for events
+ select {
+ case hashes := <-p.txBroadcast:
+ // New batch of transactions to be broadcast, queue them (with cap)
+ queue = append(queue, hashes...)
+ if len(queue) > maxQueuedTxs {
+ // Fancy copy and resize to ensure buffer doesn't grow indefinitely
+ queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])]
+ }
+
+ case <-done:
+ done = nil
+
+ case <-fail:
+ return
+
+ case <-p.term:
+ return
+ }
+ }
+}
+
+// announceTransactions is a write loop that schedules transaction announcements
+// to the remote peer. The goal is to have an async writer that does not lock up
+// node internals and at the same time rate limits queued data.
+func (p *peer) announceTransactions() {
+ var (
+ queue []common.Hash // Queue of hashes to announce as transaction stubs
+ done chan struct{} // Non-nil if background announcer is running
+ fail = make(chan error) // Channel used to receive network error
+ )
+ for {
+ // If there's no in-flight announce running, check if a new one is needed
+ if done == nil && len(queue) > 0 {
+ // Pile transaction hashes until we reach our allowed network limit
+ var (
+ hashes []common.Hash
+ pending []common.Hash
+ size common.StorageSize
+ )
+ for i := 0; i < len(queue) && size < txsyncPackSize; i++ {
+ if p.getPooledTx(queue[i]) != nil {
+ pending = append(pending, queue[i])
+ size += common.HashLength
+ }
+ hashes = append(hashes, queue[i])
+ }
+ queue = queue[:copy(queue, queue[len(hashes):])]
+
+ // If there's anything available to transfer, fire up an async writer
+ if len(pending) > 0 {
+ done = make(chan struct{})
+ go func() {
+ if err := p.sendPooledTransactionHashes(pending); err != nil {
+ fail <- err
+ return
+ }
+ close(done)
+ p.Log().Trace("Sent transaction announcements", "count", len(pending))
+ }()
+ }
+ }
+ // Transfer goroutine may or may not have been started, listen for events
+ select {
+ case hashes := <-p.txAnnounce:
+ // New batch of transactions to be broadcast, queue them (with cap)
+ queue = append(queue, hashes...)
+ if len(queue) > maxQueuedTxAnns {
+ // Fancy copy and resize to ensure buffer doesn't grow indefinitely
+ queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])]
+ }
+
+ case <-done:
+ done = nil
+
+ case <-fail:
+ return
+
+ case <-p.term:
+ return
+ }
+ }
+}
+
// close signals the broadcast goroutine to terminate.
func (p *peer) close() {
close(p.term)
@@ -201,46 +336,111 @@ func (p *peer) Send(msgcode uint64, data interface{}) error {
return p2p.Send(p.rw, msgcode, data)
}
-// SendTransactions sends transactions to the peer and includes the hashes
+// SendTransactions64 sends transactions to the peer and includes the hashes
+// in its transaction hash set for future reference.
+//
+// This method is legacy support for initial transaction exchange in eth/64 and
+// prior. For eth/65 and higher use SendPooledTransactionHashes.
+func (p *peer) SendTransactions64(txs types.Transactions) error {
+ return p.sendTransactions(txs)
+}
+
+// sendTransactions sends transactions to the peer and includes the hashes
// in its transaction hash set for future reference.
-func (p *peer) SendTransactions(txs types.Transactions) error {
+//
+// This method is a helper used by the async transaction sender. Don't call it
+// directly as the queueing (memory) and transmission (bandwidth) costs should
+// not be managed directly.
+func (p *peer) sendTransactions(txs types.Transactions) error {
// Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) {
+ p.knownTxs.Pop()
+ }
for _, tx := range txs {
p.knownTxs.Add(tx.Hash())
}
- for p.knownTxs.Cardinality() >= maxKnownTxs {
+ return p2p.Send(p.rw, TransactionMsg, txs)
+}
+
+// AsyncSendTransactions queues a list of transactions (by hash) to eventually
+// propagate to a remote peer. The number of pending sends are capped (new ones
+// will force old sends to be dropped)
+func (p *peer) AsyncSendTransactions(hashes []common.Hash) {
+ select {
+ case p.txBroadcast <- hashes:
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+ p.knownTxs.Pop()
+ }
+ for _, hash := range hashes {
+ p.knownTxs.Add(hash)
+ }
+ case <-p.term:
+ p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
+ }
+}
+
+// sendPooledTransactionHashes sends transaction hashes to the peer and includes
+// them in its transaction hash set for future reference.
+//
+// This method is a helper used by the async transaction announcer. Don't call it
+// directly as the queueing (memory) and transmission (bandwidth) costs should
+// not be managed directly.
+func (p *peer) sendPooledTransactionHashes(hashes []common.Hash) error {
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
p.knownTxs.Pop()
}
- return p2p.Send(p.rw, TxMsg, txs)
+ for _, hash := range hashes {
+ p.knownTxs.Add(hash)
+ }
+ return p2p.Send(p.rw, NewPooledTransactionHashesMsg, hashes)
}
-// AsyncSendTransactions queues list of transactions propagation to a remote
-// peer. If the peer's broadcast queue is full, the event is silently dropped.
-func (p *peer) AsyncSendTransactions(txs []*types.Transaction) {
+// AsyncSendPooledTransactionHashes queues a list of transactions hashes to eventually
+// announce to a remote peer. The number of pending sends are capped (new ones
+// will force old sends to be dropped)
+func (p *peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) {
select {
- case p.queuedTxs <- txs:
+ case p.txAnnounce <- hashes:
// Mark all the transactions as known, but ensure we don't overflow our limits
- for _, tx := range txs {
- p.knownTxs.Add(tx.Hash())
- }
- for p.knownTxs.Cardinality() >= maxKnownTxs {
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
p.knownTxs.Pop()
}
- default:
- p.Log().Debug("Dropping transaction propagation", "count", len(txs))
+ for _, hash := range hashes {
+ p.knownTxs.Add(hash)
+ }
+ case <-p.term:
+ p.Log().Debug("Dropping transaction announcement", "count", len(hashes))
+ }
+}
+
+// SendPooledTransactionsRLP sends requested transactions to the peer and adds the
+// hashes in its transaction hash set for future reference.
+//
+// Note, the method assumes the hashes are correct and correspond to the list of
+// transactions being sent.
+func (p *peer) SendPooledTransactionsRLP(hashes []common.Hash, txs []rlp.RawValue) error {
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+ p.knownTxs.Pop()
+ }
+ for _, hash := range hashes {
+ p.knownTxs.Add(hash)
}
+ return p2p.Send(p.rw, PooledTransactionsMsg, txs)
}
// SendNewBlockHashes announces the availability of a number of blocks through
// a hash notification.
func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error {
// Mark all the block hashes as known, but ensure we don't overflow our limits
+ for p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) {
+ p.knownBlocks.Pop()
+ }
for _, hash := range hashes {
p.knownBlocks.Add(hash)
}
- for p.knownBlocks.Cardinality() >= maxKnownBlocks {
- p.knownBlocks.Pop()
- }
request := make(newBlockHashesData, len(hashes))
for i := 0; i < len(hashes); i++ {
request[i].Hash = hashes[i]
@@ -254,12 +454,12 @@ func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error
// dropped.
func (p *peer) AsyncSendNewBlockHash(block *types.Block) {
select {
- case p.queuedAnns <- block:
+ case p.queuedBlockAnns <- block:
// Mark all the block hash as known, but ensure we don't overflow our limits
- p.knownBlocks.Add(block.Hash())
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
p.knownBlocks.Pop()
}
+ p.knownBlocks.Add(block.Hash())
default:
p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash())
}
@@ -268,10 +468,10 @@ func (p *peer) AsyncSendNewBlockHash(block *types.Block) {
// SendNewBlock propagates an entire block to a remote peer.
func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
// Mark all the block hash as known, but ensure we don't overflow our limits
- p.knownBlocks.Add(block.Hash())
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
p.knownBlocks.Pop()
}
+ p.knownBlocks.Add(block.Hash())
return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td})
}
@@ -279,12 +479,12 @@ func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error {
// the peer's broadcast queue is full, the event is silently dropped.
func (p *peer) AsyncSendNewBlock(block *types.Block, td *big.Int) {
select {
- case p.queuedProps <- &propEvent{block: block, td: td}:
+ case p.queuedBlocks <- &propEvent{block: block, td: td}:
// Mark all the block hash as known, but ensure we don't overflow our limits
- p.knownBlocks.Add(block.Hash())
for p.knownBlocks.Cardinality() >= maxKnownBlocks {
p.knownBlocks.Pop()
}
+ p.knownBlocks.Add(block.Hash())
default:
p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash())
}
@@ -359,6 +559,12 @@ func (p *peer) RequestReceipts(hashes []common.Hash) error {
return p2p.Send(p.rw, GetReceiptsMsg, hashes)
}
+// RequestTxs fetches a batch of transactions from a remote node.
+func (p *peer) RequestTxs(hashes []common.Hash) error {
+ p.Log().Debug("Fetching batch of transactions", "count", len(hashes))
+ return p2p.Send(p.rw, GetPooledTransactionsMsg, hashes)
+}
+
// Handshake executes the eth protocol handshake, negotiating version number,
// network IDs, difficulties, head and genesis blocks.
func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter, protocolName string) error {
@@ -381,7 +587,7 @@ func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
CurrentBlock: head,
GenesisBlock: genesis,
})
- case p.version == eth64 || istanbulNew:
+ case p.version >= eth64 || istanbulNew:
errc <- p2p.Send(p.rw, StatusMsg, &statusData{
ProtocolVersion: uint32(p.version),
NetworkID: network,
@@ -398,7 +604,7 @@ func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
switch {
case p.version == eth63 || istanbulOld:
errc <- p.readStatusLegacy(network, &status63, genesis)
- case p.version == eth64 || istanbulNew:
+ case p.version >= eth64 || istanbulNew:
errc <- p.readStatus(network, &status, genesis, forkFilter)
default:
panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
@@ -419,7 +625,7 @@ func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis
switch {
case p.version == eth63 || istanbulOld:
p.td, p.head = status63.TD, status63.CurrentBlock
- case p.version == eth64 || istanbulNew:
+ case p.version >= eth64 || istanbulNew:
p.td, p.head = status.TD, status.Head
default:
panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version))
@@ -520,7 +726,10 @@ func (ps *peerSet) Register(p *peer) error {
return errAlreadyRegistered
}
ps.peers[p.id] = p
- go p.broadcast()
+
+ go p.broadcastBlocks()
+ go p.broadcastTransactions()
+ go p.announceTransactions()
return nil
}
diff --git a/eth/protocol.go b/eth/protocol.go
index 48259980a4..d2a933985c 100644
--- a/eth/protocol.go
+++ b/eth/protocol.go
@@ -33,16 +33,17 @@ import (
const (
eth63 = 63
eth64 = 64
+ eth65 = 65
)
// protocolName is the official short name of the protocol used during capability negotiation.
var protocolName = "eth"
// ProtocolVersions are the supported versions of the eth protocol (first is primary).
-var ProtocolVersions = []uint{eth64, eth63}
+var ProtocolVersions = []uint{eth65, eth64, eth63}
// protocolLengths are the number of implemented message corresponding to different protocol versions.
-// var protocolLengths = map[uint]uint64{eth64: 17, eth63: 17}
+// var protocolLengths = map[uint]uint64{eth65: 17, eth64: 17, eth63: 17}
const protocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message
@@ -50,7 +51,7 @@ const protocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a prot
const (
StatusMsg = 0x00
NewBlockHashesMsg = 0x01
- TxMsg = 0x02
+ TransactionMsg = 0x02
GetBlockHeadersMsg = 0x03
BlockHeadersMsg = 0x04
GetBlockBodiesMsg = 0x05
@@ -60,6 +61,14 @@ const (
NodeDataMsg = 0x0e
GetReceiptsMsg = 0x0f
ReceiptsMsg = 0x10
+
+ // New protocol message codes introduced in eth65
+ //
+ // Previously these message ids were used by some legacy and unsupported
+ // eth protocols, reown them here.
+ NewPooledTransactionHashesMsg = 0x08
+ GetPooledTransactionsMsg = 0x09
+ PooledTransactionsMsg = 0x0a
)
type errCode int
@@ -94,6 +103,14 @@ var errorToString = map[int]string{
}
type txPool interface {
+ // Has returns an indicator whether txpool has a transaction
+ // cached with the given hash.
+ Has(hash common.Hash) bool
+
+ // Get retrieves the transaction from local txpool with given
+ // tx hash.
+ Get(hash common.Hash) *types.Transaction
+
// AddRemotes should add the given transactions to the pool.
AddRemotes([]*types.Transaction) []error
diff --git a/eth/protocol_test.go b/eth/protocol_test.go
index 51222dd880..fd05acf268 100644
--- a/eth/protocol_test.go
+++ b/eth/protocol_test.go
@@ -20,6 +20,7 @@ import (
"fmt"
"math/big"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -61,7 +62,7 @@ func TestStatusMsgErrors63(t *testing.T) {
wantError error
}{
{
- code: TxMsg, data: []interface{}{},
+ code: TransactionMsg, data: []interface{}{},
wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
},
{
@@ -113,7 +114,7 @@ func TestStatusMsgErrors64(t *testing.T) {
wantError error
}{
{
- code: TxMsg, data: []interface{}{},
+ code: TransactionMsg, data: []interface{}{},
wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"),
},
{
@@ -180,16 +181,16 @@ func TestForkIDSplit(t *testing.T) {
blocksNoFork, _ = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil)
blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil)
- ethNoFork, _ = NewProtocolManager(configNoFork, nil, downloader.FullSync, 1, new(event.TypeMux), new(testTxPool), engine, chainNoFork, dbNoFork, 1, nil, false)
- ethProFork, _ = NewProtocolManager(configProFork, nil, downloader.FullSync, 1, new(event.TypeMux), new(testTxPool), engine, chainProFork, dbProFork, 1, nil, false)
+ ethNoFork, _ = NewProtocolManager(configNoFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainNoFork, dbNoFork, 1, nil, false)
+ ethProFork, _ = NewProtocolManager(configProFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainProFork, dbProFork, 1, nil, false)
)
ethNoFork.Start(1000)
ethProFork.Start(1000)
// Both nodes should allow the other to connect (same genesis, next fork is the same)
p2pNoFork, p2pProFork := p2p.MsgPipe()
- peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork)
- peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork)
+ peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
+ peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
errc := make(chan error, 2)
go func() { errc <- ethNoFork.handle(peerProFork) }()
@@ -207,8 +208,8 @@ func TestForkIDSplit(t *testing.T) {
chainProFork.InsertChain(blocksProFork[:1])
p2pNoFork, p2pProFork = p2p.MsgPipe()
- peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork)
- peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork)
+ peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
+ peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
errc = make(chan error, 2)
go func() { errc <- ethNoFork.handle(peerProFork) }()
@@ -226,8 +227,8 @@ func TestForkIDSplit(t *testing.T) {
chainProFork.InsertChain(blocksProFork[1:2])
p2pNoFork, p2pProFork = p2p.MsgPipe()
- peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork)
- peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork)
+ peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil)
+ peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil)
errc = make(chan error, 2)
go func() { errc <- ethNoFork.handle(peerProFork) }()
@@ -246,6 +247,7 @@ func TestForkIDSplit(t *testing.T) {
// This test checks that received transactions are added to the local pool.
func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) }
func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) }
+func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, 65) }
func testRecvTransactions(t *testing.T, protocol int) {
txAdded := make(chan []*types.Transaction)
@@ -256,7 +258,7 @@ func testRecvTransactions(t *testing.T, protocol int) {
defer p.close()
tx := newTestTransaction(testAccount, 0, 0)
- if err := p2p.Send(p.app, TxMsg, []interface{}{tx}); err != nil {
+ if err := p2p.Send(p.app, TransactionMsg, []interface{}{tx}); err != nil {
t.Fatalf("send error: %v", err)
}
select {
@@ -274,18 +276,22 @@ func testRecvTransactions(t *testing.T, protocol int) {
// This test checks that pending transactions are sent.
func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) }
func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) }
+func TestSendTransactions65(t *testing.T) { testSendTransactions(t, 65) }
func testSendTransactions(t *testing.T, protocol int) {
pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil)
defer pm.Stop()
- // Fill the pool with big transactions.
+ // Fill the pool with big transactions (use a subscription to wait until all
+ // the transactions are announced to avoid spurious events causing extra
+ // broadcasts).
const txsize = txsyncPackSize / 10
alltxs := make([]*types.Transaction, 100)
for nonce := range alltxs {
alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), txsize)
}
pm.txpool.AddRemotes(alltxs)
+ time.Sleep(100 * time.Millisecond) // Wait until new tx even gets out of the system (lame)
// Connect several peers. They should all receive the pending transactions.
var wg sync.WaitGroup
@@ -297,18 +303,50 @@ func testSendTransactions(t *testing.T, protocol int) {
seen[tx.Hash()] = false
}
for n := 0; n < len(alltxs) && !t.Failed(); {
- var txs []*types.Transaction
- msg, err := p.app.ReadMsg()
- if err != nil {
- t.Errorf("%v: read error: %v", p.Peer, err)
- } else if msg.Code != TxMsg {
- t.Errorf("%v: got code %d, want TxMsg", p.Peer, msg.Code)
- }
- if err := msg.Decode(&txs); err != nil {
- t.Errorf("%v: %v", p.Peer, err)
+ var forAllHashes func(callback func(hash common.Hash))
+ switch protocol {
+ case 63:
+ fallthrough
+ case 64:
+ msg, err := p.app.ReadMsg()
+ if err != nil {
+ t.Errorf("%v: read error: %v", p.Peer, err)
+ continue
+ } else if msg.Code != TransactionMsg {
+ t.Errorf("%v: got code %d, want TxMsg", p.Peer, msg.Code)
+ continue
+ }
+ var txs []*types.Transaction
+ if err := msg.Decode(&txs); err != nil {
+ t.Errorf("%v: %v", p.Peer, err)
+ continue
+ }
+ forAllHashes = func(callback func(hash common.Hash)) {
+ for _, tx := range txs {
+ callback(tx.Hash())
+ }
+ }
+ case 65:
+ msg, err := p.app.ReadMsg()
+ if err != nil {
+ t.Errorf("%v: read error: %v", p.Peer, err)
+ continue
+ } else if msg.Code != NewPooledTransactionHashesMsg {
+ t.Errorf("%v: got code %d, want NewPooledTransactionHashesMsg", p.Peer, msg.Code)
+ continue
+ }
+ var hashes []common.Hash
+ if err := msg.Decode(&hashes); err != nil {
+ t.Errorf("%v: %v", p.Peer, err)
+ continue
+ }
+ forAllHashes = func(callback func(hash common.Hash)) {
+ for _, h := range hashes {
+ callback(h)
+ }
+ }
}
- for _, tx := range txs {
- hash := tx.Hash()
+ forAllHashes(func(hash common.Hash) {
seentx, want := seen[hash]
if seentx {
t.Errorf("%v: got tx more than once: %x", p.Peer, hash)
@@ -318,7 +356,7 @@ func testSendTransactions(t *testing.T, protocol int) {
}
seen[hash] = true
n++
- }
+ })
}
}
for i := 0; i < 3; i++ {
@@ -329,6 +367,53 @@ func testSendTransactions(t *testing.T, protocol int) {
wg.Wait()
}
+func TestTransactionPropagation(t *testing.T) { testSyncTransaction(t, true) }
+func TestTransactionAnnouncement(t *testing.T) { testSyncTransaction(t, false) }
+
+func testSyncTransaction(t *testing.T, propagtion bool) {
+ // Create a protocol manager for transaction fetcher and sender
+ pmFetcher, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil)
+ defer pmFetcher.Stop()
+ pmSender, _ := newTestProtocolManagerMust(t, downloader.FastSync, 1024, nil, nil)
+ pmSender.broadcastTxAnnouncesOnly = !propagtion
+ defer pmSender.Stop()
+
+ // Sync up the two peers
+ io1, io2 := p2p.MsgPipe()
+
+ go pmSender.handle(pmSender.newPeer(65, p2p.NewPeer(enode.ID{}, "sender", nil), io2, pmSender.txpool.Get))
+ go pmFetcher.handle(pmFetcher.newPeer(65, p2p.NewPeer(enode.ID{}, "fetcher", nil), io1, pmFetcher.txpool.Get))
+
+ time.Sleep(250 * time.Millisecond)
+ pmFetcher.synchronise(pmFetcher.peers.BestPeer())
+ atomic.StoreUint32(&pmFetcher.acceptTxs, 1)
+
+ newTxs := make(chan core.NewTxsEvent, 1024)
+ sub := pmFetcher.txpool.SubscribeNewTxsEvent(newTxs)
+ defer sub.Unsubscribe()
+
+ // Fill the pool with new transactions
+ alltxs := make([]*types.Transaction, 1024)
+ for nonce := range alltxs {
+ alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), 0)
+ }
+ pmSender.txpool.AddRemotes(alltxs)
+
+ var got int
+loop:
+ for {
+ select {
+ case ev := <-newTxs:
+ got += len(ev.Txs)
+ if got == 1024 {
+ break loop
+ }
+ case <-time.NewTimer(time.Second).C:
+ t.Fatal("Failed to retrieve all transaction")
+ }
+ }
+}
+
// Tests that the custom union field encoder and decoder works correctly.
func TestGetBlockHeadersDataEncodeDecode(t *testing.T) {
// Create a "random" hash for testing
diff --git a/eth/sync.go b/eth/sync.go
index e300b1dfaa..e777317bb8 100644
--- a/eth/sync.go
+++ b/eth/sync.go
@@ -45,6 +45,12 @@ type txsync struct {
// syncTransactions starts sending all currently pending transactions to the given peer.
func (pm *ProtocolManager) syncTransactions(p *peer) {
+ // Assemble the set of transaction to broadcast or announce to the remote
+ // peer. Fun fact, this is quite an expensive operation as it needs to sort
+ // the transactions if the sorting is not cached yet. However, with a random
+ // order, insertions could overflow the non-executable queues and get dropped.
+ //
+ // TODO(karalabe): Figure out if we could get away with random order somehow
var txs types.Transactions
pending, _ := pm.txpool.Pending()
for _, batch := range pending {
@@ -53,26 +59,40 @@ func (pm *ProtocolManager) syncTransactions(p *peer) {
if len(txs) == 0 {
return
}
+ // The eth/65 protocol introduces proper transaction announcements, so instead
+ // of dripping transactions across multiple peers, just send the entire list as
+ // an announcement and let the remote side decide what they need (likely nothing).
+ if p.version >= eth65 {
+ hashes := make([]common.Hash, len(txs))
+ for i, tx := range txs {
+ hashes[i] = tx.Hash()
+ }
+ p.AsyncSendPooledTransactionHashes(hashes)
+ return
+ }
+ // Out of luck, peer is running legacy protocols, drop the txs over
select {
- case pm.txsyncCh <- &txsync{p, txs}:
+ case pm.txsyncCh <- &txsync{p: p, txs: txs}:
case <-pm.quitSync:
}
}
-// txsyncLoop takes care of the initial transaction sync for each new
+// txsyncLoop64 takes care of the initial transaction sync for each new
// connection. When a new peer appears, we relay all currently pending
// transactions. In order to minimise egress bandwidth usage, we send
// the transactions in small packs to one peer at a time.
-func (pm *ProtocolManager) txsyncLoop() {
+func (pm *ProtocolManager) txsyncLoop64() {
var (
pending = make(map[enode.ID]*txsync)
sending = false // whether a send is active
pack = new(txsync) // the pack that is being sent
done = make(chan error, 1) // result of the send
)
-
// send starts a sending a pack of transactions from the sync.
send := func(s *txsync) {
+ if s.p.version >= eth65 {
+ panic("initial transaction syncer running on eth/65+")
+ }
// Fill pack with transactions up to the target size.
size := common.StorageSize(0)
pack.p = s.p
@@ -89,7 +109,7 @@ func (pm *ProtocolManager) txsyncLoop() {
// Send the pack in the background.
s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size)
sending = true
- go func() { done <- pack.p.SendTransactions(pack.txs) }()
+ go func() { done <- pack.p.SendTransactions64(pack.txs) }()
}
// pick chooses the next pending sync.
@@ -134,8 +154,10 @@ func (pm *ProtocolManager) txsyncLoop() {
// downloading hashes and blocks as well as handling the announcement handler.
func (pm *ProtocolManager) syncer() {
// Start and ensure cleanup of sync mechanisms
- pm.fetcher.Start()
- defer pm.fetcher.Stop()
+ pm.blockFetcher.Start()
+ pm.txFetcher.Start()
+ defer pm.blockFetcher.Stop()
+ defer pm.txFetcher.Stop()
defer pm.downloader.Terminate()
// Wait for different events to fire synchronisation operations
diff --git a/eth/sync_test.go b/eth/sync_test.go
index e4c99ff587..d02bc57108 100644
--- a/eth/sync_test.go
+++ b/eth/sync_test.go
@@ -26,9 +26,13 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
)
+func TestFastSyncDisabling63(t *testing.T) { testFastSyncDisabling(t, 63) }
+func TestFastSyncDisabling64(t *testing.T) { testFastSyncDisabling(t, 64) }
+func TestFastSyncDisabling65(t *testing.T) { testFastSyncDisabling(t, 65) }
+
// Tests that fast sync gets disabled as soon as a real block is successfully
// imported into the blockchain.
-func TestFastSyncDisabling(t *testing.T) {
+func testFastSyncDisabling(t *testing.T, protocol int) {
// Create a pristine protocol manager, check that fast sync is left enabled
pmEmpty, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil)
if atomic.LoadUint32(&pmEmpty.fastSync) == 0 {
@@ -42,8 +46,8 @@ func TestFastSyncDisabling(t *testing.T) {
// Sync up the two peers
io1, io2 := p2p.MsgPipe()
- go pmFull.handle(pmFull.newPeer(63, p2p.NewPeer(enode.ID{}, "empty", nil), io2))
- go pmEmpty.handle(pmEmpty.newPeer(63, p2p.NewPeer(enode.ID{}, "full", nil), io1))
+ go pmFull.handle(pmFull.newPeer(protocol, p2p.NewPeer(enode.ID{}, "empty", nil), io2, pmFull.txpool.Get))
+ go pmEmpty.handle(pmEmpty.newPeer(protocol, p2p.NewPeer(enode.ID{}, "full", nil), io1, pmEmpty.txpool.Get))
time.Sleep(250 * time.Millisecond)
pmEmpty.synchronise(pmEmpty.peers.BestPeer())
diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go
index 63d38ed32d..a74ed51e1a 100644
--- a/eth/tracers/tracer.go
+++ b/eth/tracers/tracer.go
@@ -93,6 +93,15 @@ type memoryWrapper struct {
// slice returns the requested range of memory as a byte slice.
func (mw *memoryWrapper) slice(begin, end int64) []byte {
+ if end == begin {
+ return []byte{}
+ }
+ if end < begin || begin < 0 {
+ // TODO(karalabe): We can't js-throw from Go inside duktape inside Go. The Go
+ // runtime goes belly up https://github.com/golang/go/issues/15639.
+ log.Warn("Tracer accessed out of bound memory", "offset", begin, "end", end)
+ return nil
+ }
if mw.memory.Len() < int(end) {
// TODO(karalabe): We can't js-throw from Go inside duktape inside Go. The Go
// runtime goes belly up https://github.com/golang/go/issues/15639.
@@ -104,7 +113,7 @@ func (mw *memoryWrapper) slice(begin, end int64) []byte {
// getUint returns the 32 bytes at the specified address interpreted as a uint.
func (mw *memoryWrapper) getUint(addr int64) *big.Int {
- if mw.memory.Len() < int(addr)+32 {
+ if mw.memory.Len() < int(addr)+32 || addr < 0 {
// TODO(karalabe): We can't js-throw from Go inside duktape inside Go. The Go
// runtime goes belly up https://github.com/golang/go/issues/15639.
log.Warn("Tracer accessed out of bound memory", "available", mw.memory.Len(), "offset", addr, "size", 32)
@@ -147,7 +156,7 @@ type stackWrapper struct {
// peek returns the nth-from-the-top element of the stack.
func (sw *stackWrapper) peek(idx int) *big.Int {
- if len(sw.stack.Data()) <= idx {
+ if len(sw.stack.Data()) <= idx || idx < 0 {
// TODO(karalabe): We can't js-throw from Go inside duktape inside Go. The Go
// runtime goes belly up https://github.com/golang/go/issues/15639.
log.Warn("Tracer accessed out of bound stack", "size", len(sw.stack.Data()), "index", idx)
diff --git a/eth/tracers/tracer_test.go b/eth/tracers/tracer_test.go
index 3fc6a4d5ca..32b97f38dc 100644
--- a/eth/tracers/tracer_test.go
+++ b/eth/tracers/tracer_test.go
@@ -64,6 +64,39 @@ func runTrace(tracer *Tracer) (json.RawMessage, error) {
return tracer.GetResult()
}
+// TestRegressionPanicSlice tests that we don't panic on bad arguments to memory access
+func TestRegressionPanicSlice(t *testing.T) {
+ tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err = runTrace(tracer); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestRegressionPanicSlice tests that we don't panic on bad arguments to stack peeks
+func TestRegressionPanicPeek(t *testing.T) {
+ tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err = runTrace(tracer); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// TestRegressionPanicSlice tests that we don't panic on bad arguments to memory getUint
+func TestRegressionPanicGetUint(t *testing.T) {
+ tracer, err := New("{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err = runTrace(tracer); err != nil {
+ t.Fatal(err)
+ }
+}
+
func TestTracing(t *testing.T) {
tracer, err := New("{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}")
if err != nil {
diff --git a/event/feed.go b/event/feed.go
index 02f3ca6875..33dafe5886 100644
--- a/event/feed.go
+++ b/event/feed.go
@@ -138,6 +138,7 @@ func (f *Feed) Send(value interface{}) (nsent int) {
if !f.typecheck(rvalue.Type()) {
f.sendLock <- struct{}{}
+ f.mu.Unlock()
panic(feedTypeError{op: "Send", got: rvalue.Type(), want: f.etype})
}
f.mu.Unlock()
diff --git a/event/subscription.go b/event/subscription.go
index d03f465075..c80d171f3a 100644
--- a/event/subscription.go
+++ b/event/subscription.go
@@ -145,7 +145,6 @@ func (s *resubscribeSub) loop() {
func (s *resubscribeSub) subscribe() Subscription {
subscribed := make(chan error)
var sub Subscription
-retry:
for {
s.lastTry = mclock.Now()
ctx, cancel := context.WithCancel(context.Background())
@@ -157,19 +156,19 @@ retry:
select {
case err := <-subscribed:
cancel()
- if err != nil {
- // Subscribing failed, wait before launching the next try.
- if s.backoffWait() {
- return nil
+ if err == nil {
+ if sub == nil {
+ panic("event: ResubscribeFunc returned nil subscription and no error")
}
- continue retry
+ return sub
}
- if sub == nil {
- panic("event: ResubscribeFunc returned nil subscription and no error")
+ // Subscribing failed, wait before launching the next try.
+ if s.backoffWait() {
+ return nil // unsubscribed during wait
}
- return sub
case <-s.unsub:
cancel()
+ <-subscribed // avoid leaking the s.fn goroutine.
return nil
}
}
diff --git a/event/subscription_test.go b/event/subscription_test.go
index 5b8a2c8ede..c48be3aa30 100644
--- a/event/subscription_test.go
+++ b/event/subscription_test.go
@@ -102,7 +102,7 @@ func TestResubscribe(t *testing.T) {
func TestResubscribeAbort(t *testing.T) {
t.Parallel()
- done := make(chan error)
+ done := make(chan error, 1)
sub := Resubscribe(0, func(ctx context.Context) (Subscription, error) {
select {
case <-ctx.Done():
diff --git a/extension/privacyExtension/state_set_utilities.go b/extension/privacyExtension/state_set_utilities.go
index 1d6143735b..5b1814d191 100644
--- a/extension/privacyExtension/state_set_utilities.go
+++ b/extension/privacyExtension/state_set_utilities.go
@@ -68,7 +68,7 @@ func setManagedParties(ptm private.PrivateTransactionManager, privateState *stat
return
}
- _, managedParties, _, _, err := ptm.Receive(ptmHash)
+ _, managedParties, _, _, _ := ptm.Receive(ptmHash)
newManagedParties := common.AppendSkipDuplicates(existingManagedParties, managedParties...)
privateState.SetManagedParties(address, newManagedParties)
}
diff --git a/fuzzbuzz.yaml b/fuzzbuzz.yaml
new file mode 100644
index 0000000000..2a4f0c296f
--- /dev/null
+++ b/fuzzbuzz.yaml
@@ -0,0 +1,44 @@
+# bmt keystore rlp trie whisperv6
+
+base: ubuntu:16.04
+targets:
+ - name: rlp
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/rlp/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/rlp
+ checkout: github.com/ethereum/go-ethereum/
+ - name: keystore
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/keystore/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/keystore
+ checkout: github.com/ethereum/go-ethereum/
+ - name: trie
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/trie/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/trie
+ checkout: github.com/ethereum/go-ethereum/
+ - name: txfetcher
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/txfetcher/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/txfetcher
+ checkout: github.com/ethereum/go-ethereum/
+ - name: whisperv6
+ language: go
+ version: "1.13"
+ corpus: ./fuzzers/whisperv6/corpus
+ harness:
+ function: Fuzz
+ package: github.com/ethereum/go-ethereum/tests/fuzzers/whisperv6
+ checkout: github.com/ethereum/go-ethereum/
diff --git a/go.mod b/go.mod
index 63828db25d..3adbae0adc 100644
--- a/go.mod
+++ b/go.mod
@@ -10,10 +10,12 @@ require (
github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect
github.com/BurntSushi/toml v0.3.1
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect
- github.com/VictoriaMetrics/fastcache v1.5.2
+ github.com/VictoriaMetrics/fastcache v1.5.3
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847
+ github.com/aws/aws-sdk-go v1.25.48
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6
github.com/cespare/cp v0.1.0
+ github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9
github.com/coreos/etcd v3.3.20+incompatible
github.com/coreos/go-semver v0.3.0 // indirect
@@ -21,7 +23,9 @@ require (
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/davecgh/go-spew v1.1.1
github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea
+ github.com/dlclark/regexp2 v1.2.0 // indirect
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf
+ github.com/dop251/goja v0.0.0-20200106141417-aaec0e7bde29
github.com/eapache/channels v1.1.0
github.com/eapache/queue v1.1.0 // indirect
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c
@@ -31,6 +35,7 @@ require (
github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff
github.com/go-ole/go-ole v1.2.1 // indirect
+ github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect
github.com/go-stack/stack v1.8.0
github.com/golang/mock v1.4.3
github.com/golang/protobuf v1.3.4
@@ -60,7 +65,6 @@ require (
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7
github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150
github.com/rjeczalik/notify v0.9.1
- github.com/robertkrimen/otto v0.0.0-20170205013659-6a77b7cbc37d
github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00
github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4
@@ -78,6 +82,7 @@ require (
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20191008105621-543471e840be
golang.org/x/text v0.3.2
+ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4
google.golang.org/grpc v1.29.1
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127
gopkg.in/karalabe/cookiejar.v2 v2.0.0-20150724131613-8dcd6a7f4951
diff --git a/go.sum b/go.sum
index 33f5a3e503..8297552f41 100644
--- a/go.sum
+++ b/go.sum
@@ -27,8 +27,8 @@ github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8=
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
-github.com/VictoriaMetrics/fastcache v1.5.2 h1:Erd8iIuBAL9kke8JzM4+WxkKuFkHh3ktwLanJvDgR44=
-github.com/VictoriaMetrics/fastcache v1.5.2/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
+github.com/VictoriaMetrics/fastcache v1.5.3 h1:2odJnXLbFZcoV9KYtQ+7TH1UOq3dn3AssMgieaezkR4=
+github.com/VictoriaMetrics/fastcache v1.5.3/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
@@ -36,6 +36,8 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 h1:rtI0fD4oG/8eVokGVPYJEW1F88p1ZNgXiEIs9thEE4A=
github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk=
+github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI=
github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ=
@@ -47,6 +49,8 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18 h1:pl4eWIqvFe/Kg3zkn7NxevNzILnZYWDCG7qbA1CJik0=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 h1:J82+/8rub3qSy0HxEnoYD8cs+HDlHWYrqYXe2Vqxluk=
github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@@ -67,12 +71,18 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vs
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk=
+github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmakYiSlqu2425CHyFXLZZnvm7PDpU8M=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4k=
github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/dop251/goja v0.0.0-20191203121440-007eef3bc40f h1:vtCDQseO/Sbu5IZSoc2uzZ7CkSoai7OtpcwGFK5FlyE=
+github.com/dop251/goja v0.0.0-20191203121440-007eef3bc40f/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
+github.com/dop251/goja v0.0.0-20200106141417-aaec0e7bde29 h1:Ewd9K+mC725sITA12QQHRqWj78NU4t7EhlFVVgdlzJg=
+github.com/dop251/goja v0.0.0-20200106141417-aaec0e7bde29/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA=
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c h1:JHHhtb9XWJrGNMcrVP6vyzO4dusgi/HnceHTgxSejUM=
github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/gosigar v0.8.1-0.20180330100440-37f05ff46ffa h1:XKAhUk/dtp+CV0VO6mhG2V7jA9vbcGcnYF/Ay9NjZrY=
@@ -95,6 +105,8 @@ github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2i
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug=
+github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
@@ -147,6 +159,8 @@ github.com/jpmorganchase/quorum-security-plugin-sdk-go v0.0.0-20200714173835-22a
github.com/jpmorganchase/quorum-security-plugin-sdk-go v0.0.0-20200714173835-22a319bb78ce/go.mod h1:Zq2sOjX+LZrNoV+cyvS/4Xsy69v8HOFKHtCLkiXQ3Kk=
github.com/jpmorganchase/quorum/crypto/secp256k1 v0.0.0-20200804194033-c8f07379f487 h1:xEt7bIjWO384fz4pz9ZcS3CMG2hUgPwgLb180jeqLzs=
github.com/jpmorganchase/quorum/crypto/secp256k1 v0.0.0-20200804194033-c8f07379f487/go.mod h1:w+wA+9W4bxqH3Jg8upYxYo8MYlQL0UOXJfKVSr+45Ok=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21 h1:F/iKcka0K2LgnKy/fgSBf235AETtm1n1TvBzqu40LE0=
github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw=
diff --git a/graphql/graphql.go b/graphql/graphql.go
index 85bd69e31e..659006d443 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -342,6 +342,33 @@ func (t *Transaction) PrivateInputData(ctx context.Context) (*hexutil.Bytes, err
// END QUORUM
+func (t *Transaction) R(ctx context.Context) (hexutil.Big, error) {
+ tx, err := t.resolve(ctx)
+ if err != nil || tx == nil {
+ return hexutil.Big{}, err
+ }
+ _, r, _ := tx.RawSignatureValues()
+ return hexutil.Big(*r), nil
+}
+
+func (t *Transaction) S(ctx context.Context) (hexutil.Big, error) {
+ tx, err := t.resolve(ctx)
+ if err != nil || tx == nil {
+ return hexutil.Big{}, err
+ }
+ _, _, s := tx.RawSignatureValues()
+ return hexutil.Big(*s), nil
+}
+
+func (t *Transaction) V(ctx context.Context) (hexutil.Big, error) {
+ tx, err := t.resolve(ctx)
+ if err != nil || tx == nil {
+ return hexutil.Big{}, err
+ }
+ v, _, _ := tx.RawSignatureValues()
+ return hexutil.Big(*v), nil
+}
+
type BlockType int
// Block represents an Ethereum block.
diff --git a/graphql/schema.go b/graphql/schema.go
index 4a86e63a44..669d951986 100644
--- a/graphql/schema.go
+++ b/graphql/schema.go
@@ -119,6 +119,9 @@ const schema string = `
isPrivate: Boolean
# PrivateInputData is the actual payload of Quorum private transaction
privateInputData: Bytes
+ r: BigInt!
+ s: BigInt!
+ v: BigInt!
}
# BlockFilterCriteria encapsulates log filter criteria for a filter applied
diff --git a/internal/build/download.go b/internal/build/download.go
index c506800295..0ed0b5e130 100644
--- a/internal/build/download.go
+++ b/internal/build/download.go
@@ -83,8 +83,10 @@ func (db *ChecksumDB) DownloadFile(url, dstPath string) error {
fmt.Printf("downloading from %s\n", url)
resp, err := http.Get(url)
- if err != nil || resp.StatusCode != http.StatusOK {
- return fmt.Errorf("download error: code %d, err %v", resp.StatusCode, err)
+ if err != nil {
+ return fmt.Errorf("download error: %v", err)
+ } else if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("download error: status %d", resp.StatusCode)
}
defer resp.Body.Close()
if err := os.MkdirAll(filepath.Dir(dstPath), 0755); err != nil {
diff --git a/internal/build/util.go b/internal/build/util.go
index e2ba055bd4..785abd3139 100644
--- a/internal/build/util.go
+++ b/internal/build/util.go
@@ -51,15 +51,6 @@ func MustRunCommand(cmd string, args ...string) {
MustRun(exec.Command(cmd, args...))
}
-// GOPATH returns the value that the GOPATH environment
-// variable should be set to.
-func GOPATH() string {
- if os.Getenv("GOPATH") == "" {
- log.Fatal("GOPATH is not set")
- }
- return os.Getenv("GOPATH")
-}
-
var warnedAboutGit bool
// RunGit runs a git subcommand and returns its output.
diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go
index f8b1b43c01..d4ccc58329 100644
--- a/internal/cmdtest/test_cmd.go
+++ b/internal/cmdtest/test_cmd.go
@@ -127,12 +127,12 @@ func (tt *TestCmd) matchExactOutput(want []byte) error {
// Find the mismatch position.
for i := 0; i < n; i++ {
if want[i] != buf[i] {
- return fmt.Errorf("Output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s",
+ return fmt.Errorf("output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s",
buf[:i], buf[i:n], want)
}
}
if n < len(want) {
- return fmt.Errorf("Not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s",
+ return fmt.Errorf("not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s",
buf, want[:n], want[n:])
}
}
diff --git a/internal/debug/flags.go b/internal/debug/flags.go
index 46c8fe9f80..3c85749402 100644
--- a/internal/debug/flags.go
+++ b/internal/debug/flags.go
@@ -112,20 +112,9 @@ func init() {
// Setup initializes profiling and logging based on the CLI flags.
// It should be called as early as possible in the program.
-func Setup(ctx *cli.Context, logdir string) error {
+func Setup(ctx *cli.Context) error {
// logging
log.PrintOrigins(ctx.GlobalBool(debugFlag.Name))
- if logdir != "" {
- rfh, err := log.RotatingFileHandler(
- logdir,
- 262144,
- log.JSONFormatOrderedEx(false, true),
- )
- if err != nil {
- return err
- }
- glogger.SetHandler(log.MultiHandler(ostream, rfh))
- }
glogger.Verbosity(log.Lvl(ctx.GlobalInt(verbosityFlag.Name)))
glogger.Vmodule(ctx.GlobalString(vmoduleFlag.Name))
glogger.BacktraceAt(ctx.GlobalString(backtraceAtFlag.Name))
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 780dbc58f3..f14dc5fa9d 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -585,7 +585,7 @@ func (s *PrivateAccountAPI) InitializeWallet(ctx context.Context, url string) (s
case *scwallet.Wallet:
return mnemonic, wallet.Initialize(seed)
default:
- return "", fmt.Errorf("Specified wallet does not support initialization")
+ return "", fmt.Errorf("specified wallet does not support initialization")
}
}
@@ -600,7 +600,7 @@ func (s *PrivateAccountAPI) Unpair(ctx context.Context, url string, pin string)
case *scwallet.Wallet:
return wallet.Unpair([]byte(pin))
default:
- return fmt.Errorf("Specified wallet does not support pairing")
+ return fmt.Errorf("specified wallet does not support pairing")
}
}
@@ -1261,7 +1261,9 @@ func (s *PublicBlockChainAPI) rpcMarshalBlock(b *types.Block, inclTx bool, fullT
if err != nil {
return nil, err
}
- fields["totalDifficulty"] = (*hexutil.Big)(s.b.GetTd(b.Hash()))
+ if inclTx {
+ fields["totalDifficulty"] = (*hexutil.Big)(s.b.GetTd(b.Hash()))
+ }
return fields, err
}
@@ -1644,7 +1646,7 @@ func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error {
args.Nonce = (*hexutil.Uint64)(&nonce)
}
if args.Data != nil && args.Input != nil && !bytes.Equal(*args.Data, *args.Input) {
- return errors.New(`Both "data" and "input" are set and not equal. Please use "input" to pass transaction call data.`)
+ return errors.New(`both "data" and "input" are set and not equal. Please use "input" to pass transaction call data`)
}
if args.To == nil {
// Contract creation
@@ -2195,7 +2197,7 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxAr
}
}
- return common.Hash{}, fmt.Errorf("Transaction %#x not found", matchTx.Hash())
+ return common.Hash{}, fmt.Errorf("transaction %#x not found", matchTx.Hash())
}
// PublicDebugAPI is the collection of Ethereum APIs exposed over the public
diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go
index 9dcc84f8c0..4eb2006766 100644
--- a/internal/ethapi/api_test.go
+++ b/internal/ethapi/api_test.go
@@ -445,27 +445,6 @@ func TestHandlePrivateTransaction_whenRawStandardPrivateMessageCall(t *testing.T
}
-// Copy and set private
-func copyTransaction(tx *types.Transaction) *types.Transaction {
- var privateTx *types.Transaction
- if tx.To() == nil {
- privateTx = types.NewContractCreation(tx.Nonce(),
- tx.Value(),
- tx.Gas(),
- tx.GasPrice(),
- tx.Data())
- } else {
- privateTx = types.NewTransaction(tx.Nonce(),
- *tx.To(),
- tx.Value(),
- tx.Gas(),
- tx.GasPrice(),
- tx.Data())
- }
- privateTx.SetPrivate()
- return privateTx
-}
-
type StubBackend struct {
getEVMCalled bool
mockAccountExtraDataStateGetter *vm.MockAccountExtraDataStateGetter
@@ -649,6 +628,10 @@ func (sb *StubBackend) ChainConfig() *params.ChainConfig {
return params.QuorumTestChainConfig
}
+func (sb *StubBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ panic("implement me")
+}
+
type StubMinimalApiState struct {
}
diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go
index 97d32ee278..f26d953067 100644
--- a/internal/ethapi/backend.go
+++ b/internal/ethapi/backend.go
@@ -45,7 +45,6 @@ type Backend interface {
ProtocolVersion() int
SuggestPrice(ctx context.Context) (*big.Int, error)
ChainDb() ethdb.Database
- EventMux() *event.TypeMux
AccountManager() *accounts.Manager
ExtRPCEnabled() bool
CallTimeOut() time.Duration
@@ -83,6 +82,7 @@ type Backend interface {
GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)
ServiceFilter(ctx context.Context, session *bloombits.MatcherSession)
SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
+ SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription
SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
ChainConfig() *params.ChainConfig
diff --git a/internal/jsre/completion.go b/internal/jsre/completion.go
index 7f484bbbb8..2c105184cb 100644
--- a/internal/jsre/completion.go
+++ b/internal/jsre/completion.go
@@ -20,35 +20,43 @@ import (
"sort"
"strings"
- "github.com/robertkrimen/otto"
+ "github.com/dop251/goja"
)
// CompleteKeywords returns potential continuations for the given line. Since line is
// evaluated, callers need to make sure that evaluating line does not have side effects.
func (jsre *JSRE) CompleteKeywords(line string) []string {
var results []string
- jsre.Do(func(vm *otto.Otto) {
+ jsre.Do(func(vm *goja.Runtime) {
results = getCompletions(vm, line)
})
return results
}
-func getCompletions(vm *otto.Otto, line string) (results []string) {
+func getCompletions(vm *goja.Runtime, line string) (results []string) {
parts := strings.Split(line, ".")
- objRef := "this"
- prefix := line
- if len(parts) > 1 {
- objRef = strings.Join(parts[0:len(parts)-1], ".")
- prefix = parts[len(parts)-1]
+ if len(parts) == 0 {
+ return nil
}
- obj, _ := vm.Object(objRef)
- if obj == nil {
- return nil
+ // Find the right-most fully named object in the line. e.g. if line = "x.y.z"
+ // and "x.y" is an object, obj will reference "x.y".
+ obj := vm.GlobalObject()
+ for i := 0; i < len(parts)-1; i++ {
+ v := obj.Get(parts[i])
+ if v == nil {
+ return nil // No object was found
+ }
+ obj = v.ToObject(vm)
}
+
+ // Go over the keys of the object and retain the keys matching prefix.
+ // Example: if line = "x.y.z" and "x.y" exists and has keys "zebu", "zebra"
+ // and "platypus", then "x.y.zebu" and "x.y.zebra" will be added to results.
+ prefix := parts[len(parts)-1]
iterOwnAndConstructorKeys(vm, obj, func(k string) {
if strings.HasPrefix(k, prefix) {
- if objRef == "this" {
+ if len(parts) == 1 {
results = append(results, k)
} else {
results = append(results, strings.Join(parts[:len(parts)-1], ".")+"."+k)
@@ -59,9 +67,9 @@ func getCompletions(vm *otto.Otto, line string) (results []string) {
// Append opening parenthesis (for functions) or dot (for objects)
// if the line itself is the only completion.
if len(results) == 1 && results[0] == line {
- obj, _ := vm.Object(line)
+ obj := obj.Get(parts[len(parts)-1])
if obj != nil {
- if obj.Class() == "Function" {
+ if _, isfunc := goja.AssertFunction(obj); isfunc {
results[0] += "("
} else {
results[0] += "."
diff --git a/internal/jsre/completion_test.go b/internal/jsre/completion_test.go
index ccbd73dccc..2d05547d12 100644
--- a/internal/jsre/completion_test.go
+++ b/internal/jsre/completion_test.go
@@ -39,6 +39,10 @@ func TestCompleteKeywords(t *testing.T) {
input string
want []string
}{
+ {
+ input: "St",
+ want: []string{"String"},
+ },
{
input: "x",
want: []string{"x."},
diff --git a/internal/jsre/deps/bindata.go b/internal/jsre/deps/bindata.go
index 7454c7cfcb..a6545b7140 100644
--- a/internal/jsre/deps/bindata.go
+++ b/internal/jsre/deps/bindata.go
@@ -1,8 +1,7 @@
-// Code generated by go-bindata. DO NOT EDIT.
+// Package deps Code generated by go-bindata. (@generated) DO NOT EDIT.
// sources:
// bignumber.js
// web3.js
-
package deps
import (
@@ -20,7 +19,7 @@ import (
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
- return nil, fmt.Errorf("Read %q: %v", name, err)
+ return nil, fmt.Errorf("read %q: %v", name, err)
}
var buf bytes.Buffer
@@ -28,7 +27,7 @@ func bindataRead(data []byte, name string) ([]byte, error) {
clErr := gz.Close()
if err != nil {
- return nil, fmt.Errorf("Read %q: %v", name, err)
+ return nil, fmt.Errorf("read %q: %v", name, err)
}
if clErr != nil {
return nil, err
@@ -49,21 +48,32 @@ type bindataFileInfo struct {
modTime time.Time
}
+// Name return file name
func (fi bindataFileInfo) Name() string {
return fi.name
}
+
+// Size return file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
+
+// Mode return file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
+
+// ModTime return file modify time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
+
+// IsDir return file whether a directory
func (fi bindataFileInfo) IsDir() bool {
- return false
+ return fi.mode&os.ModeDir != 0
}
+
+// Sys return file is sys mode
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
@@ -161,8 +171,7 @@ func AssetNames() []string {
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"bignumber.js": bignumberJs,
-
- "web3.js": web3Js,
+ "web3.js": web3Js,
}
// AssetDir returns the file names below a certain
@@ -228,7 +237,11 @@ func RestoreAsset(dir, name string) error {
if err != nil {
return err
}
- return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+ err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
+ if err != nil {
+ return err
+ }
+ return nil
}
// RestoreAssets restores an asset under the given directory recursively
diff --git a/internal/jsre/jsre.go b/internal/jsre/jsre.go
index 1b3528a036..bc8869b254 100644
--- a/internal/jsre/jsre.go
+++ b/internal/jsre/jsre.go
@@ -26,30 +26,30 @@ import (
"math/rand"
"time"
+ "github.com/dop251/goja"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/internal/jsre/deps"
- "github.com/robertkrimen/otto"
)
-var (
- BignumberJs = deps.MustAsset("bignumber.js")
- Web3Js = deps.MustAsset("web3.js")
-)
-
-/*
-JSRE is a generic JS runtime environment embedding the otto JS interpreter.
-It provides some helper functions to
-- load code from files
-- run code snippets
-- require libraries
-- bind native go objects
-*/
+// JSRE is a JS runtime environment embedding the goja interpreter.
+// It provides helper functions to load code from files, run code snippets
+// and bind native go objects to JS.
+//
+// The runtime runs all code on a dedicated event loop and does not expose the underlying
+// goja runtime directly. To use the runtime, call JSRE.Do. When binding a Go function,
+// use the Call type to gain access to the runtime.
type JSRE struct {
assetPath string
output io.Writer
evalQueue chan *evalReq
stopEventLoop chan bool
closed chan struct{}
+ vm *goja.Runtime
+}
+
+// Call is the argument type of Go functions which are callable from JS.
+type Call struct {
+ goja.FunctionCall
+ VM *goja.Runtime
}
// jsTimer is a single timer instance with a callback function
@@ -57,12 +57,12 @@ type jsTimer struct {
timer *time.Timer
duration time.Duration
interval bool
- call otto.FunctionCall
+ call goja.FunctionCall
}
// evalReq is a serialized vm execution request processed by runEventLoop.
type evalReq struct {
- fn func(vm *otto.Otto)
+ fn func(vm *goja.Runtime)
done chan bool
}
@@ -74,9 +74,10 @@ func New(assetPath string, output io.Writer) *JSRE {
closed: make(chan struct{}),
evalQueue: make(chan *evalReq),
stopEventLoop: make(chan bool),
+ vm: goja.New(),
}
go re.runEventLoop()
- re.Set("loadScript", re.loadScript)
+ re.Set("loadScript", MakeCallback(re.vm, re.loadScript))
re.Set("inspect", re.prettyPrintJS)
return re
}
@@ -99,21 +100,20 @@ func randomSource() *rand.Rand {
// serialized way and calls timer callback functions at the appropriate time.
// Exported functions always access the vm through the event queue. You can
-// call the functions of the otto vm directly to circumvent the queue. These
+// call the functions of the goja vm directly to circumvent the queue. These
// functions should be used if and only if running a routine that was already
// called from JS through an RPC call.
func (re *JSRE) runEventLoop() {
defer close(re.closed)
- vm := otto.New()
r := randomSource()
- vm.SetRandomSource(r.Float64)
+ re.vm.SetRandSource(r.Float64)
registry := map[*jsTimer]*jsTimer{}
ready := make(chan *jsTimer)
- newTimer := func(call otto.FunctionCall, interval bool) (*jsTimer, otto.Value) {
- delay, _ := call.Argument(1).ToInteger()
+ newTimer := func(call goja.FunctionCall, interval bool) (*jsTimer, goja.Value) {
+ delay := call.Argument(1).ToInteger()
if 0 >= delay {
delay = 1
}
@@ -128,47 +128,43 @@ func (re *JSRE) runEventLoop() {
ready <- timer
})
- value, err := call.Otto.ToValue(timer)
- if err != nil {
- panic(err)
- }
- return timer, value
+ return timer, re.vm.ToValue(timer)
}
- setTimeout := func(call otto.FunctionCall) otto.Value {
+ setTimeout := func(call goja.FunctionCall) goja.Value {
_, value := newTimer(call, false)
return value
}
- setInterval := func(call otto.FunctionCall) otto.Value {
+ setInterval := func(call goja.FunctionCall) goja.Value {
_, value := newTimer(call, true)
return value
}
- clearTimeout := func(call otto.FunctionCall) otto.Value {
- timer, _ := call.Argument(0).Export()
+ clearTimeout := func(call goja.FunctionCall) goja.Value {
+ timer := call.Argument(0).Export()
if timer, ok := timer.(*jsTimer); ok {
timer.timer.Stop()
delete(registry, timer)
}
- return otto.UndefinedValue()
+ return goja.Undefined()
}
- vm.Set("_setTimeout", setTimeout)
- vm.Set("_setInterval", setInterval)
- vm.Run(`var setTimeout = function(args) {
+ re.vm.Set("_setTimeout", setTimeout)
+ re.vm.Set("_setInterval", setInterval)
+ re.vm.RunString(`var setTimeout = function(args) {
if (arguments.length < 1) {
throw TypeError("Failed to execute 'setTimeout': 1 argument required, but only 0 present.");
}
return _setTimeout.apply(this, arguments);
}`)
- vm.Run(`var setInterval = function(args) {
+ re.vm.RunString(`var setInterval = function(args) {
if (arguments.length < 1) {
throw TypeError("Failed to execute 'setInterval': 1 argument required, but only 0 present.");
}
return _setInterval.apply(this, arguments);
}`)
- vm.Set("clearTimeout", clearTimeout)
- vm.Set("clearInterval", clearTimeout)
+ re.vm.Set("clearTimeout", clearTimeout)
+ re.vm.Set("clearInterval", clearTimeout)
var waitForCallbacks bool
@@ -178,8 +174,8 @@ loop:
case timer := <-ready:
// execute callback, remove/reschedule the timer
var arguments []interface{}
- if len(timer.call.ArgumentList) > 2 {
- tmp := timer.call.ArgumentList[2:]
+ if len(timer.call.Arguments) > 2 {
+ tmp := timer.call.Arguments[2:]
arguments = make([]interface{}, 2+len(tmp))
for i, value := range tmp {
arguments[i+2] = value
@@ -187,11 +183,12 @@ loop:
} else {
arguments = make([]interface{}, 1)
}
- arguments[0] = timer.call.ArgumentList[0]
- _, err := vm.Call(`Function.call.call`, nil, arguments...)
- if err != nil {
- fmt.Println("js error:", err, arguments)
+ arguments[0] = timer.call.Arguments[0]
+ call, isFunc := goja.AssertFunction(timer.call.Arguments[0])
+ if !isFunc {
+ panic(re.vm.ToValue("js error: timer/timeout callback is not a function"))
}
+ call(goja.Null(), timer.call.Arguments...)
_, inreg := registry[timer] // when clearInterval is called from within the callback don't reset it
if timer.interval && inreg {
@@ -204,7 +201,7 @@ loop:
}
case req := <-re.evalQueue:
// run the code, send the result back
- req.fn(vm)
+ req.fn(re.vm)
close(req.done)
if waitForCallbacks && (len(registry) == 0) {
break loop
@@ -223,7 +220,7 @@ loop:
}
// Do executes the given function on the JS event loop.
-func (re *JSRE) Do(fn func(*otto.Otto)) {
+func (re *JSRE) Do(fn func(*goja.Runtime)) {
done := make(chan bool)
req := &evalReq{fn, done}
re.evalQueue <- req
@@ -246,70 +243,36 @@ func (re *JSRE) Exec(file string) error {
if err != nil {
return err
}
- var script *otto.Script
- re.Do(func(vm *otto.Otto) {
- script, err = vm.Compile(file, code)
- if err != nil {
- return
- }
- _, err = vm.Run(script)
- })
- return err
-}
-
-// Bind assigns value v to a variable in the JS environment
-// This method is deprecated, use Set.
-func (re *JSRE) Bind(name string, v interface{}) error {
- return re.Set(name, v)
+ return re.Compile(file, string(code))
}
// Run runs a piece of JS code.
-func (re *JSRE) Run(code string) (v otto.Value, err error) {
- re.Do(func(vm *otto.Otto) { v, err = vm.Run(code) })
- return v, err
-}
-
-// Get returns the value of a variable in the JS environment.
-func (re *JSRE) Get(ns string) (v otto.Value, err error) {
- re.Do(func(vm *otto.Otto) { v, err = vm.Get(ns) })
+func (re *JSRE) Run(code string) (v goja.Value, err error) {
+ re.Do(func(vm *goja.Runtime) { v, err = vm.RunString(code) })
return v, err
}
// Set assigns value v to a variable in the JS environment.
func (re *JSRE) Set(ns string, v interface{}) (err error) {
- re.Do(func(vm *otto.Otto) { err = vm.Set(ns, v) })
+ re.Do(func(vm *goja.Runtime) { vm.Set(ns, v) })
return err
}
-// loadScript executes a JS script from inside the currently executing JS code.
-func (re *JSRE) loadScript(call otto.FunctionCall) otto.Value {
- file, err := call.Argument(0).ToString()
- if err != nil {
- // TODO: throw exception
- return otto.FalseValue()
- }
- file = common.AbsolutePath(re.assetPath, file)
- source, err := ioutil.ReadFile(file)
- if err != nil {
- // TODO: throw exception
- return otto.FalseValue()
- }
- if _, err := compileAndRun(call.Otto, file, source); err != nil {
- // TODO: throw exception
- fmt.Println("err:", err)
- return otto.FalseValue()
- }
- // TODO: return evaluation result
- return otto.TrueValue()
+// MakeCallback turns the given function into a function that's callable by JS.
+func MakeCallback(vm *goja.Runtime, fn func(Call) (goja.Value, error)) goja.Value {
+ return vm.ToValue(func(call goja.FunctionCall) goja.Value {
+ result, err := fn(Call{call, vm})
+ if err != nil {
+ panic(vm.NewGoError(err))
+ }
+ return result
+ })
}
-// Evaluate executes code and pretty prints the result to the specified output
-// stream.
-func (re *JSRE) Evaluate(code string, w io.Writer) error {
- var fail error
-
- re.Do(func(vm *otto.Otto) {
- val, err := vm.Run(code)
+// Evaluate executes code and pretty prints the result to the specified output stream.
+func (re *JSRE) Evaluate(code string, w io.Writer) {
+ re.Do(func(vm *goja.Runtime) {
+ val, err := vm.RunString(code)
if err != nil {
prettyError(vm, err, w)
} else {
@@ -317,19 +280,33 @@ func (re *JSRE) Evaluate(code string, w io.Writer) error {
}
fmt.Fprintln(w)
})
- return fail
}
// Compile compiles and then runs a piece of JS code.
-func (re *JSRE) Compile(filename string, src interface{}) (err error) {
- re.Do(func(vm *otto.Otto) { _, err = compileAndRun(vm, filename, src) })
+func (re *JSRE) Compile(filename string, src string) (err error) {
+ re.Do(func(vm *goja.Runtime) { _, err = compileAndRun(vm, filename, src) })
return err
}
-func compileAndRun(vm *otto.Otto, filename string, src interface{}) (otto.Value, error) {
- script, err := vm.Compile(filename, src)
+// loadScript loads and executes a JS file.
+func (re *JSRE) loadScript(call Call) (goja.Value, error) {
+ file := call.Argument(0).ToString().String()
+ file = common.AbsolutePath(re.assetPath, file)
+ source, err := ioutil.ReadFile(file)
+ if err != nil {
+ return nil, fmt.Errorf("Could not read file %s: %v", file, err)
+ }
+ value, err := compileAndRun(re.vm, file, string(source))
+ if err != nil {
+ return nil, fmt.Errorf("Error while compiling or running script: %v", err)
+ }
+ return value, nil
+}
+
+func compileAndRun(vm *goja.Runtime, filename string, src string) (goja.Value, error) {
+ script, err := goja.Compile(filename, src, false)
if err != nil {
- return otto.Value{}, err
+ return goja.Null(), err
}
- return vm.Run(script)
+ return vm.RunProgram(script)
}
diff --git a/internal/jsre/jsre_test.go b/internal/jsre/jsre_test.go
index bcb6e0dd23..bc38f7a44a 100644
--- a/internal/jsre/jsre_test.go
+++ b/internal/jsre/jsre_test.go
@@ -20,25 +20,24 @@ import (
"io/ioutil"
"os"
"path"
+ "reflect"
"testing"
"time"
- "github.com/robertkrimen/otto"
+ "github.com/dop251/goja"
)
-type testNativeObjectBinding struct{}
+type testNativeObjectBinding struct {
+ vm *goja.Runtime
+}
type msg struct {
Msg string
}
-func (no *testNativeObjectBinding) TestMethod(call otto.FunctionCall) otto.Value {
- m, err := call.Argument(0).ToString()
- if err != nil {
- return otto.UndefinedValue()
- }
- v, _ := call.Otto.ToValue(&msg{m})
- return v
+func (no *testNativeObjectBinding) TestMethod(call goja.FunctionCall) goja.Value {
+ m := call.Argument(0).ToString().String()
+ return no.vm.ToValue(&msg{m})
}
func newWithTestJS(t *testing.T, testjs string) (*JSRE, string) {
@@ -51,7 +50,8 @@ func newWithTestJS(t *testing.T, testjs string) (*JSRE, string) {
t.Fatal("cannot create test.js:", err)
}
}
- return New(dir, os.Stdout), dir
+ jsre := New(dir, os.Stdout)
+ return jsre, dir
}
func TestExec(t *testing.T) {
@@ -66,11 +66,11 @@ func TestExec(t *testing.T) {
if err != nil {
t.Errorf("expected no error, got %v", err)
}
- if !val.IsString() {
+ if val.ExportType().Kind() != reflect.String {
t.Errorf("expected string value, got %v", val)
}
exp := "testMsg"
- got, _ := val.ToString()
+ got := val.ToString().String()
if exp != got {
t.Errorf("expected '%v', got '%v'", exp, got)
}
@@ -90,11 +90,11 @@ func TestNatto(t *testing.T) {
if err != nil {
t.Errorf("expected no error, got %v", err)
}
- if !val.IsString() {
+ if val.ExportType().Kind() != reflect.String {
t.Errorf("expected string value, got %v", val)
}
exp := "testMsg"
- got, _ := val.ToString()
+ got := val.ToString().String()
if exp != got {
t.Errorf("expected '%v', got '%v'", exp, got)
}
@@ -105,7 +105,7 @@ func TestBind(t *testing.T) {
jsre := New("", os.Stdout)
defer jsre.Stop(false)
- jsre.Bind("no", &testNativeObjectBinding{})
+ jsre.Set("no", &testNativeObjectBinding{vm: jsre.vm})
_, err := jsre.Run(`no.TestMethod("testMsg")`)
if err != nil {
@@ -125,11 +125,11 @@ func TestLoadScript(t *testing.T) {
if err != nil {
t.Errorf("expected no error, got %v", err)
}
- if !val.IsString() {
+ if val.ExportType().Kind() != reflect.String {
t.Errorf("expected string value, got %v", val)
}
exp := "testMsg"
- got, _ := val.ToString()
+ got := val.ToString().String()
if exp != got {
t.Errorf("expected '%v', got '%v'", exp, got)
}
diff --git a/internal/jsre/pretty.go b/internal/jsre/pretty.go
index 16fa91b67d..4171e00906 100644
--- a/internal/jsre/pretty.go
+++ b/internal/jsre/pretty.go
@@ -19,12 +19,13 @@ package jsre
import (
"fmt"
"io"
+ "reflect"
"sort"
"strconv"
"strings"
+ "github.com/dop251/goja"
"github.com/fatih/color"
- "github.com/robertkrimen/otto"
)
const (
@@ -52,29 +53,29 @@ var boringKeys = map[string]bool{
}
// prettyPrint writes value to standard output.
-func prettyPrint(vm *otto.Otto, value otto.Value, w io.Writer) {
+func prettyPrint(vm *goja.Runtime, value goja.Value, w io.Writer) {
ppctx{vm: vm, w: w}.printValue(value, 0, false)
}
// prettyError writes err to standard output.
-func prettyError(vm *otto.Otto, err error, w io.Writer) {
+func prettyError(vm *goja.Runtime, err error, w io.Writer) {
failure := err.Error()
- if ottoErr, ok := err.(*otto.Error); ok {
- failure = ottoErr.String()
+ if gojaErr, ok := err.(*goja.Exception); ok {
+ failure = gojaErr.String()
}
fmt.Fprint(w, ErrorColor("%s", failure))
}
-func (re *JSRE) prettyPrintJS(call otto.FunctionCall) otto.Value {
- for _, v := range call.ArgumentList {
- prettyPrint(call.Otto, v, re.output)
+func (re *JSRE) prettyPrintJS(call goja.FunctionCall) goja.Value {
+ for _, v := range call.Arguments {
+ prettyPrint(re.vm, v, re.output)
fmt.Fprintln(re.output)
}
- return otto.UndefinedValue()
+ return goja.Undefined()
}
type ppctx struct {
- vm *otto.Otto
+ vm *goja.Runtime
w io.Writer
}
@@ -82,35 +83,47 @@ func (ctx ppctx) indent(level int) string {
return strings.Repeat(indentString, level)
}
-func (ctx ppctx) printValue(v otto.Value, level int, inArray bool) {
+func (ctx ppctx) printValue(v goja.Value, level int, inArray bool) {
+ if goja.IsNull(v) || goja.IsUndefined(v) {
+ fmt.Fprint(ctx.w, SpecialColor(v.String()))
+ return
+ }
+ kind := v.ExportType().Kind()
switch {
- case v.IsObject():
- ctx.printObject(v.Object(), level, inArray)
- case v.IsNull():
- fmt.Fprint(ctx.w, SpecialColor("null"))
- case v.IsUndefined():
- fmt.Fprint(ctx.w, SpecialColor("undefined"))
- case v.IsString():
- s, _ := v.ToString()
- fmt.Fprint(ctx.w, StringColor("%q", s))
- case v.IsBoolean():
- b, _ := v.ToBoolean()
- fmt.Fprint(ctx.w, SpecialColor("%t", b))
- case v.IsNaN():
- fmt.Fprint(ctx.w, NumberColor("NaN"))
- case v.IsNumber():
- s, _ := v.ToString()
- fmt.Fprint(ctx.w, NumberColor("%s", s))
+ case kind == reflect.Bool:
+ fmt.Fprint(ctx.w, SpecialColor("%t", v.ToBoolean()))
+ case kind == reflect.String:
+ fmt.Fprint(ctx.w, StringColor("%q", v.String()))
+ case kind >= reflect.Int && kind <= reflect.Complex128:
+ fmt.Fprint(ctx.w, NumberColor("%s", v.String()))
default:
- fmt.Fprint(ctx.w, "")
+ if obj, ok := v.(*goja.Object); ok {
+ ctx.printObject(obj, level, inArray)
+ } else {
+ fmt.Fprintf(ctx.w, "<unprintable>", v)
+ }
}
}
-func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) {
- switch obj.Class() {
+// SafeGet attempt to get the value associated to `key`, and
+// catches the panic that goja creates if an error occurs in
+// key.
+func SafeGet(obj *goja.Object, key string) (ret goja.Value) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = goja.Undefined()
+ }
+ }()
+ ret = obj.Get(key)
+
+ return ret
+}
+
+func (ctx ppctx) printObject(obj *goja.Object, level int, inArray bool) {
+ switch obj.ClassName() {
case "Array", "GoArray":
- lv, _ := obj.Get("length")
- len, _ := lv.ToInteger()
+ lv := obj.Get("length")
+ len := lv.ToInteger()
if len == 0 {
fmt.Fprintf(ctx.w, "[]")
return
@@ -121,8 +134,8 @@ func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) {
}
fmt.Fprint(ctx.w, "[")
for i := int64(0); i < len; i++ {
- el, err := obj.Get(strconv.FormatInt(i, 10))
- if err == nil {
+ el := obj.Get(strconv.FormatInt(i, 10))
+ if el != nil {
ctx.printValue(el, level+1, true)
}
if i < len-1 {
@@ -149,7 +162,7 @@ func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) {
}
fmt.Fprintln(ctx.w, "{")
for i, k := range keys {
- v, _ := obj.Get(k)
+ v := SafeGet(obj, k)
fmt.Fprintf(ctx.w, "%s%s: ", ctx.indent(level+1), k)
ctx.printValue(v, level+1, false)
if i < len(keys)-1 {
@@ -163,29 +176,25 @@ func (ctx ppctx) printObject(obj *otto.Object, level int, inArray bool) {
fmt.Fprintf(ctx.w, "%s}", ctx.indent(level))
case "Function":
- // Use toString() to display the argument list if possible.
- if robj, err := obj.Call("toString"); err != nil {
- fmt.Fprint(ctx.w, FunctionColor("function()"))
- } else {
- desc := strings.Trim(strings.Split(robj.String(), "{")[0], " \t\n")
- desc = strings.Replace(desc, " (", "(", 1)
- fmt.Fprint(ctx.w, FunctionColor("%s", desc))
- }
+ robj := obj.ToString()
+ desc := strings.Trim(strings.Split(robj.String(), "{")[0], " \t\n")
+ desc = strings.Replace(desc, " (", "(", 1)
+ fmt.Fprint(ctx.w, FunctionColor("%s", desc))
case "RegExp":
fmt.Fprint(ctx.w, StringColor("%s", toString(obj)))
default:
- if v, _ := obj.Get("toString"); v.IsFunction() && level <= maxPrettyPrintLevel {
- s, _ := obj.Call("toString")
- fmt.Fprintf(ctx.w, "<%s %s>", obj.Class(), s.String())
+ if level <= maxPrettyPrintLevel {
+ s := obj.ToString().String()
+ fmt.Fprintf(ctx.w, "<%s %s>", obj.ClassName(), s)
} else {
- fmt.Fprintf(ctx.w, "<%s>", obj.Class())
+ fmt.Fprintf(ctx.w, "<%s>", obj.ClassName())
}
}
}
-func (ctx ppctx) fields(obj *otto.Object) []string {
+func (ctx ppctx) fields(obj *goja.Object) []string {
var (
vals, methods []string
seen = make(map[string]bool)
@@ -195,11 +204,22 @@ func (ctx ppctx) fields(obj *otto.Object) []string {
return
}
seen[k] = true
- if v, _ := obj.Get(k); v.IsFunction() {
- methods = append(methods, k)
- } else {
+
+ key := SafeGet(obj, k)
+ if key == nil {
+ // The value corresponding to that key could not be found
+ // (typically because it is backed by an RPC call that is
+ // not supported by this instance. Add it to the list of
+ // values so that it appears as `undefined` to the user.
vals = append(vals, k)
+ } else {
+ if _, callable := goja.AssertFunction(key); callable {
+ methods = append(methods, k)
+ } else {
+ vals = append(vals, k)
+ }
}
+
}
iterOwnAndConstructorKeys(ctx.vm, obj, add)
sort.Strings(vals)
@@ -207,13 +227,13 @@ func (ctx ppctx) fields(obj *otto.Object) []string {
return append(vals, methods...)
}
-func iterOwnAndConstructorKeys(vm *otto.Otto, obj *otto.Object, f func(string)) {
+func iterOwnAndConstructorKeys(vm *goja.Runtime, obj *goja.Object, f func(string)) {
seen := make(map[string]bool)
iterOwnKeys(vm, obj, func(prop string) {
seen[prop] = true
f(prop)
})
- if cp := constructorPrototype(obj); cp != nil {
+ if cp := constructorPrototype(vm, obj); cp != nil {
iterOwnKeys(vm, cp, func(prop string) {
if !seen[prop] {
f(prop)
@@ -222,10 +242,17 @@ func iterOwnAndConstructorKeys(vm *otto.Otto, obj *otto.Object, f func(string))
}
}
-func iterOwnKeys(vm *otto.Otto, obj *otto.Object, f func(string)) {
- Object, _ := vm.Object("Object")
- rv, _ := Object.Call("getOwnPropertyNames", obj.Value())
- gv, _ := rv.Export()
+func iterOwnKeys(vm *goja.Runtime, obj *goja.Object, f func(string)) {
+ Object := vm.Get("Object").ToObject(vm)
+ getOwnPropertyNames, isFunc := goja.AssertFunction(Object.Get("getOwnPropertyNames"))
+ if !isFunc {
+ panic(vm.ToValue("Object.getOwnPropertyNames isn't a function"))
+ }
+ rv, err := getOwnPropertyNames(goja.Null(), obj)
+ if err != nil {
+ panic(vm.ToValue(fmt.Sprintf("Error getting object properties: %v", err)))
+ }
+ gv := rv.Export()
switch gv := gv.(type) {
case []interface{}:
for _, v := range gv {
@@ -240,32 +267,35 @@ func iterOwnKeys(vm *otto.Otto, obj *otto.Object, f func(string)) {
}
}
-func (ctx ppctx) isBigNumber(v *otto.Object) bool {
+func (ctx ppctx) isBigNumber(v *goja.Object) bool {
// Handle numbers with custom constructor.
- if v, _ := v.Get("constructor"); v.Object() != nil {
- if strings.HasPrefix(toString(v.Object()), "function BigNumber") {
+ if obj := v.Get("constructor").ToObject(ctx.vm); obj != nil {
+ if strings.HasPrefix(toString(obj), "function BigNumber") {
return true
}
}
// Handle default constructor.
- BigNumber, _ := ctx.vm.Object("BigNumber.prototype")
+ BigNumber := ctx.vm.Get("BigNumber").ToObject(ctx.vm)
if BigNumber == nil {
return false
}
- bv, _ := BigNumber.Call("isPrototypeOf", v)
- b, _ := bv.ToBoolean()
- return b
+ prototype := BigNumber.Get("prototype").ToObject(ctx.vm)
+ isPrototypeOf, callable := goja.AssertFunction(prototype.Get("isPrototypeOf"))
+ if !callable {
+ return false
+ }
+ bv, _ := isPrototypeOf(prototype, v)
+ return bv.ToBoolean()
}
-func toString(obj *otto.Object) string {
- s, _ := obj.Call("toString")
- return s.String()
+func toString(obj *goja.Object) string {
+ return obj.ToString().String()
}
-func constructorPrototype(obj *otto.Object) *otto.Object {
- if v, _ := obj.Get("constructor"); v.Object() != nil {
- if v, _ = v.Object().Get("prototype"); v.Object() != nil {
- return v.Object()
+func constructorPrototype(vm *goja.Runtime, obj *goja.Object) *goja.Object {
+ if v := obj.Get("constructor"); v != nil {
+ if v := v.ToObject(vm).Get("prototype"); v != nil {
+ return v.ToObject(vm)
}
}
return nil
diff --git a/internal/testlog/testlog.go b/internal/testlog/testlog.go
index c5a6114d16..684339f16d 100644
--- a/internal/testlog/testlog.go
+++ b/internal/testlog/testlog.go
@@ -18,18 +18,12 @@
package testlog
import (
+ "sync"
"testing"
"github.com/ethereum/go-ethereum/log"
)
-// Logger returns a logger which logs to the unit test log of t.
-func Logger(t *testing.T, level log.Lvl) log.Logger {
- l := log.New()
- l.SetHandler(Handler(t, level))
- return l
-}
-
// Handler returns a log handler which logs to the unit test log of t.
func Handler(t *testing.T, level log.Lvl) log.Handler {
return log.LvlFilterHandler(level, &handler{t, log.TerminalFormat(false)})
@@ -44,3 +38,105 @@ func (h *handler) Log(r *log.Record) error {
h.t.Logf("%s", h.fmt.Format(r))
return nil
}
+
+// logger implements log.Logger such that all output goes to the unit test log via
+// t.Logf(). All methods in between logger.Trace, logger.Debug, etc. are marked as test
+// helpers, so the file and line number in unit test output correspond to the call site
+// which emitted the log message.
+type logger struct {
+ t *testing.T
+ l log.Logger
+ mu *sync.Mutex
+ h *bufHandler
+}
+
+type bufHandler struct {
+ buf []*log.Record
+ fmt log.Format
+}
+
+func (h *bufHandler) Log(r *log.Record) error {
+ h.buf = append(h.buf, r)
+ return nil
+}
+
+// Logger returns a logger which logs to the unit test log of t.
+func Logger(t *testing.T, level log.Lvl) log.Logger {
+ l := &logger{
+ t: t,
+ l: log.New(),
+ mu: new(sync.Mutex),
+ h: &bufHandler{fmt: log.TerminalFormat(false)},
+ }
+ l.l.SetHandler(log.LvlFilterHandler(level, l.h))
+ return l
+}
+
+func (l *logger) Trace(msg string, ctx ...interface{}) {
+ l.t.Helper()
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.l.Trace(msg, ctx...)
+ l.flush()
+}
+
+func (l *logger) Debug(msg string, ctx ...interface{}) {
+ l.t.Helper()
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.l.Debug(msg, ctx...)
+ l.flush()
+}
+
+func (l *logger) Info(msg string, ctx ...interface{}) {
+ l.t.Helper()
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.l.Info(msg, ctx...)
+ l.flush()
+}
+
+func (l *logger) Warn(msg string, ctx ...interface{}) {
+ l.t.Helper()
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.l.Warn(msg, ctx...)
+ l.flush()
+}
+
+func (l *logger) Error(msg string, ctx ...interface{}) {
+ l.t.Helper()
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.l.Error(msg, ctx...)
+ l.flush()
+}
+
+func (l *logger) Crit(msg string, ctx ...interface{}) {
+ l.t.Helper()
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.l.Crit(msg, ctx...)
+ l.flush()
+}
+
+func (l *logger) New(ctx ...interface{}) log.Logger {
+ return &logger{l.t, l.l.New(ctx...), l.mu, l.h}
+}
+
+func (l *logger) GetHandler() log.Handler {
+ return l.l.GetHandler()
+}
+
+func (l *logger) SetHandler(h log.Handler) {
+ l.l.SetHandler(h)
+}
+
+// flush writes all buffered messages and clears the buffer.
+func (l *logger) flush() {
+ l.t.Helper()
+ for _, r := range l.h.buf {
+ l.t.Logf("%s", l.h.fmt.Format(r))
+ }
+ l.h.buf = nil
+}
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index 12305b185e..36e0312656 100755
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -183,8 +183,8 @@ web3._extend({
new web3._extend.Method({
name: 'exportChain',
call: 'admin_exportChain',
- params: 1,
- inputFormatter: [null]
+ params: 3,
+ inputFormatter: [null, null, null]
}),
new web3._extend.Method({
name: 'importChain',
diff --git a/les/api.go b/les/api.go
index ad511c9d6b..f9b8c34458 100644
--- a/les/api.go
+++ b/les/api.go
@@ -350,5 +350,5 @@ func (api *PrivateLightAPI) GetCheckpointContractAddress() (string, error) {
if api.backend.oracle == nil {
return "", errNotActivated
}
- return api.backend.oracle.config.Address.Hex(), nil
+ return api.backend.oracle.Contract().ContractAddr().Hex(), nil
}
diff --git a/les/api_backend.go b/les/api_backend.go
index 36204751e5..ca990cfb75 100644
--- a/les/api_backend.go
+++ b/les/api_backend.go
@@ -229,6 +229,13 @@ func (b *LesApiBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri
return b.eth.blockchain.SubscribeLogsEvent(ch)
}
+func (b *LesApiBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription {
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ <-quit
+ return nil
+ })
+}
+
func (b *LesApiBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription {
return b.eth.blockchain.SubscribeRemovedLogsEvent(ch)
}
@@ -249,10 +256,6 @@ func (b *LesApiBackend) ChainDb() ethdb.Database {
return b.eth.chainDb
}
-func (b *LesApiBackend) EventMux() *event.TypeMux {
- return b.eth.eventMux
-}
-
func (b *LesApiBackend) AccountManager() *accounts.Manager {
return b.eth.accountManager
}
diff --git a/les/checkpointoracle.go b/les/checkpointoracle/oracle.go
similarity index 65%
rename from les/checkpointoracle.go
rename to les/checkpointoracle/oracle.go
index 5494e3d6d9..c3983e1a95 100644
--- a/les/checkpointoracle.go
+++ b/les/checkpointoracle/oracle.go
@@ -14,7 +14,10 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see .
-package les
+// Package checkpointoracle is a wrapper of checkpoint oracle contract with
+// additional rules defined. This package can be used both in LES client or
+// server side for offering oracle related APIs.
+package checkpointoracle
import (
"encoding/binary"
@@ -28,10 +31,10 @@ import (
"github.com/ethereum/go-ethereum/params"
)
-// checkpointOracle is responsible for offering the latest stable checkpoint
-// generated and announced by the contract admins on-chain. The checkpoint is
-// verified by clients locally during the checkpoint syncing.
-type checkpointOracle struct {
+// CheckpointOracle is responsible for offering the latest stable checkpoint
+// generated and announced by the contract admins on-chain. The checkpoint can
+// be verified by clients locally during the checkpoint syncing.
+type CheckpointOracle struct {
config *params.CheckpointOracleConfig
contract *checkpointoracle.CheckpointOracle
@@ -39,8 +42,8 @@ type checkpointOracle struct {
getLocal func(uint64) params.TrustedCheckpoint // Function used to retrieve local checkpoint
}
-// newCheckpointOracle returns a checkpoint registrar handler.
-func newCheckpointOracle(config *params.CheckpointOracleConfig, getLocal func(uint64) params.TrustedCheckpoint) *checkpointOracle {
+// New creates a checkpoint oracle handler with given configs and callback.
+func New(config *params.CheckpointOracleConfig, getLocal func(uint64) params.TrustedCheckpoint) *CheckpointOracle {
if config == nil {
log.Info("Checkpoint registrar is not enabled")
return nil
@@ -51,41 +54,46 @@ func newCheckpointOracle(config *params.CheckpointOracleConfig, getLocal func(ui
}
log.Info("Configured checkpoint registrar", "address", config.Address, "signers", len(config.Signers), "threshold", config.Threshold)
- return &checkpointOracle{
+ return &CheckpointOracle{
config: config,
getLocal: getLocal,
}
}
-// start binds the registrar contract and start listening to the
-// newCheckpointEvent for the server side.
-func (reg *checkpointOracle) start(backend bind.ContractBackend) {
- contract, err := checkpointoracle.NewCheckpointOracle(reg.config.Address, backend)
+// Start binds the contract backend, initializes the oracle instance
+// and marks the status as available.
+func (oracle *CheckpointOracle) Start(backend bind.ContractBackend) {
+ contract, err := checkpointoracle.NewCheckpointOracle(oracle.config.Address, backend)
if err != nil {
log.Error("Oracle contract binding failed", "err", err)
return
}
- if !atomic.CompareAndSwapInt32(®.running, 0, 1) {
+ if !atomic.CompareAndSwapInt32(&oracle.running, 0, 1) {
log.Error("Already bound and listening to registrar")
return
}
- reg.contract = contract
+ oracle.contract = contract
}
-// isRunning returns an indicator whether the registrar is running.
-func (reg *checkpointOracle) isRunning() bool {
- return atomic.LoadInt32(®.running) == 1
+// IsRunning returns an indicator whether the oracle is running.
+func (oracle *CheckpointOracle) IsRunning() bool {
+ return atomic.LoadInt32(&oracle.running) == 1
}
-// stableCheckpoint returns the stable checkpoint which was generated by local
+// Contract returns the underlying raw checkpoint oracle contract.
+func (oracle *CheckpointOracle) Contract() *checkpointoracle.CheckpointOracle {
+ return oracle.contract
+}
+
+// StableCheckpoint returns the stable checkpoint which was generated by local
// indexers and announced by trusted signers.
-func (reg *checkpointOracle) stableCheckpoint() (*params.TrustedCheckpoint, uint64) {
+func (oracle *CheckpointOracle) StableCheckpoint() (*params.TrustedCheckpoint, uint64) {
// Retrieve the latest checkpoint from the contract, abort if empty
- latest, hash, height, err := reg.contract.Contract().GetLatestCheckpoint(nil)
+ latest, hash, height, err := oracle.contract.Contract().GetLatestCheckpoint(nil)
if err != nil || (latest == 0 && hash == [32]byte{}) {
return nil, 0
}
- local := reg.getLocal(latest)
+ local := oracle.getLocal(latest)
// The following scenarios may occur:
//
@@ -93,19 +101,18 @@ func (reg *checkpointOracle) stableCheckpoint() (*params.TrustedCheckpoint, uint
// checkpoint which registered in the contract.
// * local checkpoint doesn't match with the registered one.
//
- // In both cases, server won't send the **stable** checkpoint
- // to the client(no worry, client can use hardcoded one instead).
- if local.HashEqual(common.Hash(hash)) {
+ // In both cases, no stable checkpoint will be returned.
+ if local.HashEqual(hash) {
return &local, height.Uint64()
}
return nil, 0
}
-// verifySigners recovers the signer addresses according to the signature and
+// VerifySigners recovers the signer addresses according to the signature and
// checks whether there are enough approvals to finalize the checkpoint.
-func (reg *checkpointOracle) verifySigners(index uint64, hash [32]byte, signatures [][]byte) (bool, []common.Address) {
+func (oracle *CheckpointOracle) VerifySigners(index uint64, hash [32]byte, signatures [][]byte) (bool, []common.Address) {
// Short circuit if the given signatures doesn't reach the threshold.
- if len(signatures) < int(reg.config.Threshold) {
+ if len(signatures) < int(oracle.config.Threshold) {
return false, nil
}
var (
@@ -128,7 +135,7 @@ func (reg *checkpointOracle) verifySigners(index uint64, hash [32]byte, signatur
// hash = keccak256(checkpoint_index, section_head, cht_root, bloom_root)
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, index)
- data := append([]byte{0x19, 0x00}, append(reg.config.Address.Bytes(), append(buf, hash[:]...)...)...)
+ data := append([]byte{0x19, 0x00}, append(oracle.config.Address.Bytes(), append(buf, hash[:]...)...)...)
signatures[i][64] -= 27 // Transform V from 27/28 to 0/1 according to the yellow paper for verification.
pubkey, err := crypto.Ecrecover(crypto.Keccak256(data), signatures[i])
if err != nil {
@@ -139,14 +146,14 @@ func (reg *checkpointOracle) verifySigners(index uint64, hash [32]byte, signatur
if _, exist := checked[signer]; exist {
continue
}
- for _, s := range reg.config.Signers {
+ for _, s := range oracle.config.Signers {
if s == signer {
signers = append(signers, signer)
checked[signer] = struct{}{}
}
}
}
- threshold := reg.config.Threshold
+ threshold := oracle.config.Threshold
if uint64(len(signers)) < threshold {
log.Warn("Not enough signers to approve checkpoint", "signers", len(signers), "threshold", threshold)
return false, nil
diff --git a/les/client.go b/les/client.go
index 1730e82f06..c4196f659b 100644
--- a/les/client.go
+++ b/les/client.go
@@ -36,6 +36,7 @@ import (
"github.com/ethereum/go-ethereum/eth/gasprice"
"github.com/ethereum/go-ethereum/event"
"github.com/ethereum/go-ethereum/internal/ethapi"
+ "github.com/ethereum/go-ethereum/les/checkpointoracle"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/multitenancy"
@@ -84,7 +85,8 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
if err != nil {
return nil, err
}
- chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideIstanbul)
+ chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis,
+ config.OverrideIstanbul, config.OverrideMuirGlacier)
if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat {
return nil, genesisErr
}
@@ -138,7 +140,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
if oracle == nil {
oracle = params.CheckpointOracles[genesisHash]
}
- leth.oracle = newCheckpointOracle(oracle, leth.localCheckpoint)
+ leth.oracle = checkpointoracle.New(oracle, leth.localCheckpoint)
// Note: AddChildIndexer starts the update process for the child
leth.bloomIndexer.AddChildIndexer(leth.bloomTrieIndexer)
@@ -290,5 +292,5 @@ func (s *LightEthereum) SetContractBackend(backend bind.ContractBackend) {
if s.oracle == nil {
return
}
- s.oracle.start(backend)
+ s.oracle.Start(backend)
}
diff --git a/les/commons.go b/les/commons.go
index ad3c5aef3d..b402c51769 100644
--- a/les/commons.go
+++ b/les/commons.go
@@ -27,6 +27,7 @@ import (
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
+ "github.com/ethereum/go-ethereum/les/checkpointoracle"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discv5"
@@ -63,7 +64,7 @@ type lesCommons struct {
peers *peerSet
chainReader chainReader
chtIndexer, bloomTrieIndexer *core.ChainIndexer
- oracle *checkpointOracle
+ oracle *checkpointoracle.CheckpointOracle
closeCh chan struct{}
wg sync.WaitGroup
diff --git a/les/peer.go b/les/peer.go
index ab5b30a657..feb3910beb 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -113,7 +113,7 @@ type peer struct {
fcParams flowcontrol.ServerParams
fcCosts requestCostTable
- trusted bool
+ trusted, server bool
onlyAnnounce bool
chainSince, chainRecent uint64
stateSince, stateRecent uint64
@@ -616,8 +616,8 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
// Add advertised checkpoint and register block height which
// client can verify the checkpoint validity.
- if server.oracle != nil && server.oracle.isRunning() {
- cp, height := server.oracle.stableCheckpoint()
+ if server.oracle != nil && server.oracle.IsRunning() {
+ cp, height := server.oracle.StableCheckpoint()
if cp != nil {
send = send.add("checkpoint/value", cp)
send = send.add("checkpoint/registerHeight", height)
@@ -675,11 +675,16 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
}
if server != nil {
- if recv.get("announceType", &p.announceType) != nil {
- // set default announceType on server side
- p.announceType = announceTypeSimple
+ p.server = recv.get("flowControl/MRR", nil) == nil
+ if p.server {
+ p.announceType = announceTypeNone // connected to another server, send no messages
+ } else {
+ if recv.get("announceType", &p.announceType) != nil {
+ // set default announceType on server side
+ p.announceType = announceTypeSimple
+ }
+ p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams)
}
- p.fcClient = flowcontrol.NewClientNode(server.fcManager, server.defParams)
} else {
if recv.get("serveChainSince", &p.chainSince) != nil {
p.onlyAnnounce = true
@@ -726,6 +731,7 @@ func (p *peer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis
}
}
}
+ p.server = true
}
p.headInfo = &announceData{Td: rTd, Hash: rHash, Number: rNum}
return nil
diff --git a/les/retrieve.go b/les/retrieve.go
index d17a02e1ae..c806117902 100644
--- a/les/retrieve.go
+++ b/les/retrieve.go
@@ -119,7 +119,7 @@ func (rm *retrieveManager) retrieve(ctx context.Context, reqID uint64, req *dist
case <-ctx.Done():
sentReq.stop(ctx.Err())
case <-shutdown:
- sentReq.stop(fmt.Errorf("Client is shutting down"))
+ sentReq.stop(fmt.Errorf("client is shutting down"))
}
return sentReq.getError()
}
diff --git a/les/server.go b/les/server.go
index e68903dd81..664eba9717 100644
--- a/les/server.go
+++ b/les/server.go
@@ -24,6 +24,7 @@ import (
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/eth"
+ "github.com/ethereum/go-ethereum/les/checkpointoracle"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/log"
@@ -96,7 +97,7 @@ func NewLesServer(e *eth.Ethereum, config *eth.Config) (*LesServer, error) {
if oracle == nil {
oracle = params.CheckpointOracles[e.BlockChain().Genesis().Hash()]
}
- srv.oracle = newCheckpointOracle(oracle, srv.localCheckpoint)
+ srv.oracle = checkpointoracle.New(oracle, srv.localCheckpoint)
// Initialize server capacity management fields.
srv.defParams = flowcontrol.ServerParams{
@@ -216,7 +217,7 @@ func (s *LesServer) SetContractBackend(backend bind.ContractBackend) {
if s.oracle == nil {
return
}
- s.oracle.start(backend)
+ s.oracle.Start(backend)
}
// capacityManagement starts an event handler loop that updates the recharge curve of
diff --git a/les/server_handler.go b/les/server_handler.go
index 0db396efc9..0b9bd47e52 100644
--- a/les/server_handler.go
+++ b/les/server_handler.go
@@ -108,10 +108,6 @@ func (h *serverHandler) runPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter)
}
func (h *serverHandler) handle(p *peer) error {
- // Reject light clients if server is not synced.
- if !h.synced() {
- return p2p.DiscRequested
- }
p.Log().Debug("Light Ethereum peer connected", "name", p.Name())
// Execute the LES handshake
@@ -125,6 +121,16 @@ func (h *serverHandler) handle(p *peer) error {
p.Log().Debug("Light Ethereum handshake failed", "err", err)
return err
}
+ if p.server {
+ // connected to another server, no messages expected, just wait for disconnection
+ _, err := p.rw.ReadMsg()
+ return err
+ }
+ // Reject light clients if server is not synced.
+ if !h.synced() {
+ p.Log().Debug("Light server not synced, rejecting peer")
+ return p2p.DiscRequested
+ }
defer p.fcClient.Disconnect()
// Disconnect the inbound peer if it's rejected by clientPool
@@ -268,7 +274,6 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscInHeaderPacketsMeter.Mark(1)
miscInHeaderTrafficMeter.Mark(int64(msg.Size))
- defer func(start time.Time) { miscServingTimeHeaderTimer.UpdateSince(start) }(time.Now())
}
var req struct {
ReqID uint64
@@ -372,6 +377,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscOutHeaderPacketsMeter.Mark(1)
miscOutHeaderTrafficMeter.Mark(int64(reply.size()))
+ miscServingTimeHeaderTimer.Update(time.Duration(task.servingTime))
}
}()
}
@@ -381,7 +387,6 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscInBodyPacketsMeter.Mark(1)
miscInBodyTrafficMeter.Mark(int64(msg.Size))
- defer func(start time.Time) { miscServingTimeBodyTimer.UpdateSince(start) }(time.Now())
}
var req struct {
ReqID uint64
@@ -421,6 +426,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscOutBodyPacketsMeter.Mark(1)
miscOutBodyTrafficMeter.Mark(int64(reply.size()))
+ miscServingTimeBodyTimer.Update(time.Duration(task.servingTime))
}
}()
}
@@ -430,7 +436,6 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscInCodePacketsMeter.Mark(1)
miscInCodeTrafficMeter.Mark(int64(msg.Size))
- defer func(start time.Time) { miscServingTimeCodeTimer.UpdateSince(start) }(time.Now())
}
var req struct {
ReqID uint64
@@ -494,6 +499,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscOutCodePacketsMeter.Mark(1)
miscOutCodeTrafficMeter.Mark(int64(reply.size()))
+ miscServingTimeCodeTimer.Update(time.Duration(task.servingTime))
}
}()
}
@@ -503,7 +509,6 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscInReceiptPacketsMeter.Mark(1)
miscInReceiptTrafficMeter.Mark(int64(msg.Size))
- defer func(start time.Time) { miscServingTimeReceiptTimer.UpdateSince(start) }(time.Now())
}
var req struct {
ReqID uint64
@@ -551,6 +556,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscOutReceiptPacketsMeter.Mark(1)
miscOutReceiptTrafficMeter.Mark(int64(reply.size()))
+ miscServingTimeReceiptTimer.Update(time.Duration(task.servingTime))
}
}()
}
@@ -560,7 +566,6 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscInTrieProofPacketsMeter.Mark(1)
miscInTrieProofTrafficMeter.Mark(int64(msg.Size))
- defer func(start time.Time) { miscServingTimeTrieProofTimer.UpdateSince(start) }(time.Now())
}
var req struct {
ReqID uint64
@@ -654,6 +659,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscOutTrieProofPacketsMeter.Mark(1)
miscOutTrieProofTrafficMeter.Mark(int64(reply.size()))
+ miscServingTimeTrieProofTimer.Update(time.Duration(task.servingTime))
}
}()
}
@@ -663,7 +669,6 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscInHelperTriePacketsMeter.Mark(1)
miscInHelperTrieTrafficMeter.Mark(int64(msg.Size))
- defer func(start time.Time) { miscServingTimeHelperTrieTimer.UpdateSince(start) }(time.Now())
}
var req struct {
ReqID uint64
@@ -729,6 +734,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscOutHelperTriePacketsMeter.Mark(1)
miscOutHelperTrieTrafficMeter.Mark(int64(reply.size()))
+ miscServingTimeHelperTrieTimer.Update(time.Duration(task.servingTime))
}
}()
}
@@ -738,7 +744,6 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscInTxsPacketsMeter.Mark(1)
miscInTxsTrafficMeter.Mark(int64(msg.Size))
- defer func(start time.Time) { miscServingTimeTxTimer.UpdateSince(start) }(time.Now())
}
var req struct {
ReqID uint64
@@ -778,6 +783,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscOutTxsPacketsMeter.Mark(1)
miscOutTxsTrafficMeter.Mark(int64(reply.size()))
+ miscServingTimeTxTimer.Update(time.Duration(task.servingTime))
}
}()
}
@@ -787,7 +793,6 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscInTxStatusPacketsMeter.Mark(1)
miscInTxStatusTrafficMeter.Mark(int64(msg.Size))
- defer func(start time.Time) { miscServingTimeTxStatusTimer.UpdateSince(start) }(time.Now())
}
var req struct {
ReqID uint64
@@ -815,6 +820,7 @@ func (h *serverHandler) handleMsg(p *peer, wg *sync.WaitGroup) error {
if metrics.EnabledExpensive {
miscOutTxStatusPacketsMeter.Mark(1)
miscOutTxStatusTrafficMeter.Mark(int64(reply.size()))
+ miscServingTimeTxStatusTimer.Update(time.Duration(task.servingTime))
}
}()
}
diff --git a/les/serverpool.go b/les/serverpool.go
index 37621dc634..f8fd721696 100644
--- a/les/serverpool.go
+++ b/les/serverpool.go
@@ -179,6 +179,19 @@ func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) {
pool.checkDial()
pool.wg.Add(1)
go pool.eventLoop()
+
+ // Inject the bootstrap nodes as initial dial candiates.
+ pool.wg.Add(1)
+ go func() {
+ defer pool.wg.Done()
+ for _, n := range server.BootstrapNodes {
+ select {
+ case pool.discNodes <- n:
+ case <-pool.closeCh:
+ return
+ }
+ }
+ }()
}
func (pool *serverPool) stop() {
diff --git a/les/sync.go b/les/sync.go
index 1214fefcaf..207686403f 100644
--- a/les/sync.go
+++ b/les/sync.go
@@ -66,7 +66,7 @@ func (h *clientHandler) validateCheckpoint(peer *peer) error {
if err != nil {
return err
}
- events := h.backend.oracle.contract.LookupCheckpointEvents(logs, cp.SectionIndex, cp.Hash())
+ events := h.backend.oracle.Contract().LookupCheckpointEvents(logs, cp.SectionIndex, cp.Hash())
if len(events) == 0 {
return errInvalidCheckpoint
}
@@ -78,7 +78,7 @@ func (h *clientHandler) validateCheckpoint(peer *peer) error {
for _, event := range events {
signatures = append(signatures, append(event.R[:], append(event.S[:], event.V)...))
}
- valid, signers := h.backend.oracle.verifySigners(index, hash, signatures)
+ valid, signers := h.backend.oracle.VerifySigners(index, hash, signatures)
if !valid {
return errInvalidCheckpoint
}
@@ -134,7 +134,7 @@ func (h *clientHandler) synchronise(peer *peer) {
case hardcoded:
mode = legacyCheckpointSync
log.Debug("Disable checkpoint syncing", "reason", "checkpoint is hardcoded")
- case h.backend.oracle == nil || !h.backend.oracle.isRunning():
+ case h.backend.oracle == nil || !h.backend.oracle.IsRunning():
if h.checkpoint == nil {
mode = lightSync // Downgrade to light sync unfortunately.
} else {
diff --git a/les/sync_test.go b/les/sync_test.go
index 8df6223b84..1c157b4fbf 100644
--- a/les/sync_test.go
+++ b/les/sync_test.go
@@ -80,14 +80,14 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) {
data := append([]byte{0x19, 0x00}, append(registrarAddr.Bytes(), append([]byte{0, 0, 0, 0, 0, 0, 0, 0}, cp.Hash().Bytes()...)...)...)
sig, _ := crypto.Sign(crypto.Keccak256(data), signerKey)
sig[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper
- if _, err := server.handler.server.oracle.contract.RegisterCheckpoint(bind.NewKeyedTransactor(signerKey), cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil {
+ if _, err := server.handler.server.oracle.Contract().RegisterCheckpoint(bind.NewKeyedTransactor(signerKey), cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil {
t.Error("register checkpoint failed", err)
}
server.backend.Commit()
// Wait for the checkpoint registration
for {
- _, hash, _, err := server.handler.server.oracle.contract.Contract().GetLatestCheckpoint(nil)
+ _, hash, _, err := server.handler.server.oracle.Contract().Contract().GetLatestCheckpoint(nil)
if err != nil || hash == [32]byte{} {
time.Sleep(10 * time.Millisecond)
continue
@@ -164,14 +164,14 @@ func testMissOracleBackend(t *testing.T, hasCheckpoint bool) {
data := append([]byte{0x19, 0x00}, append(registrarAddr.Bytes(), append([]byte{0, 0, 0, 0, 0, 0, 0, 0}, cp.Hash().Bytes()...)...)...)
sig, _ := crypto.Sign(crypto.Keccak256(data), signerKey)
sig[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper
- if _, err := server.handler.server.oracle.contract.RegisterCheckpoint(bind.NewKeyedTransactor(signerKey), cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil {
+ if _, err := server.handler.server.oracle.Contract().RegisterCheckpoint(bind.NewKeyedTransactor(signerKey), cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil {
t.Error("register checkpoint failed", err)
}
server.backend.Commit()
// Wait for the checkpoint registration
for {
- _, hash, _, err := server.handler.server.oracle.contract.Contract().GetLatestCheckpoint(nil)
+ _, hash, _, err := server.handler.server.oracle.Contract().Contract().GetLatestCheckpoint(nil)
if err != nil || hash == [32]byte{} {
time.Sleep(100 * time.Millisecond)
continue
diff --git a/les/test_helper.go b/les/test_helper.go
index fd39ca8531..200b7b9ac4 100644
--- a/les/test_helper.go
+++ b/les/test_helper.go
@@ -39,6 +39,7 @@ import (
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/les/checkpointoracle"
"github.com/ethereum/go-ethereum/les/flowcontrol"
"github.com/ethereum/go-ethereum/light"
"github.com/ethereum/go-ethereum/p2p"
@@ -174,7 +175,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index
Alloc: core.GenesisAlloc{bankAddr: {Balance: bankFunds}},
GasLimit: 100000000,
}
- oracle *checkpointOracle
+ oracle *checkpointoracle.CheckpointOracle
)
genesis := gspec.MustCommit(db)
chain, _ := light.NewLightChain(odr, gspec.Config, engine, nil)
@@ -194,7 +195,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index
BloomRoot: light.GetBloomTrieRoot(db, index, sectionHead),
}
}
- oracle = newCheckpointOracle(checkpointConfig, getLocal)
+ oracle = checkpointoracle.New(checkpointConfig, getLocal)
}
client := &LightEthereum{
lesCommons: lesCommons{
@@ -218,7 +219,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index
client.handler = newClientHandler(ulcServers, ulcFraction, nil, client)
if client.oracle != nil {
- client.oracle.start(backend)
+ client.oracle.Start(backend)
}
return client.handler
}
@@ -230,7 +231,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
Alloc: core.GenesisAlloc{bankAddr: {Balance: bankFunds}},
GasLimit: 100000000,
}
- oracle *checkpointOracle
+ oracle *checkpointoracle.CheckpointOracle
)
genesis := gspec.MustCommit(db)
@@ -257,7 +258,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
BloomRoot: light.GetBloomTrieRoot(db, index, sectionHead),
}
}
- oracle = newCheckpointOracle(checkpointConfig, getLocal)
+ oracle = checkpointoracle.New(checkpointConfig, getLocal)
}
server := &LesServer{
lesCommons: lesCommons{
@@ -284,7 +285,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da
server.clientPool.setLimits(10000, 10000) // Assign enough capacity for clientpool
server.handler = newServerHandler(server, simulation.Blockchain(), db, txpool, func() bool { return true })
if server.oracle != nil {
- server.oracle.start(simulation)
+ server.oracle.Start(simulation)
}
server.servingQueue.setThreads(4)
server.handler.start()
diff --git a/les/txrelay.go b/les/txrelay.go
index 49195161b7..d37a18faff 100644
--- a/les/txrelay.go
+++ b/les/txrelay.go
@@ -54,51 +54,51 @@ func newLesTxRelay(ps *peerSet, retriever *retrieveManager) *lesTxRelay {
return r
}
-func (self *lesTxRelay) Stop() {
- close(self.stop)
+func (ltrx *lesTxRelay) Stop() {
+ close(ltrx.stop)
}
-func (self *lesTxRelay) registerPeer(p *peer) {
- self.lock.Lock()
- defer self.lock.Unlock()
+func (ltrx *lesTxRelay) registerPeer(p *peer) {
+ ltrx.lock.Lock()
+ defer ltrx.lock.Unlock()
- self.peerList = self.ps.AllPeers()
+ ltrx.peerList = ltrx.ps.AllPeers()
}
-func (self *lesTxRelay) unregisterPeer(p *peer) {
- self.lock.Lock()
- defer self.lock.Unlock()
+func (ltrx *lesTxRelay) unregisterPeer(p *peer) {
+ ltrx.lock.Lock()
+ defer ltrx.lock.Unlock()
- self.peerList = self.ps.AllPeers()
+ ltrx.peerList = ltrx.ps.AllPeers()
}
// send sends a list of transactions to at most a given number of peers at
// once, never resending any particular transaction to the same peer twice
-func (self *lesTxRelay) send(txs types.Transactions, count int) {
+func (ltrx *lesTxRelay) send(txs types.Transactions, count int) {
sendTo := make(map[*peer]types.Transactions)
- self.peerStartPos++ // rotate the starting position of the peer list
- if self.peerStartPos >= len(self.peerList) {
- self.peerStartPos = 0
+ ltrx.peerStartPos++ // rotate the starting position of the peer list
+ if ltrx.peerStartPos >= len(ltrx.peerList) {
+ ltrx.peerStartPos = 0
}
for _, tx := range txs {
hash := tx.Hash()
- ltr, ok := self.txSent[hash]
+ ltr, ok := ltrx.txSent[hash]
if !ok {
ltr = <rInfo{
tx: tx,
sentTo: make(map[*peer]struct{}),
}
- self.txSent[hash] = ltr
- self.txPending[hash] = struct{}{}
+ ltrx.txSent[hash] = ltr
+ ltrx.txPending[hash] = struct{}{}
}
- if len(self.peerList) > 0 {
+ if len(ltrx.peerList) > 0 {
cnt := count
- pos := self.peerStartPos
+ pos := ltrx.peerStartPos
for {
- peer := self.peerList[pos]
+ peer := ltrx.peerList[pos]
if _, ok := ltr.sentTo[peer]; !ok {
sendTo[peer] = append(sendTo[peer], tx)
ltr.sentTo[peer] = struct{}{}
@@ -108,10 +108,10 @@ func (self *lesTxRelay) send(txs types.Transactions, count int) {
break // sent it to the desired number of peers
}
pos++
- if pos == len(self.peerList) {
+ if pos == len(ltrx.peerList) {
pos = 0
}
- if pos == self.peerStartPos {
+ if pos == ltrx.peerStartPos {
break // tried all available peers
}
}
@@ -139,46 +139,46 @@ func (self *lesTxRelay) send(txs types.Transactions, count int) {
return func() { peer.SendTxs(reqID, cost, enc) }
},
}
- go self.retriever.retrieve(context.Background(), reqID, rq, func(p distPeer, msg *Msg) error { return nil }, self.stop)
+ go ltrx.retriever.retrieve(context.Background(), reqID, rq, func(p distPeer, msg *Msg) error { return nil }, ltrx.stop)
}
}
-func (self *lesTxRelay) Send(txs types.Transactions) {
- self.lock.Lock()
- defer self.lock.Unlock()
+func (ltrx *lesTxRelay) Send(txs types.Transactions) {
+ ltrx.lock.Lock()
+ defer ltrx.lock.Unlock()
- self.send(txs, 3)
+ ltrx.send(txs, 3)
}
-func (self *lesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {
- self.lock.Lock()
- defer self.lock.Unlock()
+func (ltrx *lesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback []common.Hash) {
+ ltrx.lock.Lock()
+ defer ltrx.lock.Unlock()
for _, hash := range mined {
- delete(self.txPending, hash)
+ delete(ltrx.txPending, hash)
}
for _, hash := range rollback {
- self.txPending[hash] = struct{}{}
+ ltrx.txPending[hash] = struct{}{}
}
- if len(self.txPending) > 0 {
- txs := make(types.Transactions, len(self.txPending))
+ if len(ltrx.txPending) > 0 {
+ txs := make(types.Transactions, len(ltrx.txPending))
i := 0
- for hash := range self.txPending {
- txs[i] = self.txSent[hash].tx
+ for hash := range ltrx.txPending {
+ txs[i] = ltrx.txSent[hash].tx
i++
}
- self.send(txs, 1)
+ ltrx.send(txs, 1)
}
}
-func (self *lesTxRelay) Discard(hashes []common.Hash) {
- self.lock.Lock()
- defer self.lock.Unlock()
+func (ltrx *lesTxRelay) Discard(hashes []common.Hash) {
+ ltrx.lock.Lock()
+ defer ltrx.lock.Unlock()
for _, hash := range hashes {
- delete(self.txSent, hash)
- delete(self.txPending, hash)
+ delete(ltrx.txSent, hash)
+ delete(ltrx.txPending, hash)
}
}
diff --git a/light/lightchain.go b/light/lightchain.go
index a83551dd61..646e5847bb 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -173,7 +173,6 @@ func (lc *LightChain) loadLastState() error {
lc.hc.SetCurrentHeader(header)
}
}
-
// Issue a status log and return
header := lc.hc.CurrentHeader()
headerTd := lc.GetTd(header.Hash(), header.Number.Uint64())
@@ -212,9 +211,13 @@ func (lc *LightChain) ResetWithGenesisBlock(genesis *types.Block) {
defer lc.chainmu.Unlock()
// Prepare the genesis block and reinitialise the chain
- rawdb.WriteTd(lc.chainDb, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
- rawdb.WriteBlock(lc.chainDb, genesis)
-
+ batch := lc.chainDb.NewBatch()
+ rawdb.WriteTd(batch, genesis.Hash(), genesis.NumberU64(), genesis.Difficulty())
+ rawdb.WriteBlock(batch, genesis)
+ rawdb.WriteHeadHeaderHash(batch, genesis.Hash())
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to reset genesis block", "err", err)
+ }
lc.genesisBlock = genesis
lc.hc.SetGenesis(lc.genesisBlock.Header())
lc.hc.SetCurrentHeader(lc.genesisBlock.Header())
@@ -337,13 +340,22 @@ func (lc *LightChain) Rollback(chain []common.Hash) {
lc.chainmu.Lock()
defer lc.chainmu.Unlock()
+ batch := lc.chainDb.NewBatch()
for i := len(chain) - 1; i >= 0; i-- {
hash := chain[i]
+ // Degrade the chain markers if they are explicitly reverted.
+ // In theory we should update all in-memory markers in the
+ // last step, however the direction of rollback is from high
+ // to low, so it's safe the update in-memory markers directly.
if head := lc.hc.CurrentHeader(); head.Hash() == hash {
+ rawdb.WriteHeadHeaderHash(batch, head.ParentHash)
lc.hc.SetCurrentHeader(lc.GetHeader(head.ParentHash, head.Number.Uint64()-1))
}
}
+ if err := batch.Write(); err != nil {
+ log.Crit("Failed to rollback light chain", "error", err)
+ }
}
// postChainEvents iterates over the events generated by a chain insertion and
@@ -507,6 +519,7 @@ func (lc *LightChain) SyncCheckpoint(ctx context.Context, checkpoint *params.Tru
// Ensure the chain didn't move past the latest block while retrieving it
if lc.hc.CurrentHeader().Number.Uint64() < header.Number.Uint64() {
log.Info("Updated latest header based on CHT", "number", header.Number, "hash", header.Hash(), "age", common.PrettyAge(time.Unix(int64(header.Time), 0)))
+ rawdb.WriteHeadHeaderHash(lc.chainDb, header.Hash())
lc.hc.SetCurrentHeader(header)
}
return true
diff --git a/log/handler.go b/log/handler.go
index 2f01b5dc6f..3c99114dcb 100644
--- a/log/handler.go
+++ b/log/handler.go
@@ -8,11 +8,6 @@ import (
"reflect"
"sync"
- "io/ioutil"
- "path/filepath"
- "regexp"
- "strings"
-
"github.com/go-stack/stack"
)
@@ -75,111 +70,6 @@ func FileHandler(path string, fmtr Format) (Handler, error) {
return closingHandler{f, StreamHandler(f, fmtr)}, nil
}
-// countingWriter wraps a WriteCloser object in order to count the written bytes.
-type countingWriter struct {
- w io.WriteCloser // the wrapped object
- count uint // number of bytes written
-}
-
-// Write increments the byte counter by the number of bytes written.
-// Implements the WriteCloser interface.
-func (w *countingWriter) Write(p []byte) (n int, err error) {
- n, err = w.w.Write(p)
- w.count += uint(n)
- return n, err
-}
-
-// Close implements the WriteCloser interface.
-func (w *countingWriter) Close() error {
- return w.w.Close()
-}
-
-// prepFile opens the log file at the given path, and cuts off the invalid part
-// from the end, because the previous execution could have been finished by interruption.
-// Assumes that every line ended by '\n' contains a valid log record.
-func prepFile(path string) (*countingWriter, error) {
- f, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND, 0600)
- if err != nil {
- return nil, err
- }
- _, err = f.Seek(-1, io.SeekEnd)
- if err != nil {
- return nil, err
- }
- buf := make([]byte, 1)
- var cut int64
- for {
- if _, err := f.Read(buf); err != nil {
- return nil, err
- }
- if buf[0] == '\n' {
- break
- }
- if _, err = f.Seek(-2, io.SeekCurrent); err != nil {
- return nil, err
- }
- cut++
- }
- fi, err := f.Stat()
- if err != nil {
- return nil, err
- }
- ns := fi.Size() - cut
- if err = f.Truncate(ns); err != nil {
- return nil, err
- }
- return &countingWriter{w: f, count: uint(ns)}, nil
-}
-
-// RotatingFileHandler returns a handler which writes log records to file chunks
-// at the given path. When a file's size reaches the limit, the handler creates
-// a new file named after the timestamp of the first log record it will contain.
-func RotatingFileHandler(path string, limit uint, formatter Format) (Handler, error) {
- if err := os.MkdirAll(path, 0700); err != nil {
- return nil, err
- }
- files, err := ioutil.ReadDir(path)
- if err != nil {
- return nil, err
- }
- re := regexp.MustCompile(`\.log$`)
- last := len(files) - 1
- for last >= 0 && (!files[last].Mode().IsRegular() || !re.MatchString(files[last].Name())) {
- last--
- }
- var counter *countingWriter
- if last >= 0 && files[last].Size() < int64(limit) {
- // Open the last file, and continue to write into it until it's size reaches the limit.
- if counter, err = prepFile(filepath.Join(path, files[last].Name())); err != nil {
- return nil, err
- }
- }
- if counter == nil {
- counter = new(countingWriter)
- }
- h := StreamHandler(counter, formatter)
-
- return FuncHandler(func(r *Record) error {
- if counter.count > limit {
- counter.Close()
- counter.w = nil
- }
- if counter.w == nil {
- f, err := os.OpenFile(
- filepath.Join(path, fmt.Sprintf("%s.log", strings.Replace(r.Time.Format("060102150405.00"), ".", "", 1))),
- os.O_CREATE|os.O_APPEND|os.O_WRONLY,
- 0600,
- )
- if err != nil {
- return err
- }
- counter.w = f
- counter.count = 0
- }
- return h.Log(r)
- }), nil
-}
-
// NetHandler opens a socket to the given address and writes records
// over the connection.
func NetHandler(network, addr string, fmtr Format) (Handler, error) {
diff --git a/log/handler_glog.go b/log/handler_glog.go
index 83dae44bd5..9b1d4efaf4 100644
--- a/log/handler_glog.go
+++ b/log/handler_glog.go
@@ -207,7 +207,7 @@ func (h *GlogHandler) Log(r *Record) error {
}
// Check callsite cache for previously calculated log levels
h.lock.RLock()
- lvl, ok := h.siteCache[r.Call.PC()]
+ lvl, ok := h.siteCache[r.Call.Frame().PC]
h.lock.RUnlock()
// If we didn't cache the callsite yet, calculate it
@@ -215,13 +215,13 @@ func (h *GlogHandler) Log(r *Record) error {
h.lock.Lock()
for _, rule := range h.patterns {
if rule.pattern.MatchString(fmt.Sprintf("%+s", r.Call)) {
- h.siteCache[r.Call.PC()], lvl, ok = rule.level, rule.level, true
+ h.siteCache[r.Call.Frame().PC], lvl, ok = rule.level, rule.level, true
break
}
}
// If no rule matched, remember to drop log the next time
if !ok {
- h.siteCache[r.Call.PC()] = 0
+ h.siteCache[r.Call.Frame().PC] = 0
}
h.lock.Unlock()
}
diff --git a/log/logger.go b/log/logger.go
index ca3e0b0599..276d6969e2 100644
--- a/log/logger.go
+++ b/log/logger.go
@@ -83,7 +83,7 @@ func LvlFromString(lvlString string) (Lvl, error) {
case "crit":
return LvlCrit, nil
default:
- return LvlDebug, fmt.Errorf("Unknown level: %v", lvlString)
+ return LvlDebug, fmt.Errorf("unknown level: %v", lvlString)
}
}
diff --git a/miner/miner.go b/miner/miner.go
index c5cb1b57e1..1f005562f2 100644
--- a/miner/miner.go
+++ b/miner/miner.go
@@ -185,3 +185,9 @@ func (miner *Miner) SetEtherbase(addr common.Address) {
miner.coinbase = addr
miner.worker.setEtherbase(addr)
}
+
+// SubscribePendingLogs starts delivering logs from pending transactions
+// to the given channel.
+func (self *Miner) SubscribePendingLogs(ch chan<- []*types.Log) event.Subscription {
+ return self.worker.pendingLogsFeed.Subscribe(ch)
+}
diff --git a/miner/stress_clique.go b/miner/stress_clique.go
index 7f5db2e520..2f8a28b68f 100644
--- a/miner/stress_clique.go
+++ b/miner/stress_clique.go
@@ -37,6 +37,7 @@ import (
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -199,7 +200,7 @@ func makeSealer(genesis *core.Genesis) (*node.Node, error) {
DatabaseHandles: 256,
TxPool: core.DefaultTxPoolConfig,
GPO: eth.DefaultConfig.GPO,
- Miner: Config{
+ Miner: miner.Config{
GasFloor: genesis.GasLimit * 9 / 10,
GasCeil: genesis.GasLimit * 11 / 10,
GasPrice: big.NewInt(1),
diff --git a/miner/stress_ethash.go b/miner/stress_ethash.go
index 7d4a7d24f7..988a15c488 100644
--- a/miner/stress_ethash.go
+++ b/miner/stress_ethash.go
@@ -38,6 +38,7 @@ import (
"github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/eth/downloader"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/miner"
"github.com/ethereum/go-ethereum/node"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
@@ -179,7 +180,7 @@ func makeMiner(genesis *core.Genesis) (*node.Node, error) {
TxPool: core.DefaultTxPoolConfig,
GPO: eth.DefaultConfig.GPO,
Ethash: eth.DefaultConfig.Ethash,
- Miner: Config{
+ Miner: miner.Config{
GasFloor: genesis.GasLimit * 9 / 10,
GasCeil: genesis.GasLimit * 11 / 10,
GasPrice: big.NewInt(1),
diff --git a/miner/worker.go b/miner/worker.go
index 3877281fa0..59ebace602 100644
--- a/miner/worker.go
+++ b/miner/worker.go
@@ -137,6 +137,9 @@ type worker struct {
eth Backend
chain *core.BlockChain
+ // Feeds
+ pendingLogsFeed event.Feed
+
// Subscriptions
mux *event.TypeMux
txsCh chan core.NewTxsEvent
@@ -630,7 +633,7 @@ func (w *worker) resultLoop() {
allReceipts := mergeReceipts(pubReceipts, prvReceipts)
// Commit block and state to database.
- stat, err := w.chain.WriteBlockWithState(block, allReceipts, task.state, task.privateState)
+ _, err := w.chain.WriteBlockWithState(block, allReceipts, logs, task.state, task.privateState, true)
if err != nil {
log.Error("Failed writing block to chain", "err", err)
continue
@@ -645,17 +648,6 @@ func (w *worker) resultLoop() {
// Broadcast the block and announce chain insertion event
w.mux.Post(core.NewMinedBlockEvent{Block: block})
- var events []interface{}
-
- switch stat {
- case core.CanonStatTy:
- events = append(events, core.ChainEvent{Block: block, Hash: block.Hash(), Logs: logs})
- events = append(events, core.ChainHeadEvent{Block: block})
- case core.SideStatTy:
- events = append(events, core.ChainSideEvent{Block: block})
- }
- w.chain.PostChainEvents(events, logs)
-
// Insert the block into the set of pending ones to resultLoop for confirmations
w.unconfirmed.Insert(block.NumberU64(), block.Hash())
@@ -897,7 +889,7 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin
cpy[i] = new(types.Log)
*cpy[i] = *l
}
- go w.mux.Post(core.PendingLogsEvent{Logs: cpy})
+ w.pendingLogsFeed.Send(cpy)
}
// Notify resubmit loop to decrease resubmitting interval if current interval is larger
// than the user-specified one.
@@ -1085,3 +1077,11 @@ func (w *worker) commit(uncles []*types.Header, interval func(), update bool, st
}
return nil
}
+
+// postSideBlock fires a side chain event, only use it for testing.
+func (w *worker) postSideBlock(event core.ChainSideEvent) {
+ select {
+ case w.chainSideCh <- event:
+ case <-w.exitCh:
+ }
+}
diff --git a/miner/worker_test.go b/miner/worker_test.go
index fd2d3b20ac..969615a90a 100644
--- a/miner/worker_test.go
+++ b/miner/worker_test.go
@@ -150,9 +150,6 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
func (b *testWorkerBackend) ChainDb() ethdb.Database { return b.db }
func (b *testWorkerBackend) BlockChain() *core.BlockChain { return b.chain }
func (b *testWorkerBackend) TxPool() *core.TxPool { return b.txPool }
-func (b *testWorkerBackend) PostChainEvents(events []interface{}) {
- b.chain.PostChainEvents(events, nil)
-}
func (b *testWorkerBackend) newRandomUncle() *types.Block {
var parent *types.Block
@@ -244,8 +241,8 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) {
for i := 0; i < 5; i++ {
b.txPool.AddLocal(b.newRandomTx(true))
b.txPool.AddLocal(b.newRandomTx(false))
- b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.newRandomUncle()}})
- b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.newRandomUncle()}})
+ w.postSideBlock(core.ChainSideEvent{Block: b.newRandomUncle()})
+ w.postSideBlock(core.ChainSideEvent{Block: b.newRandomUncle()})
select {
case e := <-loopErr:
t.Fatal(e)
@@ -296,7 +293,7 @@ func testEmptyWork(t *testing.T, chainConfig *params.ChainConfig, engine consens
}
w.skipSealHook = func(task *task) bool { return true }
w.fullTaskHook = func() {
- // Aarch64 unit tests are running in a VM on travis, they must
+ // Arch64 unit tests are running in a VM on travis, they must
// be given more time to execute.
time.Sleep(time.Second)
}
@@ -352,7 +349,8 @@ func TestStreamUncleBlock(t *testing.T) {
}
}
- b.PostChainEvents([]interface{}{core.ChainSideEvent{Block: b.uncleBlock}})
+ w.postSideBlock(core.ChainSideEvent{Block: b.uncleBlock})
+
select {
case <-taskCh:
case <-time.NewTimer(time.Second).C:
diff --git a/node/node.go b/node/node.go
index 4803f3b302..79ec41a20b 100644
--- a/node/node.go
+++ b/node/node.go
@@ -204,7 +204,7 @@ func (n *Node) Start() error {
for _, constructor := range n.serviceFuncs {
// Create a new context for the particular service
ctx := &ServiceContext{
- config: n.config,
+ Config: *n.config,
services: make(map[reflect.Type]Service),
EventMux: n.eventmux,
AccountManager: n.accman,
diff --git a/node/service.go b/node/service.go
index 19b27a60cf..d3d5ddb42a 100644
--- a/node/service.go
+++ b/node/service.go
@@ -33,20 +33,20 @@ import (
// the protocol stack, that is passed to all constructors to be optionally used;
// as well as utility methods to operate on the service environment.
type ServiceContext struct {
- config *Config
services map[reflect.Type]Service // Index of the already constructed services
- EventMux *event.TypeMux // Event multiplexer used for decoupled notifications
- AccountManager *accounts.Manager // Account manager created by the node.
+ Config Config
+ EventMux *event.TypeMux // Event multiplexer used for decoupled notifications
+ AccountManager *accounts.Manager // Account manager created by the node.
}
// OpenDatabase opens an existing database with the given name (or creates one
// if no previous can be found) from within the node's data directory. If the
// node is an ephemeral one, a memory database is returned.
func (ctx *ServiceContext) OpenDatabase(name string, cache int, handles int, namespace string) (ethdb.Database, error) {
- if ctx.config.DataDir == "" {
+ if ctx.Config.DataDir == "" {
return rawdb.NewMemoryDatabase(), nil
}
- return rawdb.NewLevelDBDatabase(ctx.config.ResolvePath(name), cache, handles, namespace)
+ return rawdb.NewLevelDBDatabase(ctx.Config.ResolvePath(name), cache, handles, namespace)
}
// OpenDatabaseWithFreezer opens an existing database with the given name (or
@@ -55,16 +55,16 @@ func (ctx *ServiceContext) OpenDatabase(name string, cache int, handles int, nam
// database to immutable append-only files. If the node is an ephemeral one, a
// memory database is returned.
func (ctx *ServiceContext) OpenDatabaseWithFreezer(name string, cache int, handles int, freezer string, namespace string) (ethdb.Database, error) {
- if ctx.config.DataDir == "" {
+ if ctx.Config.DataDir == "" {
return rawdb.NewMemoryDatabase(), nil
}
- root := ctx.config.ResolvePath(name)
+ root := ctx.Config.ResolvePath(name)
switch {
case freezer == "":
freezer = filepath.Join(root, "ancient")
case !filepath.IsAbs(freezer):
- freezer = ctx.config.ResolvePath(freezer)
+ freezer = ctx.Config.ResolvePath(freezer)
}
return rawdb.NewLevelDBDatabaseWithFreezer(root, cache, handles, freezer, namespace)
}
@@ -73,7 +73,7 @@ func (ctx *ServiceContext) OpenDatabaseWithFreezer(name string, cache int, handl
// and if the user actually uses persistent storage. It will return an empty string
// for emphemeral storage and the user's own input for absolute paths.
func (ctx *ServiceContext) ResolvePath(path string) string {
- return ctx.config.ResolvePath(path)
+ return ctx.Config.ResolvePath(path)
}
// Service retrieves a currently running service registered of a specific type.
@@ -88,13 +88,13 @@ func (ctx *ServiceContext) Service(service interface{}) error {
// NodeKey returns node key from config
func (ctx *ServiceContext) NodeKey() *ecdsa.PrivateKey {
- return ctx.config.NodeKey()
+ return ctx.Config.NodeKey()
}
// ExtRPCEnabled returns the indicator whether node enables the external
// RPC(http, ws or graphql).
func (ctx *ServiceContext) ExtRPCEnabled() bool {
- return ctx.config.ExtRPCEnabled()
+ return ctx.Config.ExtRPCEnabled()
}
// ServiceConstructor is the function signature of the constructors needed to be
diff --git a/node/service_test.go b/node/service_test.go
index 63004a51ab..5da8e9e434 100644
--- a/node/service_test.go
+++ b/node/service_test.go
@@ -38,7 +38,7 @@ func TestContextDatabases(t *testing.T) {
t.Fatalf("non-created database already exists")
}
// Request the opening/creation of a database and ensure it persists to disk
- ctx := &ServiceContext{config: &Config{Name: "unit-test", DataDir: dir}}
+ ctx := &ServiceContext{Config: Config{Name: "unit-test", DataDir: dir}}
db, err := ctx.OpenDatabase("persistent", 0, 0, "")
if err != nil {
t.Fatalf("failed to open persistent database: %v", err)
@@ -49,7 +49,7 @@ func TestContextDatabases(t *testing.T) {
t.Fatalf("persistent database doesn't exists: %v", err)
}
// Request th opening/creation of an ephemeral database and ensure it's not persisted
- ctx = &ServiceContext{config: &Config{DataDir: ""}}
+ ctx = &ServiceContext{Config: Config{DataDir: ""}}
db, err = ctx.OpenDatabase("ephemeral", 0, 0, "")
if err != nil {
t.Fatalf("failed to open ephemeral database: %v", err)
diff --git a/p2p/dial.go b/p2p/dial.go
index 3975b488bf..d190e866af 100644
--- a/p2p/dial.go
+++ b/p2p/dial.go
@@ -17,11 +17,17 @@
package p2p
import (
+ "context"
+ crand "crypto/rand"
+ "encoding/binary"
"errors"
"fmt"
+ mrand "math/rand"
"net"
+ "sync"
"time"
+ "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/netutil"
@@ -33,8 +39,9 @@ const (
// private networks.
dialHistoryExpiration = inboundThrottleTime + 5*time.Second
- // If no peers are found for this amount of time, the initial bootnodes are dialed.
- fallbackInterval = 20 * time.Second
+ // Config for the "Looking for peers" message.
+ dialStatsLogInterval = 10 * time.Second // printed at most this often
+ dialStatsPeerLimit = 3 // but not if more than this many dialed peers
// Endpoint resolution is throttled with bounded backoff.
initialResolveDelay = 60 * time.Second
@@ -42,219 +49,443 @@ const (
)
// NodeDialer is used to connect to nodes in the network, typically by using
-// an underlying net.Dialer but also using net.Pipe in tests
+// an underlying net.Dialer but also using net.Pipe in tests.
type NodeDialer interface {
- Dial(*enode.Node) (net.Conn, error)
+ Dial(context.Context, *enode.Node) (net.Conn, error)
}
type nodeResolver interface {
Resolve(*enode.Node) *enode.Node
}
-// TCPDialer implements the NodeDialer interface by using a net.Dialer to
-// create TCP connections to nodes in the network
-type TCPDialer struct {
- *net.Dialer
+// tcpDialer implements NodeDialer using real TCP connections.
+type tcpDialer struct {
+ d *net.Dialer
}
-// Dial creates a TCP connection to the node
-func (t TCPDialer) Dial(dest *enode.Node) (net.Conn, error) {
- addr := &net.TCPAddr{IP: dest.IP(), Port: dest.TCP()}
- return t.Dialer.Dial("tcp", addr.String())
+func (t tcpDialer) Dial(ctx context.Context, dest *enode.Node) (net.Conn, error) {
+ return t.d.DialContext(ctx, "tcp", nodeAddr(dest).String())
}
-// dialstate schedules dials and discovery lookups.
-// It gets a chance to compute new tasks on every iteration
-// of the main loop in Server.run.
-type dialstate struct {
- maxDynDials int
- netrestrict *netutil.Netlist
- self enode.ID
- bootnodes []*enode.Node // default dials when there are no peers
- log log.Logger
+func nodeAddr(n *enode.Node) net.Addr {
+ return &net.TCPAddr{IP: n.IP(), Port: n.TCP()}
+}
+
+// checkDial errors:
+var (
+ errSelf = errors.New("is self")
+ errAlreadyDialing = errors.New("already dialing")
+ errAlreadyConnected = errors.New("already connected")
+ errRecentlyDialed = errors.New("recently dialed")
+ errNotWhitelisted = errors.New("not contained in netrestrict whitelist")
+)
+
+// dialer creates outbound connections and submits them into Server.
+// Two types of peer connections can be created:
+//
+// - static dials are pre-configured connections. The dialer attempts
+// keep these nodes connected at all times.
+//
+// - dynamic dials are created from node discovery results. The dialer
+// continuously reads candidate nodes from its input iterator and attempts
+// to create peer connections to nodes arriving through the iterator.
+//
+type dialScheduler struct {
+ dialConfig
+ setupFunc dialSetupFunc
+ wg sync.WaitGroup
+ cancel context.CancelFunc
+ ctx context.Context
+ nodesIn chan *enode.Node
+ doneCh chan *dialTask
+ addStaticCh chan *enode.Node
+ remStaticCh chan *enode.Node
+ addPeerCh chan *conn
+ remPeerCh chan *conn
+
+ // Everything below here belongs to loop and
+ // should only be accessed by code on the loop goroutine.
+ dialing map[enode.ID]*dialTask // active tasks
+ peers map[enode.ID]connFlag // all connected peers
+ dialPeers int // current number of dialed peers
+
+ // The static map tracks all static dial tasks. The subset of usable static dial tasks
+ // (i.e. those passing checkDial) is kept in staticPool. The scheduler prefers
+ // launching random static tasks from the pool over launching dynamic dials from the
+ // iterator.
+ static map[enode.ID]*dialTask
+ staticPool []*dialTask
+
+ // The dial history keeps recently dialed nodes. Members of history are not dialed.
+ history expHeap
+ historyTimer mclock.Timer
+ historyTimerTime mclock.AbsTime
+
+ // for logStats
+ lastStatsLog mclock.AbsTime
+ doneSinceLastLog int
+}
- start time.Time // time when the dialer was first used
- lookupRunning bool
- dialing map[enode.ID]connFlag
- lookupBuf []*enode.Node // current discovery lookup results
- static map[enode.ID]*dialTask
- hist expHeap
+type dialSetupFunc func(net.Conn, connFlag, *enode.Node) error
+
+type dialConfig struct {
+ self enode.ID // our own ID
+ maxDialPeers int // maximum number of dialed peers
+ maxActiveDials int // maximum number of active dials
+ netRestrict *netutil.Netlist // IP whitelist, disabled if nil
+ resolver nodeResolver
+ dialer NodeDialer
+ log log.Logger
+ clock mclock.Clock
+ rand *mrand.Rand
}
-type task interface {
- Do(*Server)
+func (cfg dialConfig) withDefaults() dialConfig {
+ if cfg.maxActiveDials == 0 {
+ cfg.maxActiveDials = defaultMaxPendingPeers
+ }
+ if cfg.log == nil {
+ cfg.log = log.Root()
+ }
+ if cfg.clock == nil {
+ cfg.clock = mclock.System{}
+ }
+ if cfg.rand == nil {
+ seedb := make([]byte, 8)
+ crand.Read(seedb)
+ seed := int64(binary.BigEndian.Uint64(seedb))
+ cfg.rand = mrand.New(mrand.NewSource(seed))
+ }
+ return cfg
}
-func newDialState(self enode.ID, maxdyn int, cfg *Config) *dialstate {
- s := &dialstate{
- maxDynDials: maxdyn,
- self: self,
- netrestrict: cfg.NetRestrict,
- log: cfg.Logger,
+func newDialScheduler(config dialConfig, it enode.Iterator, setupFunc dialSetupFunc) *dialScheduler {
+ d := &dialScheduler{
+ dialConfig: config.withDefaults(),
+ setupFunc: setupFunc,
+ dialing: make(map[enode.ID]*dialTask),
static: make(map[enode.ID]*dialTask),
- dialing: make(map[enode.ID]connFlag),
- bootnodes: make([]*enode.Node, len(cfg.BootstrapNodes)),
+ peers: make(map[enode.ID]connFlag),
+ doneCh: make(chan *dialTask),
+ nodesIn: make(chan *enode.Node),
+ addStaticCh: make(chan *enode.Node),
+ remStaticCh: make(chan *enode.Node),
+ addPeerCh: make(chan *conn),
+ remPeerCh: make(chan *conn),
}
- copy(s.bootnodes, cfg.BootstrapNodes)
- if s.log == nil {
- s.log = log.Root()
+ d.lastStatsLog = d.clock.Now()
+ d.ctx, d.cancel = context.WithCancel(context.Background())
+ d.wg.Add(2)
+ go d.readNodes(it)
+ go d.loop(it)
+ return d
+}
+
+// stop shuts down the dialer, canceling all current dial tasks.
+func (d *dialScheduler) stop() {
+ d.cancel()
+ d.wg.Wait()
+}
+
+// addStatic adds a static dial candidate.
+func (d *dialScheduler) addStatic(n *enode.Node) {
+ select {
+ case d.addStaticCh <- n:
+ case <-d.ctx.Done():
}
- for _, n := range cfg.StaticNodes {
- s.addStatic(n)
+}
+
+// removeStatic removes a static dial candidate.
+func (d *dialScheduler) removeStatic(n *enode.Node) {
+ select {
+ case d.remStaticCh <- n:
+ case <-d.ctx.Done():
}
- return s
}
-func (s *dialstate) addStatic(n *enode.Node) {
- // This overwrites the task instead of updating an existing
- // entry, giving users the opportunity to force a resolve operation.
- s.static[n.ID()] = &dialTask{flags: staticDialedConn, dest: n}
+// peerAdded updates the peer set.
+func (d *dialScheduler) peerAdded(c *conn) {
+ select {
+ case d.addPeerCh <- c:
+ case <-d.ctx.Done():
+ }
}
-func (s *dialstate) removeStatic(n *enode.Node) {
- // This removes a task so future attempts to connect will not be made.
- delete(s.static, n.ID())
+// peerRemoved updates the peer set.
+func (d *dialScheduler) peerRemoved(c *conn) {
+ select {
+ case d.remPeerCh <- c:
+ case <-d.ctx.Done():
+ }
}
-func (s *dialstate) newTasks(nRunning int, peers map[enode.ID]*Peer, now time.Time) []task {
- var newtasks []task
- addDial := func(flag connFlag, n *enode.Node) bool {
- if err := s.checkDial(n, peers); err != nil {
- s.log.Trace("Skipping dial candidate", "id", n.ID(), "addr", &net.TCPAddr{IP: n.IP(), Port: n.TCP()}, "err", err)
- return false
- }
- s.dialing[n.ID()] = flag
- newtasks = append(newtasks, &dialTask{flags: flag, dest: n})
- return true
- }
-
- if s.start.IsZero() {
- s.start = now
- }
- s.hist.expire(now)
-
- // Create dials for static nodes if they are not connected.
- for id, t := range s.static {
- err := s.checkDial(t.dest, peers)
- switch err {
- case errNotWhitelisted, errSelf:
- s.log.Warn("Removing static dial candidate", "id", t.dest.ID, "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()}, "err", err)
- delete(s.static, t.dest.ID())
- case nil:
- s.dialing[id] = t.flags
- newtasks = append(newtasks, t)
+// loop is the main loop of the dialer.
+func (d *dialScheduler) loop(it enode.Iterator) {
+ var (
+ nodesCh chan *enode.Node
+ historyExp = make(chan struct{}, 1)
+ )
+
+loop:
+ for {
+ // Launch new dials if slots are available.
+ slots := d.freeDialSlots()
+ slots -= d.startStaticDials(slots)
+ if slots > 0 {
+ nodesCh = d.nodesIn
+ } else {
+ nodesCh = nil
}
- }
+ d.rearmHistoryTimer(historyExp)
+ d.logStats()
+
+ select {
+ case node := <-nodesCh:
+ if err := d.checkDial(node); err != nil {
+ d.log.Trace("Discarding dial candidate", "id", node.ID(), "ip", node.IP(), "reason", err)
+ } else {
+ d.startDial(newDialTask(node, dynDialedConn))
+ }
+
+ case task := <-d.doneCh:
+ id := task.dest.ID()
+ delete(d.dialing, id)
+ d.updateStaticPool(id)
+ d.doneSinceLastLog++
+
+ case c := <-d.addPeerCh:
+ if c.is(dynDialedConn) || c.is(staticDialedConn) {
+ d.dialPeers++
+ }
+ id := c.node.ID()
+ d.peers[id] = c.flags
+ // Remove from static pool because the node is now connected.
+ task := d.static[id]
+ if task != nil && task.staticPoolIndex >= 0 {
+ d.removeFromStaticPool(task.staticPoolIndex)
+ }
+ // TODO: cancel dials to connected peers
+
+ case c := <-d.remPeerCh:
+ if c.is(dynDialedConn) || c.is(staticDialedConn) {
+ d.dialPeers--
+ }
+ delete(d.peers, c.node.ID())
+ d.updateStaticPool(c.node.ID())
+
+ case node := <-d.addStaticCh:
+ id := node.ID()
+ _, exists := d.static[id]
+ d.log.Trace("Adding static node", "id", id, "ip", node.IP(), "added", !exists)
+ if exists {
+ continue loop
+ }
+ task := newDialTask(node, staticDialedConn)
+ d.static[id] = task
+ if d.checkDial(node) == nil {
+ d.addToStaticPool(task)
+ }
+
+ case node := <-d.remStaticCh:
+ id := node.ID()
+ task := d.static[id]
+ d.log.Trace("Removing static node", "id", id, "ok", task != nil)
+ if task != nil {
+ delete(d.static, id)
+ if task.staticPoolIndex >= 0 {
+ d.removeFromStaticPool(task.staticPoolIndex)
+ }
+ }
- // Compute number of dynamic dials needed.
- needDynDials := s.maxDynDials
- for _, p := range peers {
- if p.rw.is(dynDialedConn) {
- needDynDials--
+ case <-historyExp:
+ d.expireHistory()
+
+ case <-d.ctx.Done():
+ it.Close()
+ break loop
}
}
- for _, flag := range s.dialing {
- if flag&dynDialedConn != 0 {
- needDynDials--
- }
+
+ d.stopHistoryTimer(historyExp)
+ for range d.dialing {
+ <-d.doneCh
}
+ d.wg.Done()
+}
+
+// readNodes runs in its own goroutine and delivers nodes from
+// the input iterator to the nodesIn channel.
+func (d *dialScheduler) readNodes(it enode.Iterator) {
+ defer d.wg.Done()
- // If we don't have any peers whatsoever, try to dial a random bootnode. This
- // scenario is useful for the testnet (and private networks) where the discovery
- // table might be full of mostly bad peers, making it hard to find good ones.
- if len(peers) == 0 && len(s.bootnodes) > 0 && needDynDials > 0 && now.Sub(s.start) > fallbackInterval {
- bootnode := s.bootnodes[0]
- s.bootnodes = append(s.bootnodes[:0], s.bootnodes[1:]...)
- s.bootnodes = append(s.bootnodes, bootnode)
- if addDial(dynDialedConn, bootnode) {
- needDynDials--
+ for it.Next() {
+ select {
+ case d.nodesIn <- it.Node():
+ case <-d.ctx.Done():
}
}
+}
- // Create dynamic dials from discovery results.
- i := 0
- for ; i < len(s.lookupBuf) && needDynDials > 0; i++ {
- if addDial(dynDialedConn, s.lookupBuf[i]) {
- needDynDials--
- }
+// logStats prints dialer statistics to the log. The message is suppressed when enough
+// peers are connected because users should only see it while their client is starting up
+// or comes back online.
+func (d *dialScheduler) logStats() {
+ now := d.clock.Now()
+ if d.lastStatsLog.Add(dialStatsLogInterval) > now {
+ return
+ }
+ if d.dialPeers < dialStatsPeerLimit && d.dialPeers < d.maxDialPeers {
+ d.log.Info("Looking for peers", "peercount", len(d.peers), "tried", d.doneSinceLastLog, "static", len(d.static))
}
- s.lookupBuf = s.lookupBuf[:copy(s.lookupBuf, s.lookupBuf[i:])]
+ d.doneSinceLastLog = 0
+ d.lastStatsLog = now
+}
- // Launch a discovery lookup if more candidates are needed.
- if len(s.lookupBuf) < needDynDials && !s.lookupRunning {
- s.lookupRunning = true
- newtasks = append(newtasks, &discoverTask{want: needDynDials - len(s.lookupBuf)})
+// rearmHistoryTimer configures d.historyTimer to fire when the
+// next item in d.history expires.
+func (d *dialScheduler) rearmHistoryTimer(ch chan struct{}) {
+ if len(d.history) == 0 || d.historyTimerTime == d.history.nextExpiry() {
+ return
}
+ d.stopHistoryTimer(ch)
+ d.historyTimerTime = d.history.nextExpiry()
+ timeout := time.Duration(d.historyTimerTime - d.clock.Now())
+ d.historyTimer = d.clock.AfterFunc(timeout, func() { ch <- struct{}{} })
+}
- // Launch a timer to wait for the next node to expire if all
- // candidates have been tried and no task is currently active.
- // This should prevent cases where the dialer logic is not ticked
- // because there are no pending events.
- if nRunning == 0 && len(newtasks) == 0 && s.hist.Len() > 0 {
- t := &waitExpireTask{s.hist.nextExpiry().Sub(now)}
- newtasks = append(newtasks, t)
+// stopHistoryTimer stops the timer and drains the channel it sends on.
+func (d *dialScheduler) stopHistoryTimer(ch chan struct{}) {
+ if d.historyTimer != nil && !d.historyTimer.Stop() {
+ <-ch
}
- return newtasks
}
-var (
- errSelf = errors.New("is self")
- errAlreadyDialing = errors.New("already dialing")
- errAlreadyConnected = errors.New("already connected")
- errRecentlyDialed = errors.New("recently dialed")
- errNotWhitelisted = errors.New("not contained in netrestrict whitelist")
-)
+// expireHistory removes expired items from d.history.
+func (d *dialScheduler) expireHistory() {
+ d.historyTimer.Stop()
+ d.historyTimer = nil
+ d.historyTimerTime = 0
+ d.history.expire(d.clock.Now(), func(hkey string) {
+ var id enode.ID
+ copy(id[:], hkey)
+ d.updateStaticPool(id)
+ })
+}
+
+// freeDialSlots returns the number of free dial slots. The result can be negative
+// when peers are connected while their task is still running.
+func (d *dialScheduler) freeDialSlots() int {
+ slots := (d.maxDialPeers - d.dialPeers) * 2
+ if slots > d.maxActiveDials {
+ slots = d.maxActiveDials
+ }
+ free := slots - len(d.dialing)
+ return free
+}
-func (s *dialstate) checkDial(n *enode.Node, peers map[enode.ID]*Peer) error {
- _, dialing := s.dialing[n.ID()]
- switch {
- case dialing:
+// checkDial returns an error if node n should not be dialed.
+func (d *dialScheduler) checkDial(n *enode.Node) error {
+ if n.ID() == d.self {
+ return errSelf
+ }
+ if _, ok := d.dialing[n.ID()]; ok {
return errAlreadyDialing
- case peers[n.ID()] != nil:
+ }
+ if _, ok := d.peers[n.ID()]; ok {
return errAlreadyConnected
- case n.ID() == s.self:
- return errSelf
- case s.netrestrict != nil && !s.netrestrict.Contains(n.IP()):
+ }
+ if d.netRestrict != nil && !d.netRestrict.Contains(n.IP()) {
return errNotWhitelisted
- case s.hist.contains(string(n.ID().Bytes())):
+ }
+ if d.history.contains(string(n.ID().Bytes())) {
return errRecentlyDialed
}
return nil
}
-func (s *dialstate) taskDone(t task, now time.Time) {
- switch t := t.(type) {
- case *dialTask:
- s.hist.add(string(t.dest.ID().Bytes()), now.Add(dialHistoryExpiration))
- delete(s.dialing, t.dest.ID())
- case *discoverTask:
- s.lookupRunning = false
- s.lookupBuf = append(s.lookupBuf, t.results...)
+// startStaticDials starts n static dial tasks.
+func (d *dialScheduler) startStaticDials(n int) (started int) {
+ for started = 0; started < n && len(d.staticPool) > 0; started++ {
+ idx := d.rand.Intn(len(d.staticPool))
+ task := d.staticPool[idx]
+ d.startDial(task)
+ d.removeFromStaticPool(idx)
+ }
+ return started
+}
+
+// updateStaticPool attempts to move the given static dial back into staticPool.
+func (d *dialScheduler) updateStaticPool(id enode.ID) {
+ task, ok := d.static[id]
+ if ok && task.staticPoolIndex < 0 && d.checkDial(task.dest) == nil {
+ d.addToStaticPool(task)
+ }
+}
+
+func (d *dialScheduler) addToStaticPool(task *dialTask) {
+ if task.staticPoolIndex >= 0 {
+ panic("attempt to add task to staticPool twice")
}
+ d.staticPool = append(d.staticPool, task)
+ task.staticPoolIndex = len(d.staticPool) - 1
}
-// A dialTask is generated for each node that is dialed. Its
-// fields cannot be accessed while the task is running.
+// removeFromStaticPool removes the task at idx from staticPool. It does that by moving the
+// current last element of the pool to idx and then shortening the pool by one.
+func (d *dialScheduler) removeFromStaticPool(idx int) {
+ task := d.staticPool[idx]
+ end := len(d.staticPool) - 1
+ d.staticPool[idx] = d.staticPool[end]
+ d.staticPool[idx].staticPoolIndex = idx
+ d.staticPool[end] = nil
+ d.staticPool = d.staticPool[:end]
+ task.staticPoolIndex = -1
+}
+
+// startDial runs the given dial task in a separate goroutine.
+func (d *dialScheduler) startDial(task *dialTask) {
+ d.log.Trace("Starting p2p dial", "id", task.dest.ID(), "ip", task.dest.IP(), "flag", task.flags)
+ hkey := string(task.dest.ID().Bytes())
+ d.history.add(hkey, d.clock.Now().Add(dialHistoryExpiration))
+ d.dialing[task.dest.ID()] = task
+ go func() {
+ task.run(d)
+ d.doneCh <- task
+ }()
+}
+
+// A dialTask generated for each node that is dialed.
type dialTask struct {
- flags connFlag
+ staticPoolIndex int
+ flags connFlag
+ // These fields are private to the task and should not be
+ // accessed by dialScheduler while the task is running.
dest *enode.Node
- lastResolved time.Time
+ lastResolved mclock.AbsTime
resolveDelay time.Duration
}
-func (t *dialTask) Do(srv *Server) {
+func newDialTask(dest *enode.Node, flags connFlag) *dialTask {
+ return &dialTask{dest: dest, flags: flags, staticPoolIndex: -1}
+}
+
+type dialError struct {
+ error
+}
+
+func (t *dialTask) run(d *dialScheduler) {
if t.dest.Incomplete() {
- if !t.resolve(srv) {
+ if !t.resolve(d) {
return
}
}
- err := t.dial(srv, t.dest)
+
+ err := t.dial(d, t.dest)
if err != nil {
- srv.log.Trace("Dial error", "task", t, "err", err)
// Try resolving the ID of static nodes if dialing failed.
if _, ok := err.(*dialError); ok && t.flags&staticDialedConn != 0 {
- if t.resolve(srv) {
- t.dial(srv, t.dest)
+ if t.resolve(d) {
+ t.dial(d, t.dest)
}
}
}
@@ -266,46 +497,42 @@ func (t *dialTask) Do(srv *Server) {
// Resolve operations are throttled with backoff to avoid flooding the
// discovery network with useless queries for nodes that don't exist.
// The backoff delay resets when the node is found.
-func (t *dialTask) resolve(srv *Server) bool {
- if srv.staticNodeResolver == nil {
- srv.log.Debug("Can't resolve node", "id", t.dest.ID(), "err", "discovery is disabled")
+func (t *dialTask) resolve(d *dialScheduler) bool {
+ if d.resolver == nil {
return false
}
if t.resolveDelay == 0 {
t.resolveDelay = initialResolveDelay
}
- if time.Since(t.lastResolved) < t.resolveDelay {
+ if t.lastResolved > 0 && time.Duration(d.clock.Now()-t.lastResolved) < t.resolveDelay {
return false
}
- resolved := srv.staticNodeResolver.Resolve(t.dest)
- t.lastResolved = time.Now()
+ resolved := d.resolver.Resolve(t.dest)
+ t.lastResolved = d.clock.Now()
if resolved == nil {
t.resolveDelay *= 2
if t.resolveDelay > maxResolveDelay {
t.resolveDelay = maxResolveDelay
}
- srv.log.Debug("Resolving node failed", "id", t.dest.ID(), "newdelay", t.resolveDelay)
+ d.log.Debug("Resolving node failed", "id", t.dest.ID(), "newdelay", t.resolveDelay)
return false
}
// The node was found.
t.resolveDelay = initialResolveDelay
t.dest = resolved
- srv.log.Debug("Resolved node", "id", t.dest.ID(), "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()})
+ d.log.Debug("Resolved node", "id", t.dest.ID(), "addr", &net.TCPAddr{IP: t.dest.IP(), Port: t.dest.TCP()})
return true
}
-type dialError struct {
- error
-}
-
// dial performs the actual connection attempt.
-func (t *dialTask) dial(srv *Server, dest *enode.Node) error {
- fd, err := srv.Dialer.Dial(dest)
+func (t *dialTask) dial(d *dialScheduler, dest *enode.Node) error {
+ fd, err := d.dialer.Dial(d.ctx, t.dest)
if err != nil {
+ d.log.Trace("Dial error", "id", t.dest.ID(), "addr", nodeAddr(t.dest), "conn", t.flags, "err", cleanupDialErr(err))
return &dialError{err}
}
mfd := newMeteredConn(fd, false, &net.TCPAddr{IP: dest.IP(), Port: dest.TCP()})
- return srv.SetupConn(mfd, t.flags, dest)
+ return d.setupFunc(mfd, t.flags, dest)
}
func (t *dialTask) String() string {
@@ -313,37 +540,9 @@ func (t *dialTask) String() string {
return fmt.Sprintf("%v %x %v:%d", t.flags, id[:8], t.dest.IP(), t.dest.TCP())
}
-// discoverTask runs discovery table operations.
-// Only one discoverTask is active at any time.
-// discoverTask.Do performs a random lookup.
-type discoverTask struct {
- want int
- results []*enode.Node
-}
-
-func (t *discoverTask) Do(srv *Server) {
- t.results = enode.ReadNodes(srv.discmix, t.want)
-}
-
-func (t *discoverTask) String() string {
- s := "discovery query"
- if len(t.results) > 0 {
- s += fmt.Sprintf(" (%d results)", len(t.results))
- } else {
- s += fmt.Sprintf(" (want %d)", t.want)
+func cleanupDialErr(err error) error {
+ if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
+ return netErr.Err
}
- return s
-}
-
-// A waitExpireTask is generated if there are no other tasks
-// to keep the loop in Server.run ticking.
-type waitExpireTask struct {
- time.Duration
-}
-
-func (t waitExpireTask) Do(*Server) {
- time.Sleep(t.Duration)
-}
-func (t waitExpireTask) String() string {
- return fmt.Sprintf("wait for dial hist expire (%v)", t.Duration)
+ return err
}
diff --git a/p2p/dial_test.go b/p2p/dial_test.go
index 6189ec4d0b..cd8dedff1c 100644
--- a/p2p/dial_test.go
+++ b/p2p/dial_test.go
@@ -17,574 +17,656 @@
package p2p
import (
- "encoding/binary"
+ "context"
+ "errors"
+ "fmt"
+ "math/rand"
"net"
"reflect"
- "strings"
+ "sync"
"testing"
"time"
- "github.com/davecgh/go-spew/spew"
+ "github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/internal/testlog"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/p2p/enode"
- "github.com/ethereum/go-ethereum/p2p/enr"
"github.com/ethereum/go-ethereum/p2p/netutil"
)
-func init() {
- spew.Config.Indent = "\t"
-}
-
-type dialtest struct {
- init *dialstate // state before and after the test.
- rounds []round
-}
-
-type round struct {
- peers []*Peer // current peer set
- done []task // tasks that got done this round
- new []task // the result must match this one
-}
+// This test checks that dynamic dials are launched from discovery results.
+func TestDialSchedDynDial(t *testing.T) {
+ t.Parallel()
-func runDialTest(t *testing.T, test dialtest) {
- var (
- vtime time.Time
- running int
- )
- pm := func(ps []*Peer) map[enode.ID]*Peer {
- m := make(map[enode.ID]*Peer)
- for _, p := range ps {
- m[p.ID()] = p
- }
- return m
+ config := dialConfig{
+ maxActiveDials: 5,
+ maxDialPeers: 4,
}
- for i, round := range test.rounds {
- for _, task := range round.done {
- running--
- if running < 0 {
- panic("running task counter underflow")
- }
- test.init.taskDone(task, vtime)
- }
+ runDialTest(t, config, []dialTestRound{
+ // 3 out of 4 peers are connected, leaving 2 dial slots.
+ // 9 nodes are discovered, but only 2 are dialed.
+ {
+ peersAdded: []*conn{
+ {flags: staticDialedConn, node: newNode(uintID(0x00), "")},
+ {flags: dynDialedConn, node: newNode(uintID(0x01), "")},
+ {flags: dynDialedConn, node: newNode(uintID(0x02), "")},
+ },
+ discovered: []*enode.Node{
+ newNode(uintID(0x00), "127.0.0.1:30303"), // not dialed because already connected as static peer
+ newNode(uintID(0x02), "127.0.0.1:30303"), // ...
+ newNode(uintID(0x03), "127.0.0.1:30303"),
+ newNode(uintID(0x04), "127.0.0.1:30303"),
+ newNode(uintID(0x05), "127.0.0.1:30303"), // not dialed because there are only two slots
+ newNode(uintID(0x06), "127.0.0.1:30303"), // ...
+ newNode(uintID(0x07), "127.0.0.1:30303"), // ...
+ newNode(uintID(0x08), "127.0.0.1:30303"), // ...
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x03), "127.0.0.1:30303"),
+ newNode(uintID(0x04), "127.0.0.1:30303"),
+ },
+ },
- new := test.init.newTasks(running, pm(round.peers), vtime)
- if !sametasks(new, round.new) {
- t.Errorf("ERROR round %d: got %v\nwant %v\nstate: %v\nrunning: %v",
- i, spew.Sdump(new), spew.Sdump(round.new), spew.Sdump(test.init), spew.Sdump(running))
- }
- t.Logf("round %d (running %d) new tasks: %s", i, running, strings.TrimSpace(spew.Sdump(new)))
+ // One dial completes, freeing one dial slot.
+ {
+ failed: []enode.ID{
+ uintID(0x04),
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x05), "127.0.0.1:30303"),
+ },
+ },
- // Time advances by 16 seconds on every round.
- vtime = vtime.Add(16 * time.Second)
- running += len(new)
- }
-}
+ // Dial to 0x03 completes, filling the last remaining peer slot.
+ {
+ succeeded: []enode.ID{
+ uintID(0x03),
+ },
+ failed: []enode.ID{
+ uintID(0x05),
+ },
+ discovered: []*enode.Node{
+ newNode(uintID(0x09), "127.0.0.1:30303"), // not dialed because there are no free slots
+ },
+ },
-// This test checks that dynamic dials are launched from discovery results.
-func TestDialStateDynDial(t *testing.T) {
- config := &Config{Logger: testlog.Logger(t, log.LvlTrace)}
- runDialTest(t, dialtest{
- init: newDialState(enode.ID{}, 5, config),
- rounds: []round{
- // A discovery query is launched.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
- },
- new: []task{
- &discoverTask{want: 3},
- },
- },
- // Dynamic dials are launched when it completes.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
- },
- done: []task{
- &discoverTask{results: []*enode.Node{
- newNode(uintID(2), nil), // this one is already connected and not dialed.
- newNode(uintID(3), nil),
- newNode(uintID(4), nil),
- newNode(uintID(5), nil),
- newNode(uintID(6), nil), // these are not tried because max dyn dials is 5
- newNode(uintID(7), nil), // ...
- }},
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
- },
- },
- // Some of the dials complete but no new ones are launched yet because
- // the sum of active dial count and dynamic peer count is == maxDynDials.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(3), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
- },
- done: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
- },
- },
- // No new dial tasks are launched in the this round because
- // maxDynDials has been reached.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(3), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
- },
- done: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
- },
- new: []task{
- &waitExpireTask{Duration: 19 * time.Second},
- },
- },
- // In this round, the peer with id 2 drops off. The query
- // results from last discovery lookup are reused.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(3), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(4), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(6), nil)},
- },
- },
- // More peers (3,4) drop off and dial for ID 6 completes.
- // The last query result from the discovery lookup is reused
- // and a new one is spawned because more candidates are needed.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
- },
- done: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(6), nil)},
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(7), nil)},
- &discoverTask{want: 2},
- },
- },
- // Peer 7 is connected, but there still aren't enough dynamic peers
- // (4 out of 5). However, a discovery is already running, so ensure
- // no new is started.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(7), nil)}},
- },
- done: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(7), nil)},
- },
- },
- // Finish the running node discovery with an empty set. A new lookup
- // should be immediately requested.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(0), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(5), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(7), nil)}},
- },
- done: []task{
- &discoverTask{},
- },
- new: []task{
- &discoverTask{want: 2},
- },
+ // 3 peers drop off, creating 6 dial slots. Check that 5 of those slots
+ // (i.e. up to maxActiveDialTasks) are used.
+ {
+ peersRemoved: []enode.ID{
+ uintID(0x00),
+ uintID(0x01),
+ uintID(0x02),
+ },
+ discovered: []*enode.Node{
+ newNode(uintID(0x0a), "127.0.0.1:30303"),
+ newNode(uintID(0x0b), "127.0.0.1:30303"),
+ newNode(uintID(0x0c), "127.0.0.1:30303"),
+ newNode(uintID(0x0d), "127.0.0.1:30303"),
+ newNode(uintID(0x0f), "127.0.0.1:30303"),
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x06), "127.0.0.1:30303"),
+ newNode(uintID(0x07), "127.0.0.1:30303"),
+ newNode(uintID(0x08), "127.0.0.1:30303"),
+ newNode(uintID(0x09), "127.0.0.1:30303"),
+ newNode(uintID(0x0a), "127.0.0.1:30303"),
},
},
})
}
-// Tests that bootnodes are dialed if no peers are connectd, but not otherwise.
-func TestDialStateDynDialBootnode(t *testing.T) {
- config := &Config{
- BootstrapNodes: []*enode.Node{
- newNode(uintID(1), nil),
- newNode(uintID(2), nil),
- newNode(uintID(3), nil),
- },
- Logger: testlog.Logger(t, log.LvlTrace),
+// This test checks that candidates that do not match the netrestrict list are not dialed.
+func TestDialSchedNetRestrict(t *testing.T) {
+ t.Parallel()
+
+ nodes := []*enode.Node{
+ newNode(uintID(0x01), "127.0.0.1:30303"),
+ newNode(uintID(0x02), "127.0.0.2:30303"),
+ newNode(uintID(0x03), "127.0.0.3:30303"),
+ newNode(uintID(0x04), "127.0.0.4:30303"),
+ newNode(uintID(0x05), "127.0.2.5:30303"),
+ newNode(uintID(0x06), "127.0.2.6:30303"),
+ newNode(uintID(0x07), "127.0.2.7:30303"),
+ newNode(uintID(0x08), "127.0.2.8:30303"),
+ }
+ config := dialConfig{
+ netRestrict: new(netutil.Netlist),
+ maxActiveDials: 10,
+ maxDialPeers: 10,
}
- runDialTest(t, dialtest{
- init: newDialState(enode.ID{}, 5, config),
- rounds: []round{
- {
- new: []task{
- &discoverTask{want: 5},
- },
- },
- {
- done: []task{
- &discoverTask{
- results: []*enode.Node{
- newNode(uintID(4), nil),
- newNode(uintID(5), nil),
- },
- },
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
- &discoverTask{want: 3},
- },
- },
- // No dials succeed, bootnodes still pending fallback interval
- {},
- // 1 bootnode attempted as fallback interval was reached
- {
- done: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(4), nil)},
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(5), nil)},
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
- },
- },
- // No dials succeed, 2nd bootnode is attempted
- {
- done: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(1), nil)},
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
- },
- },
- // No dials succeed, 3rd bootnode is attempted
- {
- done: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(2), nil)},
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
- },
- },
- // No dials succeed, 1st bootnode is attempted again, expired random nodes retried
- {
- done: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(3), nil)},
- &discoverTask{results: []*enode.Node{
- newNode(uintID(6), nil),
- }},
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: newNode(uintID(6), nil)},
- &discoverTask{want: 4},
- },
- },
- // Random dial succeeds, no more bootnodes are attempted
- {
- peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(6), nil)}},
- },
+ config.netRestrict.Add("127.0.2.0/24")
+ runDialTest(t, config, []dialTestRound{
+ {
+ discovered: nodes,
+ wantNewDials: nodes[4:8],
+ },
+ {
+ succeeded: []enode.ID{
+ nodes[4].ID(),
+ nodes[5].ID(),
+ nodes[6].ID(),
+ nodes[7].ID(),
},
},
})
}
-func newNode(id enode.ID, ip net.IP) *enode.Node {
- var r enr.Record
- if ip != nil {
- r.Set(enr.IP(ip))
+// This test checks that static dials work and obey the limits.
+func TestDialSchedStaticDial(t *testing.T) {
+ t.Parallel()
+
+ config := dialConfig{
+ maxActiveDials: 5,
+ maxDialPeers: 4,
}
- return enode.SignNull(&r, id)
+ runDialTest(t, config, []dialTestRound{
+ // Static dials are launched for the nodes that
+ // aren't yet connected.
+ {
+ peersAdded: []*conn{
+ {flags: dynDialedConn, node: newNode(uintID(0x01), "127.0.0.1:30303")},
+ {flags: dynDialedConn, node: newNode(uintID(0x02), "127.0.0.2:30303")},
+ },
+ update: func(d *dialScheduler) {
+ // These two are not dialed because they're already connected
+ // as dynamic peers.
+ d.addStatic(newNode(uintID(0x01), "127.0.0.1:30303"))
+ d.addStatic(newNode(uintID(0x02), "127.0.0.2:30303"))
+ // These nodes will be dialed:
+ d.addStatic(newNode(uintID(0x03), "127.0.0.3:30303"))
+ d.addStatic(newNode(uintID(0x04), "127.0.0.4:30303"))
+ d.addStatic(newNode(uintID(0x05), "127.0.0.5:30303"))
+ d.addStatic(newNode(uintID(0x06), "127.0.0.6:30303"))
+ d.addStatic(newNode(uintID(0x07), "127.0.0.7:30303"))
+ d.addStatic(newNode(uintID(0x08), "127.0.0.8:30303"))
+ d.addStatic(newNode(uintID(0x09), "127.0.0.9:30303"))
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x03), "127.0.0.3:30303"),
+ newNode(uintID(0x04), "127.0.0.4:30303"),
+ newNode(uintID(0x05), "127.0.0.5:30303"),
+ newNode(uintID(0x06), "127.0.0.6:30303"),
+ },
+ },
+ // Dial to 0x03 completes, filling a peer slot. One slot remains,
+ // two dials are launched to attempt to fill it.
+ {
+ succeeded: []enode.ID{
+ uintID(0x03),
+ },
+ failed: []enode.ID{
+ uintID(0x04),
+ uintID(0x05),
+ uintID(0x06),
+ },
+ wantResolves: map[enode.ID]*enode.Node{
+ uintID(0x04): nil,
+ uintID(0x05): nil,
+ uintID(0x06): nil,
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x08), "127.0.0.8:30303"),
+ newNode(uintID(0x09), "127.0.0.9:30303"),
+ },
+ },
+ // Peer 0x01 drops and 0x07 connects as inbound peer.
+ // Only 0x01 is dialed.
+ {
+ peersAdded: []*conn{
+ {flags: inboundConn, node: newNode(uintID(0x07), "127.0.0.7:30303")},
+ },
+ peersRemoved: []enode.ID{
+ uintID(0x01),
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x01), "127.0.0.1:30303"),
+ },
+ },
+ })
}
-// // This test checks that candidates that do not match the netrestrict list are not dialed.
-func TestDialStateNetRestrict(t *testing.T) {
- // This table always returns the same random nodes
- // in the order given below.
- nodes := []*enode.Node{
- newNode(uintID(1), net.ParseIP("127.0.0.1")),
- newNode(uintID(2), net.ParseIP("127.0.0.2")),
- newNode(uintID(3), net.ParseIP("127.0.0.3")),
- newNode(uintID(4), net.ParseIP("127.0.0.4")),
- newNode(uintID(5), net.ParseIP("127.0.2.5")),
- newNode(uintID(6), net.ParseIP("127.0.2.6")),
- newNode(uintID(7), net.ParseIP("127.0.2.7")),
- newNode(uintID(8), net.ParseIP("127.0.2.8")),
+// This test checks that removing static nodes stops connecting to them.
+func TestDialSchedRemoveStatic(t *testing.T) {
+ t.Parallel()
+
+ config := dialConfig{
+ maxActiveDials: 1,
+ maxDialPeers: 1,
}
- restrict := new(netutil.Netlist)
- restrict.Add("127.0.2.0/24")
-
- runDialTest(t, dialtest{
- init: newDialState(enode.ID{}, 10, &Config{NetRestrict: restrict}),
- rounds: []round{
- {
- new: []task{
- &discoverTask{want: 10},
- },
- },
- {
- done: []task{
- &discoverTask{results: nodes},
- },
- new: []task{
- &dialTask{flags: dynDialedConn, dest: nodes[4]},
- &dialTask{flags: dynDialedConn, dest: nodes[5]},
- &dialTask{flags: dynDialedConn, dest: nodes[6]},
- &dialTask{flags: dynDialedConn, dest: nodes[7]},
- &discoverTask{want: 6},
- },
+ runDialTest(t, config, []dialTestRound{
+ // Add static nodes.
+ {
+ update: func(d *dialScheduler) {
+ d.addStatic(newNode(uintID(0x01), "127.0.0.1:30303"))
+ d.addStatic(newNode(uintID(0x02), "127.0.0.2:30303"))
+ d.addStatic(newNode(uintID(0x03), "127.0.0.3:30303"))
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x01), "127.0.0.1:30303"),
+ },
+ },
+ // Dial to 0x01 fails.
+ {
+ failed: []enode.ID{
+ uintID(0x01),
+ },
+ wantResolves: map[enode.ID]*enode.Node{
+ uintID(0x01): nil,
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x02), "127.0.0.2:30303"),
+ },
+ },
+ // All static nodes are removed. 0x01 is in history, 0x02 is being
+ // dialed, 0x03 is in staticPool.
+ {
+ update: func(d *dialScheduler) {
+ d.removeStatic(newNode(uintID(0x01), "127.0.0.1:30303"))
+ d.removeStatic(newNode(uintID(0x02), "127.0.0.2:30303"))
+ d.removeStatic(newNode(uintID(0x03), "127.0.0.3:30303"))
+ },
+ failed: []enode.ID{
+ uintID(0x02),
+ },
+ wantResolves: map[enode.ID]*enode.Node{
+ uintID(0x02): nil,
},
},
+ // Since all static nodes are removed, they should not be dialed again.
+ {}, {}, {},
})
}
-// This test checks that static dials are launched.
-func TestDialStateStaticDial(t *testing.T) {
- config := &Config{
- StaticNodes: []*enode.Node{
- newNode(uintID(1), nil),
- newNode(uintID(2), nil),
- newNode(uintID(3), nil),
- newNode(uintID(4), nil),
- newNode(uintID(5), nil),
+// This test checks that static dials are selected at random.
+func TestDialSchedManyStaticNodes(t *testing.T) {
+ t.Parallel()
+
+ config := dialConfig{maxDialPeers: 2}
+ runDialTest(t, config, []dialTestRound{
+ {
+ peersAdded: []*conn{
+ {flags: dynDialedConn, node: newNode(uintID(0xFFFE), "")},
+ {flags: dynDialedConn, node: newNode(uintID(0xFFFF), "")},
+ },
+ update: func(d *dialScheduler) {
+ for id := uint16(0); id < 2000; id++ {
+ n := newNode(uintID(id), "127.0.0.1:30303")
+ d.addStatic(n)
+ }
+ },
},
- Logger: testlog.Logger(t, log.LvlTrace),
- }
- runDialTest(t, dialtest{
- init: newDialState(enode.ID{}, 0, config),
- rounds: []round{
- // Static dials are launched for the nodes that
- // aren't yet connected.
- {
- peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
- },
- new: []task{
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(4), nil)},
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(5), nil)},
- },
- },
- // No new tasks are launched in this round because all static
- // nodes are either connected or still being dialed.
- {
- peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
- },
- done: []task{
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
- },
- },
- // No new dial tasks are launched because all static
- // nodes are now connected.
- {
- peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(4), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(5), nil)}},
- },
- done: []task{
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(4), nil)},
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(5), nil)},
- },
- new: []task{
- &waitExpireTask{Duration: 19 * time.Second},
- },
- },
- // Wait a round for dial history to expire, no new tasks should spawn.
- {
- peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(2), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(4), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(5), nil)}},
- },
- },
- // If a static node is dropped, it should be immediately redialed,
- // irrespective whether it was originally static or dynamic.
- {
- done: []task{
- &waitExpireTask{Duration: 19 * time.Second},
- },
- peers: []*Peer{
- {rw: &conn{flags: dynDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(3), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(5), nil)}},
- },
- new: []task{
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
- },
+ {
+ peersRemoved: []enode.ID{
+ uintID(0xFFFE),
+ uintID(0xFFFF),
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x0085), "127.0.0.1:30303"),
+ newNode(uintID(0x02dc), "127.0.0.1:30303"),
+ newNode(uintID(0x0285), "127.0.0.1:30303"),
+ newNode(uintID(0x00cb), "127.0.0.1:30303"),
},
},
})
}
// This test checks that past dials are not retried for some time.
-func TestDialStateCache(t *testing.T) {
- config := &Config{
- StaticNodes: []*enode.Node{
- newNode(uintID(1), nil),
- newNode(uintID(2), nil),
- newNode(uintID(3), nil),
+func TestDialSchedHistory(t *testing.T) {
+ t.Parallel()
+
+ config := dialConfig{
+ maxActiveDials: 3,
+ maxDialPeers: 3,
+ }
+ runDialTest(t, config, []dialTestRound{
+ {
+ update: func(d *dialScheduler) {
+ d.addStatic(newNode(uintID(0x01), "127.0.0.1:30303"))
+ d.addStatic(newNode(uintID(0x02), "127.0.0.2:30303"))
+ d.addStatic(newNode(uintID(0x03), "127.0.0.3:30303"))
+ },
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x01), "127.0.0.1:30303"),
+ newNode(uintID(0x02), "127.0.0.2:30303"),
+ newNode(uintID(0x03), "127.0.0.3:30303"),
+ },
},
- Logger: testlog.Logger(t, log.LvlTrace),
+ // No new tasks are launched in this round because all static
+ // nodes are either connected or still being dialed.
+ {
+ succeeded: []enode.ID{
+ uintID(0x01),
+ uintID(0x02),
+ },
+ failed: []enode.ID{
+ uintID(0x03),
+ },
+ wantResolves: map[enode.ID]*enode.Node{
+ uintID(0x03): nil,
+ },
+ },
+ // Nothing happens in this round because we're waiting for
+ // node 0x3's history entry to expire.
+ {},
+ // The cache entry for node 0x03 has expired and is retried.
+ {
+ wantNewDials: []*enode.Node{
+ newNode(uintID(0x03), "127.0.0.3:30303"),
+ },
+ },
+ })
+}
+
+func TestDialSchedResolve(t *testing.T) {
+ t.Parallel()
+
+ config := dialConfig{
+ maxActiveDials: 1,
+ maxDialPeers: 1,
}
- runDialTest(t, dialtest{
- init: newDialState(enode.ID{}, 0, config),
- rounds: []round{
- // Static dials are launched for the nodes that
- // aren't yet connected.
- {
- peers: nil,
- new: []task{
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
- },
- },
- // No new tasks are launched in this round because all static
- // nodes are either connected or still being dialed.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(2), nil)}},
- },
- done: []task{
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(1), nil)},
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(2), nil)},
- },
- },
- // A salvage task is launched to wait for node 3's history
- // entry to expire.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(2), nil)}},
- },
- done: []task{
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
- },
- new: []task{
- &waitExpireTask{Duration: 19 * time.Second},
- },
- },
- // Still waiting for node 3's entry to expire in the cache.
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(2), nil)}},
- },
- },
- {
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(2), nil)}},
- },
- },
- // The cache entry for node 3 has expired and is retried.
- {
- done: []task{
- &waitExpireTask{Duration: 19 * time.Second},
- },
- peers: []*Peer{
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(1), nil)}},
- {rw: &conn{flags: staticDialedConn, node: newNode(uintID(2), nil)}},
- },
- new: []task{
- &dialTask{flags: staticDialedConn, dest: newNode(uintID(3), nil)},
- },
+ node := newNode(uintID(0x01), "")
+ resolved := newNode(uintID(0x01), "127.0.0.1:30303")
+ resolved2 := newNode(uintID(0x01), "127.0.0.55:30303")
+ runDialTest(t, config, []dialTestRound{
+ {
+ update: func(d *dialScheduler) {
+ d.addStatic(node)
+ },
+ wantResolves: map[enode.ID]*enode.Node{
+ uintID(0x01): resolved,
+ },
+ wantNewDials: []*enode.Node{
+ resolved,
+ },
+ },
+ {
+ failed: []enode.ID{
+ uintID(0x01),
+ },
+ wantResolves: map[enode.ID]*enode.Node{
+ uintID(0x01): resolved2,
+ },
+ wantNewDials: []*enode.Node{
+ resolved2,
},
},
})
}
-func TestDialResolve(t *testing.T) {
- config := &Config{
- Logger: testlog.Logger(t, log.LvlTrace),
- Dialer: TCPDialer{&net.Dialer{Deadline: time.Now().Add(-5 * time.Minute)}},
+// -------
+// Code below here is the framework for the tests above.
+
+type dialTestRound struct {
+ peersAdded []*conn
+ peersRemoved []enode.ID
+ update func(*dialScheduler) // called at beginning of round
+ discovered []*enode.Node // newly discovered nodes
+ succeeded []enode.ID // dials which succeed this round
+ failed []enode.ID // dials which fail this round
+ wantResolves map[enode.ID]*enode.Node
+ wantNewDials []*enode.Node // dials that should be launched in this round
+}
+
+func runDialTest(t *testing.T, config dialConfig, rounds []dialTestRound) {
+ var (
+ clock = new(mclock.Simulated)
+ iterator = newDialTestIterator()
+ dialer = newDialTestDialer()
+ resolver = new(dialTestResolver)
+ peers = make(map[enode.ID]*conn)
+ setupCh = make(chan *conn)
+ )
+
+ // Override config.
+ config.clock = clock
+ config.dialer = dialer
+ config.resolver = resolver
+ config.log = testlog.Logger(t, log.LvlTrace)
+ config.rand = rand.New(rand.NewSource(0x1111))
+
+ // Set up the dialer. The setup function below runs on the dialTask
+ // goroutine and adds the peer.
+ var dialsched *dialScheduler
+ setup := func(fd net.Conn, f connFlag, node *enode.Node) error {
+ conn := &conn{flags: f, node: node}
+ dialsched.peerAdded(conn)
+ setupCh <- conn
+ return nil
}
- resolved := newNode(uintID(1), net.IP{127, 0, 55, 234})
- resolver := &resolveMock{answer: resolved}
- state := newDialState(enode.ID{}, 0, config)
-
- // Check that the task is generated with an incomplete ID.
- dest := newNode(uintID(1), nil)
- state.addStatic(dest)
- tasks := state.newTasks(0, nil, time.Time{})
- if !reflect.DeepEqual(tasks, []task{&dialTask{flags: staticDialedConn, dest: dest}}) {
- t.Fatalf("expected dial task, got %#v", tasks)
+ dialsched = newDialScheduler(config, iterator, setup)
+ defer dialsched.stop()
+
+ for i, round := range rounds {
+ // Apply peer set updates.
+ for _, c := range round.peersAdded {
+ if peers[c.node.ID()] != nil {
+ t.Fatalf("round %d: peer %v already connected", i, c.node.ID())
+ }
+ dialsched.peerAdded(c)
+ peers[c.node.ID()] = c
+ }
+ for _, id := range round.peersRemoved {
+ c := peers[id]
+ if c == nil {
+ t.Fatalf("round %d: can't remove non-existent peer %v", i, id)
+ }
+ dialsched.peerRemoved(c)
+ }
+
+ // Init round.
+ t.Logf("round %d (%d peers)", i, len(peers))
+ resolver.setAnswers(round.wantResolves)
+ if round.update != nil {
+ round.update(dialsched)
+ }
+ iterator.addNodes(round.discovered)
+
+ // Unblock dialTask goroutines.
+ if err := dialer.completeDials(round.succeeded, nil); err != nil {
+ t.Fatalf("round %d: %v", i, err)
+ }
+ for range round.succeeded {
+ conn := <-setupCh
+ peers[conn.node.ID()] = conn
+ }
+ if err := dialer.completeDials(round.failed, errors.New("oops")); err != nil {
+ t.Fatalf("round %d: %v", i, err)
+ }
+
+ // Wait for new tasks.
+ if err := dialer.waitForDials(round.wantNewDials); err != nil {
+ t.Fatalf("round %d: %v", i, err)
+ }
+ if !resolver.checkCalls() {
+ t.Fatalf("unexpected calls to Resolve: %v", resolver.calls)
+ }
+
+ clock.Run(16 * time.Second)
}
+}
- // Now run the task, it should resolve the ID once.
- srv := &Server{
- Config: *config,
- log: config.Logger,
- staticNodeResolver: resolver,
+// dialTestIterator is the input iterator for dialer tests. This works a bit like a channel
+// with infinite buffer: nodes are added to the buffer with addNodes, which unblocks Next
+// and returns them from the iterator.
+type dialTestIterator struct {
+ cur *enode.Node
+
+ mu sync.Mutex
+ buf []*enode.Node
+ cond *sync.Cond
+ closed bool
+}
+
+func newDialTestIterator() *dialTestIterator {
+ it := &dialTestIterator{}
+ it.cond = sync.NewCond(&it.mu)
+ return it
+}
+
+// addNodes adds nodes to the iterator buffer and unblocks Next.
+func (it *dialTestIterator) addNodes(nodes []*enode.Node) {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ it.buf = append(it.buf, nodes...)
+ it.cond.Signal()
+}
+
+// Node returns the current node.
+func (it *dialTestIterator) Node() *enode.Node {
+ return it.cur
+}
+
+// Next moves to the next node.
+func (it *dialTestIterator) Next() bool {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ it.cur = nil
+ for len(it.buf) == 0 && !it.closed {
+ it.cond.Wait()
}
- tasks[0].Do(srv)
- if !reflect.DeepEqual(resolver.calls, []*enode.Node{dest}) {
- t.Fatalf("wrong resolve calls, got %v", resolver.calls)
+ if it.closed {
+ return false
}
+ it.cur = it.buf[0]
+ copy(it.buf[:], it.buf[1:])
+ it.buf = it.buf[:len(it.buf)-1]
+ return true
+}
+
+// Close ends the iterator, unblocking Next.
+func (it *dialTestIterator) Close() {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ it.closed = true
+ it.buf = nil
+ it.cond.Signal()
+}
+
+// dialTestDialer is the NodeDialer used by runDialTest.
+type dialTestDialer struct {
+ init chan *dialTestReq
+ blocked map[enode.ID]*dialTestReq
+}
- // Report it as done to the dialer, which should update the static node record.
- state.taskDone(tasks[0], time.Now())
- if state.static[uintID(1)].dest != resolved {
- t.Fatalf("state.dest not updated")
+type dialTestReq struct {
+ n *enode.Node
+ unblock chan error
+}
+
+func newDialTestDialer() *dialTestDialer {
+ return &dialTestDialer{
+ init: make(chan *dialTestReq),
+ blocked: make(map[enode.ID]*dialTestReq),
}
}
-// compares task lists but doesn't care about the order.
-func sametasks(a, b []task) bool {
- if len(a) != len(b) {
- return false
+// Dial implements NodeDialer.
+func (d *dialTestDialer) Dial(ctx context.Context, n *enode.Node) (net.Conn, error) {
+ req := &dialTestReq{n: n, unblock: make(chan error, 1)}
+ select {
+ case d.init <- req:
+ select {
+ case err := <-req.unblock:
+ pipe, _ := net.Pipe()
+ return pipe, err
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+ case <-ctx.Done():
+ return nil, ctx.Err()
}
-next:
- for _, ta := range a {
- for _, tb := range b {
- if reflect.DeepEqual(ta, tb) {
- continue next
+}
+
+// waitForDials waits for calls to Dial with the given nodes as argument.
+// Those calls will be held blocking until completeDials is called with the same nodes.
+func (d *dialTestDialer) waitForDials(nodes []*enode.Node) error {
+ waitset := make(map[enode.ID]*enode.Node)
+ for _, n := range nodes {
+ waitset[n.ID()] = n
+ }
+ timeout := time.NewTimer(1 * time.Second)
+ defer timeout.Stop()
+
+ for len(waitset) > 0 {
+ select {
+ case req := <-d.init:
+ want, ok := waitset[req.n.ID()]
+ if !ok {
+ return fmt.Errorf("attempt to dial unexpected node %v", req.n.ID())
+ }
+ if !reflect.DeepEqual(req.n, want) {
+ return fmt.Errorf("ENR of dialed node %v does not match test", req.n.ID())
}
+ delete(waitset, req.n.ID())
+ d.blocked[req.n.ID()] = req
+ case <-timeout.C:
+ var waitlist []enode.ID
+ for id := range waitset {
+ waitlist = append(waitlist, id)
+ }
+ return fmt.Errorf("timed out waiting for dials to %v", waitlist)
}
- return false
}
- return true
+
+ return d.checkUnexpectedDial()
+}
+
+func (d *dialTestDialer) checkUnexpectedDial() error {
+ select {
+ case req := <-d.init:
+ return fmt.Errorf("attempt to dial unexpected node %v", req.n.ID())
+ case <-time.After(150 * time.Millisecond):
+ return nil
+ }
}
-func uintID(i uint32) enode.ID {
- var id enode.ID
- binary.BigEndian.PutUint32(id[:], i)
- return id
+// completeDials unblocks calls to Dial for the given nodes.
+func (d *dialTestDialer) completeDials(ids []enode.ID, err error) error {
+ for _, id := range ids {
+ req := d.blocked[id]
+ if req == nil {
+ return fmt.Errorf("can't complete dial to %v", id)
+ }
+ req.unblock <- err
+ }
+ return nil
}
-// for TestDialResolve
-type resolveMock struct {
- calls []*enode.Node
- answer *enode.Node
+// dialTestResolver tracks calls to resolve.
+type dialTestResolver struct {
+ mu sync.Mutex
+ calls []enode.ID
+ answers map[enode.ID]*enode.Node
}
-func (t *resolveMock) Resolve(n *enode.Node) *enode.Node {
- t.calls = append(t.calls, n)
- return t.answer
+func (t *dialTestResolver) setAnswers(m map[enode.ID]*enode.Node) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ t.answers = m
+ t.calls = nil
+}
+
+func (t *dialTestResolver) checkCalls() bool {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ for _, id := range t.calls {
+ if _, ok := t.answers[id]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+func (t *dialTestResolver) Resolve(n *enode.Node) *enode.Node {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ t.calls = append(t.calls, n.ID())
+ return t.answers[n.ID()]
}
diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go
index 677c0aa922..a29f82cd81 100644
--- a/p2p/dnsdisc/client.go
+++ b/p2p/dnsdisc/client.go
@@ -23,6 +23,7 @@ import (
"math/rand"
"net"
"strings"
+ "sync"
"time"
"github.com/ethereum/go-ethereum/common/mclock"
@@ -31,15 +32,13 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
lru "github.com/hashicorp/golang-lru"
+ "golang.org/x/time/rate"
)
// Client discovers nodes by querying DNS servers.
type Client struct {
- cfg Config
- clock mclock.Clock
- linkCache linkCache
- trees map[string]*clientTree
-
+ cfg Config
+ clock mclock.Clock
entries *lru.Cache
}
@@ -48,6 +47,7 @@ type Config struct {
Timeout time.Duration // timeout used for DNS lookups (default 5s)
RecheckInterval time.Duration // time between tree root update checks (default 30min)
CacheLimit int // maximum number of cached records (default 1000)
+ RateLimit float64 // maximum DNS requests / second (default 3)
ValidSchemes enr.IdentityScheme // acceptable ENR identity schemes (default enode.ValidSchemes)
Resolver Resolver // the DNS resolver to use (defaults to system DNS)
Logger log.Logger // destination of client log messages (defaults to root logger)
@@ -60,9 +60,10 @@ type Resolver interface {
func (cfg Config) withDefaults() Config {
const (
- defaultTimeout = 5 * time.Second
- defaultRecheck = 30 * time.Minute
- defaultCache = 1000
+ defaultTimeout = 5 * time.Second
+ defaultRecheck = 30 * time.Minute
+ defaultRateLimit = 3
+ defaultCache = 1000
)
if cfg.Timeout == 0 {
cfg.Timeout = defaultTimeout
@@ -73,6 +74,9 @@ func (cfg Config) withDefaults() Config {
if cfg.CacheLimit == 0 {
cfg.CacheLimit = defaultCache
}
+ if cfg.RateLimit == 0 {
+ cfg.RateLimit = defaultRateLimit
+ }
if cfg.ValidSchemes == nil {
cfg.ValidSchemes = enode.ValidSchemes
}
@@ -86,32 +90,24 @@ func (cfg Config) withDefaults() Config {
}
// NewClient creates a client.
-func NewClient(cfg Config, urls ...string) (*Client, error) {
- c := &Client{
- cfg: cfg.withDefaults(),
- clock: mclock.System{},
- trees: make(map[string]*clientTree),
- }
- var err error
- if c.entries, err = lru.New(c.cfg.CacheLimit); err != nil {
- return nil, err
- }
- for _, url := range urls {
- if err := c.AddTree(url); err != nil {
- return nil, err
- }
+func NewClient(cfg Config) *Client {
+ cfg = cfg.withDefaults()
+ cache, err := lru.New(cfg.CacheLimit)
+ if err != nil {
+ panic(err)
}
- return c, nil
+ rlimit := rate.NewLimiter(rate.Limit(cfg.RateLimit), 10)
+ cfg.Resolver = &rateLimitResolver{cfg.Resolver, rlimit}
+ return &Client{cfg: cfg, entries: cache, clock: mclock.System{}}
}
-// SyncTree downloads the entire node tree at the given URL. This doesn't add the tree for
-// later use, but any previously-synced entries are reused.
+// SyncTree downloads the entire node tree at the given URL.
func (c *Client) SyncTree(url string) (*Tree, error) {
le, err := parseLink(url)
if err != nil {
return nil, fmt.Errorf("invalid enrtree URL: %v", err)
}
- ct := newClientTree(c, le)
+ ct := newClientTree(c, new(linkCache), le)
t := &Tree{entries: make(map[string]entry)}
if err := ct.syncAll(t.entries); err != nil {
return nil, err
@@ -120,75 +116,16 @@ func (c *Client) SyncTree(url string) (*Tree, error) {
return t, nil
}
-// AddTree adds a enrtree:// URL to crawl.
-func (c *Client) AddTree(url string) error {
- le, err := parseLink(url)
- if err != nil {
- return fmt.Errorf("invalid enrtree URL: %v", err)
- }
- ct, err := c.ensureTree(le)
- if err != nil {
- return err
- }
- c.linkCache.add(ct)
- return nil
-}
-
-func (c *Client) ensureTree(le *linkEntry) (*clientTree, error) {
- if tree, ok := c.trees[le.domain]; ok {
- if !tree.matchPubkey(le.pubkey) {
- return nil, fmt.Errorf("conflicting public keys for domain %q", le.domain)
- }
- return tree, nil
- }
- ct := newClientTree(c, le)
- c.trees[le.domain] = ct
- return ct, nil
-}
-
-// RandomNode retrieves the next random node.
-func (c *Client) RandomNode(ctx context.Context) *enode.Node {
- for {
- ct := c.randomTree()
- if ct == nil {
- return nil
- }
- n, err := ct.syncRandom(ctx)
- if err != nil {
- if err == ctx.Err() {
- return nil // context canceled.
- }
- c.cfg.Logger.Debug("Error in DNS random node sync", "tree", ct.loc.domain, "err", err)
- continue
- }
- if n != nil {
- return n
- }
- }
-}
-
-// randomTree returns a random tree.
-func (c *Client) randomTree() *clientTree {
- if !c.linkCache.valid() {
- c.gcTrees()
- }
- limit := rand.Intn(len(c.trees))
- for _, ct := range c.trees {
- if limit == 0 {
- return ct
+// NewIterator creates an iterator that visits all nodes at the
+// given tree URLs.
+func (c *Client) NewIterator(urls ...string) (enode.Iterator, error) {
+ it := c.newRandomIterator()
+ for _, url := range urls {
+ if err := it.addTree(url); err != nil {
+ return nil, err
}
- limit--
- }
- return nil
-}
-
-// gcTrees rebuilds the 'trees' map.
-func (c *Client) gcTrees() {
- trees := make(map[string]*clientTree)
- for t := range c.linkCache.all() {
- trees[t.loc.domain] = t
}
- c.trees = trees
+ return it, nil
}
// resolveRoot retrieves a root entry via DNS.
@@ -258,3 +195,128 @@ func (c *Client) doResolveEntry(ctx context.Context, domain, hash string) (entry
}
return nil, nameError{name, errNoEntry}
}
+
+// rateLimitResolver applies a rate limit to a Resolver.
+type rateLimitResolver struct {
+ r Resolver
+ limiter *rate.Limiter
+}
+
+func (r *rateLimitResolver) LookupTXT(ctx context.Context, domain string) ([]string, error) {
+ if err := r.limiter.Wait(ctx); err != nil {
+ return nil, err
+ }
+ return r.r.LookupTXT(ctx, domain)
+}
+
+// randomIterator traverses a set of trees and returns nodes found in them.
+type randomIterator struct {
+ cur *enode.Node
+ ctx context.Context
+ cancelFn context.CancelFunc
+ c *Client
+
+ mu sync.Mutex
+ trees map[string]*clientTree // all trees
+ lc linkCache // tracks tree dependencies
+}
+
+func (c *Client) newRandomIterator() *randomIterator {
+ ctx, cancel := context.WithCancel(context.Background())
+ return &randomIterator{
+ c: c,
+ ctx: ctx,
+ cancelFn: cancel,
+ trees: make(map[string]*clientTree),
+ }
+}
+
+// Node returns the current node.
+func (it *randomIterator) Node() *enode.Node {
+ return it.cur
+}
+
+// Close closes the iterator.
+func (it *randomIterator) Close() {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ it.cancelFn()
+ it.trees = nil
+}
+
+// Next moves the iterator to the next node.
+func (it *randomIterator) Next() bool {
+ it.cur = it.nextNode()
+ return it.cur != nil
+}
+
+// addTree adds a enrtree:// URL to the iterator.
+func (it *randomIterator) addTree(url string) error {
+ le, err := parseLink(url)
+ if err != nil {
+ return fmt.Errorf("invalid enrtree URL: %v", err)
+ }
+ it.lc.addLink("", le.str)
+ return nil
+}
+
+// nextNode syncs random tree entries until it finds a node.
+func (it *randomIterator) nextNode() *enode.Node {
+ for {
+ ct := it.nextTree()
+ if ct == nil {
+ return nil
+ }
+ n, err := ct.syncRandom(it.ctx)
+ if err != nil {
+ if err == it.ctx.Err() {
+ return nil // context canceled.
+ }
+ it.c.cfg.Logger.Debug("Error in DNS random node sync", "tree", ct.loc.domain, "err", err)
+ continue
+ }
+ if n != nil {
+ return n
+ }
+ }
+}
+
+// nextTree returns a random tree.
+func (it *randomIterator) nextTree() *clientTree {
+ it.mu.Lock()
+ defer it.mu.Unlock()
+
+ if it.lc.changed {
+ it.rebuildTrees()
+ it.lc.changed = false
+ }
+ if len(it.trees) == 0 {
+ return nil
+ }
+ limit := rand.Intn(len(it.trees))
+ for _, ct := range it.trees {
+ if limit == 0 {
+ return ct
+ }
+ limit--
+ }
+ return nil
+}
+
+// rebuildTrees rebuilds the 'trees' map.
+func (it *randomIterator) rebuildTrees() {
+ // Delete removed trees.
+ for loc := range it.trees {
+ if !it.lc.isReferenced(loc) {
+ delete(it.trees, loc)
+ }
+ }
+ // Add new trees.
+ for loc := range it.lc.backrefs {
+ if it.trees[loc] == nil {
+ link, _ := parseLink(linkPrefix + loc)
+ it.trees[loc] = newClientTree(it.c, &it.lc, link)
+ }
+ }
+}
diff --git a/p2p/dnsdisc/client_test.go b/p2p/dnsdisc/client_test.go
index d8e3ecee39..6a6705abf2 100644
--- a/p2p/dnsdisc/client_test.go
+++ b/p2p/dnsdisc/client_test.go
@@ -19,6 +19,7 @@ package dnsdisc
import (
"context"
"crypto/ecdsa"
+ "errors"
"math/rand"
"reflect"
"testing"
@@ -54,7 +55,7 @@ func TestClientSyncTree(t *testing.T) {
wantSeq = uint(1)
)
- c, _ := NewClient(Config{Resolver: r, Logger: testlog.Logger(t, log.LvlTrace)})
+ c := NewClient(Config{Resolver: r, Logger: testlog.Logger(t, log.LvlTrace)})
stree, err := c.SyncTree("enrtree://AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@n")
if err != nil {
t.Fatal("sync error:", err)
@@ -68,9 +69,6 @@ func TestClientSyncTree(t *testing.T) {
if stree.Seq() != wantSeq {
t.Errorf("synced tree has wrong seq: %d", stree.Seq())
}
- if len(c.trees) > 0 {
- t.Errorf("tree from SyncTree added to client")
- }
}
// In this test, syncing the tree fails because it contains an invalid ENR entry.
@@ -91,7 +89,7 @@ func TestClientSyncTreeBadNode(t *testing.T) {
"C7HRFPF3BLGF3YR4DY5KX3SMBE.n": "enrtree://AM5FCQLWIZX2QFPNJAP7VUERCCRNGRHWZG3YYHIUV7BVDQ5FDPRT2@morenodes.example.org",
"INDMVBZEEQ4ESVYAKGIYU74EAA.n": "enr:-----",
}
- c, _ := NewClient(Config{Resolver: r, Logger: testlog.Logger(t, log.LvlTrace)})
+ c := NewClient(Config{Resolver: r, Logger: testlog.Logger(t, log.LvlTrace)})
_, err := c.SyncTree("enrtree://AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@n")
wantErr := nameError{name: "INDMVBZEEQ4ESVYAKGIYU74EAA.n", err: entryError{typ: "enr", err: errInvalidENR}}
if err != wantErr {
@@ -99,59 +97,142 @@ func TestClientSyncTreeBadNode(t *testing.T) {
}
}
-// This test checks that RandomNode hits all entries.
-func TestClientRandomNode(t *testing.T) {
+// This test checks that randomIterator finds all entries.
+func TestIterator(t *testing.T) {
nodes := testNodes(nodesSeed1, 30)
tree, url := makeTestTree("n", nodes, nil)
r := mapResolver(tree.ToTXT("n"))
- c, _ := NewClient(Config{Resolver: r, Logger: testlog.Logger(t, log.LvlTrace)})
- if err := c.AddTree(url); err != nil {
+ c := NewClient(Config{
+ Resolver: r,
+ Logger: testlog.Logger(t, log.LvlTrace),
+ RateLimit: 500,
+ })
+ it, err := c.NewIterator(url)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ checkIterator(t, it, nodes)
+}
+
+// This test checks if closing randomIterator races.
+func TestIteratorClose(t *testing.T) {
+ nodes := testNodes(nodesSeed1, 500)
+ tree1, url1 := makeTestTree("t1", nodes, nil)
+ c := NewClient(Config{Resolver: newMapResolver(tree1.ToTXT("t1"))})
+ it, err := c.NewIterator(url1)
+ if err != nil {
t.Fatal(err)
}
- checkRandomNode(t, c, nodes)
+ done := make(chan struct{})
+ go func() {
+ for it.Next() {
+ _ = it.Node()
+ }
+ close(done)
+ }()
+
+ time.Sleep(50 * time.Millisecond)
+ it.Close()
+ <-done
}
-// This test checks that RandomNode traverses linked trees as well as explicitly added trees.
-func TestClientRandomNodeLinks(t *testing.T) {
+// This test checks that randomIterator traverses linked trees as well as explicitly added trees.
+func TestIteratorLinks(t *testing.T) {
nodes := testNodes(nodesSeed1, 40)
tree1, url1 := makeTestTree("t1", nodes[:10], nil)
tree2, url2 := makeTestTree("t2", nodes[10:], []string{url1})
- cfg := Config{
- Resolver: newMapResolver(tree1.ToTXT("t1"), tree2.ToTXT("t2")),
- Logger: testlog.Logger(t, log.LvlTrace),
- }
- c, _ := NewClient(cfg)
- if err := c.AddTree(url2); err != nil {
+ c := NewClient(Config{
+ Resolver: newMapResolver(tree1.ToTXT("t1"), tree2.ToTXT("t2")),
+ Logger: testlog.Logger(t, log.LvlTrace),
+ RateLimit: 500,
+ })
+ it, err := c.NewIterator(url2)
+ if err != nil {
t.Fatal(err)
}
- checkRandomNode(t, c, nodes)
+ checkIterator(t, it, nodes)
}
-// This test verifies that RandomNode re-checks the root of the tree to catch
+// This test verifies that randomIterator re-checks the root of the tree to catch
// updates to nodes.
-func TestClientRandomNodeUpdates(t *testing.T) {
+func TestIteratorNodeUpdates(t *testing.T) {
var (
clock = new(mclock.Simulated)
nodes = testNodes(nodesSeed1, 30)
resolver = newMapResolver()
- cfg = Config{
+ c = NewClient(Config{
Resolver: resolver,
Logger: testlog.Logger(t, log.LvlTrace),
RecheckInterval: 20 * time.Minute,
- }
- c, _ = NewClient(cfg)
+ RateLimit: 500,
+ })
+ )
+ c.clock = clock
+ tree1, url := makeTestTree("n", nodes[:25], nil)
+ it, err := c.NewIterator(url)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Sync the original tree.
+ resolver.add(tree1.ToTXT("n"))
+ checkIterator(t, it, nodes[:25])
+
+ // Ensure RandomNode returns the new nodes after the tree is updated.
+ updateSomeNodes(nodesSeed1, nodes)
+ tree2, _ := makeTestTree("n", nodes, nil)
+ resolver.clear()
+ resolver.add(tree2.ToTXT("n"))
+ t.Log("tree updated")
+
+ clock.Run(c.cfg.RecheckInterval + 1*time.Second)
+ checkIterator(t, it, nodes)
+}
+
+// This test checks that the tree root is rechecked when a couple of leaf
+// requests have failed. The test is just like TestIteratorNodeUpdates, but
+// without advancing the clock by recheckInterval after the tree update.
+func TestIteratorRootRecheckOnFail(t *testing.T) {
+ var (
+ clock = new(mclock.Simulated)
+ nodes = testNodes(nodesSeed1, 30)
+ resolver = newMapResolver()
+ c = NewClient(Config{
+ Resolver: resolver,
+ Logger: testlog.Logger(t, log.LvlTrace),
+ RecheckInterval: 20 * time.Minute,
+ RateLimit: 500,
+ // Disabling the cache is required for this test because the client doesn't
+ // notice leaf failures if all records are cached.
+ CacheLimit: 1,
+ })
)
c.clock = clock
tree1, url := makeTestTree("n", nodes[:25], nil)
+ it, err := c.NewIterator(url)
+ if err != nil {
+ t.Fatal(err)
+ }
// Sync the original tree.
resolver.add(tree1.ToTXT("n"))
- c.AddTree(url)
- checkRandomNode(t, c, nodes[:25])
+ checkIterator(t, it, nodes[:25])
- // Update some nodes and ensure RandomNode returns the new nodes as well.
+ // Ensure RandomNode returns the new nodes after the tree is updated.
+ updateSomeNodes(nodesSeed1, nodes)
+ tree2, _ := makeTestTree("n", nodes, nil)
+ resolver.clear()
+ resolver.add(tree2.ToTXT("n"))
+ t.Log("tree updated")
+
+ checkIterator(t, it, nodes)
+}
+
+// updateSomeNodes applies ENR updates to some of the given nodes.
+func updateSomeNodes(keySeed int64, nodes []*enode.Node) {
keys := testKeys(nodesSeed1, len(nodes))
for i, n := range nodes[:len(nodes)/2] {
r := n.Record()
@@ -161,26 +242,21 @@ func TestClientRandomNodeUpdates(t *testing.T) {
n2, _ := enode.New(enode.ValidSchemes, r)
nodes[i] = n2
}
- tree2, _ := makeTestTree("n", nodes, nil)
- clock.Run(cfg.RecheckInterval + 1*time.Second)
- resolver.clear()
- resolver.add(tree2.ToTXT("n"))
- checkRandomNode(t, c, nodes)
}
-// This test verifies that RandomNode re-checks the root of the tree to catch
+// This test verifies that randomIterator re-checks the root of the tree to catch
// updates to links.
-func TestClientRandomNodeLinkUpdates(t *testing.T) {
+func TestIteratorLinkUpdates(t *testing.T) {
var (
clock = new(mclock.Simulated)
nodes = testNodes(nodesSeed1, 30)
resolver = newMapResolver()
- cfg = Config{
+ c = NewClient(Config{
Resolver: resolver,
Logger: testlog.Logger(t, log.LvlTrace),
RecheckInterval: 20 * time.Minute,
- }
- c, _ = NewClient(cfg)
+ RateLimit: 500,
+ })
)
c.clock = clock
tree3, url3 := makeTestTree("t3", nodes[20:30], nil)
@@ -190,49 +266,54 @@ func TestClientRandomNodeLinkUpdates(t *testing.T) {
resolver.add(tree2.ToTXT("t2"))
resolver.add(tree3.ToTXT("t3"))
+ it, err := c.NewIterator(url1)
+ if err != nil {
+ t.Fatal(err)
+ }
+
// Sync tree1 using RandomNode.
- c.AddTree(url1)
- checkRandomNode(t, c, nodes[:20])
+ checkIterator(t, it, nodes[:20])
// Add link to tree3, remove link to tree2.
tree1, _ = makeTestTree("t1", nodes[:10], []string{url3})
resolver.add(tree1.ToTXT("t1"))
- clock.Run(cfg.RecheckInterval + 1*time.Second)
t.Log("tree1 updated")
+ clock.Run(c.cfg.RecheckInterval + 1*time.Second)
+
var wantNodes []*enode.Node
wantNodes = append(wantNodes, tree1.Nodes()...)
wantNodes = append(wantNodes, tree3.Nodes()...)
- checkRandomNode(t, c, wantNodes)
+ checkIterator(t, it, wantNodes)
// Check that linked trees are GCed when they're no longer referenced.
- if len(c.trees) != 2 {
- t.Errorf("client knows %d trees, want 2", len(c.trees))
+ knownTrees := it.(*randomIterator).trees
+ if len(knownTrees) != 2 {
+ t.Errorf("client knows %d trees, want 2", len(knownTrees))
}
}
-func checkRandomNode(t *testing.T, c *Client, wantNodes []*enode.Node) {
+func checkIterator(t *testing.T, it enode.Iterator, wantNodes []*enode.Node) {
t.Helper()
var (
want = make(map[enode.ID]*enode.Node)
- maxCalls = len(wantNodes) * 2
+ maxCalls = len(wantNodes) * 3
calls = 0
- ctx = context.Background()
)
for _, n := range wantNodes {
want[n.ID()] = n
}
for ; len(want) > 0 && calls < maxCalls; calls++ {
- n := c.RandomNode(ctx)
- if n == nil {
- t.Fatalf("RandomNode returned nil (call %d)", calls)
+ if !it.Next() {
+ t.Fatalf("Next returned false (call %d)", calls)
}
+ n := it.Node()
delete(want, n.ID())
}
- t.Logf("checkRandomNode called RandomNode %d times to find %d nodes", calls, len(wantNodes))
+ t.Logf("checkIterator called Next %d times to find %d nodes", calls, len(wantNodes))
for _, n := range want {
- t.Errorf("RandomNode didn't discover node %v", n.ID())
+ t.Errorf("iterator didn't discover node %v", n.ID())
}
}
@@ -312,5 +393,5 @@ func (mr mapResolver) LookupTXT(ctx context.Context, name string) ([]string, err
if record, ok := mr[name]; ok {
return []string{record}, nil
}
- return nil, nil
+ return nil, errors.New("not found")
}
diff --git a/p2p/dnsdisc/sync.go b/p2p/dnsdisc/sync.go
index 533dacc653..36f02acba6 100644
--- a/p2p/dnsdisc/sync.go
+++ b/p2p/dnsdisc/sync.go
@@ -18,7 +18,6 @@ package dnsdisc
import (
"context"
- "crypto/ecdsa"
"math/rand"
"time"
@@ -26,34 +25,35 @@ import (
"github.com/ethereum/go-ethereum/p2p/enode"
)
+const (
+ rootRecheckFailCount = 5 // update root if this many leaf requests fail
+)
+
// clientTree is a full tree being synced.
type clientTree struct {
- c *Client
- loc *linkEntry
- root *rootEntry
+ c *Client
+ loc *linkEntry // link to this tree
+
lastRootCheck mclock.AbsTime // last revalidation of root
- enrs *subtreeSync
- links *subtreeSync
- linkCache linkCache
-}
+ leafFailCount int
+ rootFailCount int
-func newClientTree(c *Client, loc *linkEntry) *clientTree {
- ct := &clientTree{c: c, loc: loc}
- ct.linkCache.self = ct
- return ct
-}
+ root *rootEntry
+ enrs *subtreeSync
+ links *subtreeSync
-func (ct *clientTree) matchPubkey(key *ecdsa.PublicKey) bool {
- return keysEqual(ct.loc.pubkey, key)
+ lc *linkCache // tracks all links between all trees
+ curLinks map[string]struct{} // links contained in this tree
+ linkGCRoot string // root on which last link GC has run
}
-func keysEqual(k1, k2 *ecdsa.PublicKey) bool {
- return k1.Curve == k2.Curve && k1.X.Cmp(k2.X) == 0 && k1.Y.Cmp(k2.Y) == 0
+func newClientTree(c *Client, lc *linkCache, loc *linkEntry) *clientTree {
+ return &clientTree{c: c, lc: lc, loc: loc}
}
// syncAll retrieves all entries of the tree.
func (ct *clientTree) syncAll(dest map[string]entry) error {
- if err := ct.updateRoot(); err != nil {
+ if err := ct.updateRoot(context.Background()); err != nil {
return err
}
if err := ct.links.resolveAll(dest); err != nil {
@@ -67,17 +67,26 @@ func (ct *clientTree) syncAll(dest map[string]entry) error {
// syncRandom retrieves a single entry of the tree. The Node return value
// is non-nil if the entry was a node.
-func (ct *clientTree) syncRandom(ctx context.Context) (*enode.Node, error) {
+func (ct *clientTree) syncRandom(ctx context.Context) (n *enode.Node, err error) {
if ct.rootUpdateDue() {
- if err := ct.updateRoot(); err != nil {
+ if err := ct.updateRoot(ctx); err != nil {
return nil, err
}
}
+
+ // Update fail counter for leaf request errors.
+ defer func() {
+ if err != nil {
+ ct.leafFailCount++
+ }
+ }()
+
// Link tree sync has priority, run it to completion before syncing ENRs.
if !ct.links.done() {
err := ct.syncNextLink(ctx)
return nil, err
}
+ ct.gcLinks()
// Sync next random entry in ENR tree. Once every node has been visited, we simply
// start over. This is fine because entries are cached.
@@ -87,6 +96,16 @@ func (ct *clientTree) syncRandom(ctx context.Context) (*enode.Node, error) {
return ct.syncNextRandomENR(ctx)
}
+// gcLinks removes outdated links from the global link cache. GC runs once
+// when the link sync finishes.
+func (ct *clientTree) gcLinks() {
+ if !ct.links.done() || ct.root.lroot == ct.linkGCRoot {
+ return
+ }
+ ct.lc.resetLinks(ct.loc.str, ct.curLinks)
+ ct.linkGCRoot = ct.root.lroot
+}
+
func (ct *clientTree) syncNextLink(ctx context.Context) error {
hash := ct.links.missing[0]
e, err := ct.links.resolveNext(ctx, hash)
@@ -95,12 +114,9 @@ func (ct *clientTree) syncNextLink(ctx context.Context) error {
}
ct.links.missing = ct.links.missing[1:]
- if le, ok := e.(*linkEntry); ok {
- lt, err := ct.c.ensureTree(le)
- if err != nil {
- return err
- }
- ct.linkCache.add(lt)
+ if dest, ok := e.(*linkEntry); ok {
+ ct.lc.addLink(ct.loc.str, dest.str)
+ ct.curLinks[dest.str] = struct{}{}
}
return nil
}
@@ -137,20 +153,27 @@ func removeHash(h []string, index int) []string {
}
// updateRoot ensures that the given tree has an up-to-date root.
-func (ct *clientTree) updateRoot() error {
+func (ct *clientTree) updateRoot(ctx context.Context) error {
+ if !ct.slowdownRootUpdate(ctx) {
+ return ctx.Err()
+ }
+
ct.lastRootCheck = ct.c.clock.Now()
- ctx, cancel := context.WithTimeout(context.Background(), ct.c.cfg.Timeout)
+ ctx, cancel := context.WithTimeout(ctx, ct.c.cfg.Timeout)
defer cancel()
root, err := ct.c.resolveRoot(ctx, ct.loc)
if err != nil {
+ ct.rootFailCount++
return err
}
ct.root = &root
+ ct.rootFailCount = 0
+ ct.leafFailCount = 0
// Invalidate subtrees if changed.
if ct.links == nil || root.lroot != ct.links.root {
ct.links = newSubtreeSync(ct.c, ct.loc, root.lroot, true)
- ct.linkCache.reset()
+ ct.curLinks = make(map[string]struct{})
}
if ct.enrs == nil || root.eroot != ct.enrs.root {
ct.enrs = newSubtreeSync(ct.c, ct.loc, root.eroot, false)
@@ -160,7 +183,32 @@ func (ct *clientTree) updateRoot() error {
// rootUpdateDue returns true when a root update is needed.
func (ct *clientTree) rootUpdateDue() bool {
- return ct.root == nil || time.Duration(ct.c.clock.Now()-ct.lastRootCheck) > ct.c.cfg.RecheckInterval
+ tooManyFailures := ct.leafFailCount > rootRecheckFailCount
+ scheduledCheck := ct.c.clock.Now().Sub(ct.lastRootCheck) > ct.c.cfg.RecheckInterval
+ return ct.root == nil || tooManyFailures || scheduledCheck
+}
+
+// slowdownRootUpdate applies a delay to root resolution if is tried
+// too frequently. This avoids busy polling when the client is offline.
+// Returns true if the timeout passed, false if sync was canceled.
+func (ct *clientTree) slowdownRootUpdate(ctx context.Context) bool {
+ var delay time.Duration
+ switch {
+ case ct.rootFailCount > 20:
+ delay = 10 * time.Second
+ case ct.rootFailCount > 5:
+ delay = 5 * time.Second
+ default:
+ return true
+ }
+ timeout := ct.c.clock.NewTimer(delay)
+ defer timeout.Stop()
+ select {
+ case <-timeout.C():
+ return true
+ case <-ctx.Done():
+ return false
+ }
}
// subtreeSync is the sync of an ENR or link subtree.
@@ -215,63 +263,51 @@ func (ts *subtreeSync) resolveNext(ctx context.Context, hash string) (entry, err
return e, nil
}
-// linkCache tracks the links of a tree.
+// linkCache tracks links between trees.
type linkCache struct {
- self *clientTree
- directM map[*clientTree]struct{} // direct links
- allM map[*clientTree]struct{} // direct & transitive links
+ backrefs map[string]map[string]struct{}
+ changed bool
}
-// reset clears the cache.
-func (lc *linkCache) reset() {
- lc.directM = nil
- lc.allM = nil
+func (lc *linkCache) isReferenced(r string) bool {
+ return len(lc.backrefs[r]) != 0
}
-// add adds a direct link to the cache.
-func (lc *linkCache) add(ct *clientTree) {
- if lc.directM == nil {
- lc.directM = make(map[*clientTree]struct{})
+func (lc *linkCache) addLink(from, to string) {
+ if _, ok := lc.backrefs[to][from]; ok {
+ return
}
- if _, ok := lc.directM[ct]; !ok {
- lc.invalidate()
- }
- lc.directM[ct] = struct{}{}
-}
-// invalidate resets the cache of transitive links.
-func (lc *linkCache) invalidate() {
- lc.allM = nil
-}
-
-// valid returns true when the cache of transitive links is up-to-date.
-func (lc *linkCache) valid() bool {
- // Re-check validity of child caches to catch updates.
- for ct := range lc.allM {
- if ct != lc.self && !ct.linkCache.valid() {
- lc.allM = nil
- break
- }
+ if lc.backrefs == nil {
+ lc.backrefs = make(map[string]map[string]struct{})
+ }
+ if _, ok := lc.backrefs[to]; !ok {
+ lc.backrefs[to] = make(map[string]struct{})
}
- return lc.allM != nil
+ lc.backrefs[to][from] = struct{}{}
+ lc.changed = true
}
-// all returns all trees reachable through the cache.
-func (lc *linkCache) all() map[*clientTree]struct{} {
- if lc.valid() {
- return lc.allM
- }
- // Remake lc.allM it by taking the union of all() across children.
- m := make(map[*clientTree]struct{})
- if lc.self != nil {
- m[lc.self] = struct{}{}
- }
- for ct := range lc.directM {
- m[ct] = struct{}{}
- for lt := range ct.linkCache.all() {
- m[lt] = struct{}{}
+// resetLinks clears all links of the given tree.
+func (lc *linkCache) resetLinks(from string, keep map[string]struct{}) {
+ stk := []string{from}
+ for len(stk) > 0 {
+ item := stk[len(stk)-1]
+ stk = stk[:len(stk)-1]
+
+ for r, refs := range lc.backrefs {
+ if _, ok := keep[r]; ok {
+ continue
+ }
+ if _, ok := refs[item]; !ok {
+ continue
+ }
+ lc.changed = true
+ delete(refs, item)
+ if len(refs) == 0 {
+ delete(lc.backrefs, r)
+ stk = append(stk, r)
+ }
}
}
- lc.allM = m
- return m
}
diff --git a/p2p/dnsdisc/sync_test.go b/p2p/dnsdisc/sync_test.go
new file mode 100644
index 0000000000..32af3656ef
--- /dev/null
+++ b/p2p/dnsdisc/sync_test.go
@@ -0,0 +1,83 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package dnsdisc
+
+import (
+ "math/rand"
+ "strconv"
+ "testing"
+)
+
+func TestLinkCache(t *testing.T) {
+ var lc linkCache
+
+ // Check adding links.
+ lc.addLink("1", "2")
+ if !lc.changed {
+ t.Error("changed flag not set")
+ }
+ lc.changed = false
+ lc.addLink("1", "2")
+ if lc.changed {
+ t.Error("changed flag set after adding link that's already present")
+ }
+ lc.addLink("2", "3")
+ lc.addLink("3", "1")
+ lc.addLink("2", "4")
+ lc.changed = false
+
+ if !lc.isReferenced("3") {
+ t.Error("3 not referenced")
+ }
+ if lc.isReferenced("6") {
+ t.Error("6 is referenced")
+ }
+
+ lc.resetLinks("1", nil)
+ if !lc.changed {
+ t.Error("changed flag not set")
+ }
+ if len(lc.backrefs) != 0 {
+ t.Logf("%+v", lc)
+ t.Error("reference maps should be empty")
+ }
+}
+
+func TestLinkCacheRandom(t *testing.T) {
+ tags := make([]string, 1000)
+ for i := range tags {
+ tags[i] = strconv.Itoa(i)
+ }
+
+ // Create random links.
+ var lc linkCache
+ var remove []string
+ for i := 0; i < 100; i++ {
+ a, b := tags[rand.Intn(len(tags))], tags[rand.Intn(len(tags))]
+ lc.addLink(a, b)
+ remove = append(remove, a)
+ }
+
+ // Remove all the links.
+ for _, s := range remove {
+ lc.resetLinks(s, nil)
+ }
+ if len(lc.backrefs) != 0 {
+ t.Logf("%+v", lc)
+ t.Error("reference maps should be empty")
+ }
+}
diff --git a/p2p/dnsdisc/tree.go b/p2p/dnsdisc/tree.go
index eba2ff9c0c..82a935ca41 100644
--- a/p2p/dnsdisc/tree.go
+++ b/p2p/dnsdisc/tree.go
@@ -48,7 +48,7 @@ func (t *Tree) Sign(key *ecdsa.PrivateKey, domain string) (url string, err error
}
root.sig = sig
t.root = &root
- link := &linkEntry{domain, &key.PublicKey}
+ link := newLinkEntry(domain, &key.PublicKey)
return link.String(), nil
}
@@ -209,6 +209,7 @@ type (
node *enode.Node
}
linkEntry struct {
+ str string
domain string
pubkey *ecdsa.PublicKey
}
@@ -246,7 +247,8 @@ func (e *rootEntry) sigHash() []byte {
func (e *rootEntry) verifySignature(pubkey *ecdsa.PublicKey) bool {
sig := e.sig[:crypto.RecoveryIDOffset] // remove recovery id
- return crypto.VerifySignature(crypto.FromECDSAPub(pubkey), e.sigHash(), sig)
+ enckey := crypto.FromECDSAPub(pubkey)
+ return crypto.VerifySignature(enckey, e.sigHash(), sig)
}
func (e *branchEntry) String() string {
@@ -258,8 +260,13 @@ func (e *enrEntry) String() string {
}
func (e *linkEntry) String() string {
- pubkey := b32format.EncodeToString(crypto.CompressPubkey(e.pubkey))
- return fmt.Sprintf("%s%s@%s", linkPrefix, pubkey, e.domain)
+ return linkPrefix + e.str
+}
+
+func newLinkEntry(domain string, pubkey *ecdsa.PublicKey) *linkEntry {
+ key := b32format.EncodeToString(crypto.CompressPubkey(pubkey))
+ str := key + "@" + domain
+ return &linkEntry{str, domain, pubkey}
}
// Entry Parsing
@@ -319,7 +326,7 @@ func parseLink(e string) (*linkEntry, error) {
if err != nil {
return nil, entryError{"link", errBadPubkey}
}
- return &linkEntry{domain, key}, nil
+ return &linkEntry{e, domain, key}, nil
}
func parseBranch(e string) (entry, error) {
diff --git a/p2p/dnsdisc/tree_test.go b/p2p/dnsdisc/tree_test.go
index b6d0a84336..4048c35d63 100644
--- a/p2p/dnsdisc/tree_test.go
+++ b/p2p/dnsdisc/tree_test.go
@@ -91,7 +91,7 @@ func TestParseEntry(t *testing.T) {
// Links
{
input: "enrtree://AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@nodes.example.org",
- e: &linkEntry{"nodes.example.org", &testkey.PublicKey},
+ e: &linkEntry{"AKPYQIUQIL7PSIACI32J7FGZW56E5FKHEFCCOFHILBIMW3M6LWXS2@nodes.example.org", "nodes.example.org", &testkey.PublicKey},
},
{
input: "enrtree://nodes.example.org",
diff --git a/p2p/enode/iter.go b/p2p/enode/iter.go
index 112b76d06a..664964f534 100644
--- a/p2p/enode/iter.go
+++ b/p2p/enode/iter.go
@@ -88,6 +88,8 @@ func (it *sliceIter) Next() bool {
}
func (it *sliceIter) Node() *Node {
+ it.mu.Lock()
+ defer it.mu.Unlock()
if len(it.nodes) == 0 {
return nil
}
diff --git a/p2p/metrics.go b/p2p/metrics.go
index 30bd56bd4d..44946473fa 100644
--- a/p2p/metrics.go
+++ b/p2p/metrics.go
@@ -20,102 +20,37 @@ package p2p
import (
"net"
- "sync"
- "sync/atomic"
- "time"
- "github.com/ethereum/go-ethereum/event"
- "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
)
const (
- MetricsInboundTraffic = "p2p/ingress" // Name for the registered inbound traffic meter
- MetricsOutboundTraffic = "p2p/egress" // Name for the registered outbound traffic meter
- MetricsOutboundConnects = "p2p/dials" // Name for the registered outbound connects meter
- MetricsInboundConnects = "p2p/serves" // Name for the registered inbound connects meter
-
- MeteredPeerLimit = 1024 // This amount of peers are individually metered
+ ingressMeterName = "p2p/ingress"
+ egressMeterName = "p2p/egress"
)
var (
- ingressConnectMeter = metrics.NewRegisteredMeter(MetricsInboundConnects, nil) // Meter counting the ingress connections
- ingressTrafficMeter = metrics.NewRegisteredMeter(MetricsInboundTraffic, nil) // Meter metering the cumulative ingress traffic
- egressConnectMeter = metrics.NewRegisteredMeter(MetricsOutboundConnects, nil) // Meter counting the egress connections
- egressTrafficMeter = metrics.NewRegisteredMeter(MetricsOutboundTraffic, nil) // Meter metering the cumulative egress traffic
- activePeerGauge = metrics.NewRegisteredGauge("p2p/peers", nil) // Gauge tracking the current peer count
-
- PeerIngressRegistry = metrics.NewPrefixedChildRegistry(metrics.EphemeralRegistry, MetricsInboundTraffic+"/") // Registry containing the peer ingress
- PeerEgressRegistry = metrics.NewPrefixedChildRegistry(metrics.EphemeralRegistry, MetricsOutboundTraffic+"/") // Registry containing the peer egress
-
- meteredPeerFeed event.Feed // Event feed for peer metrics
- meteredPeerCount int32 // Actually stored peer connection count
-)
-
-// MeteredPeerEventType is the type of peer events emitted by a metered connection.
-type MeteredPeerEventType int
-
-const (
- // PeerHandshakeSucceeded is the type of event
- // emitted when a peer successfully makes the handshake.
- PeerHandshakeSucceeded MeteredPeerEventType = iota
-
- // PeerHandshakeFailed is the type of event emitted when a peer fails to
- // make the handshake or disconnects before it.
- PeerHandshakeFailed
-
- // PeerDisconnected is the type of event emitted when a peer disconnects.
- PeerDisconnected
+ ingressConnectMeter = metrics.NewRegisteredMeter("p2p/serves", nil)
+ ingressTrafficMeter = metrics.NewRegisteredMeter(ingressMeterName, nil)
+ egressConnectMeter = metrics.NewRegisteredMeter("p2p/dials", nil)
+ egressTrafficMeter = metrics.NewRegisteredMeter(egressMeterName, nil)
+ activePeerGauge = metrics.NewRegisteredGauge("p2p/peers", nil)
)
-// MeteredPeerEvent is an event emitted when peers connect or disconnect.
-type MeteredPeerEvent struct {
- Type MeteredPeerEventType // Type of peer event
- Addr string // TCP address of the peer
- Elapsed time.Duration // Time elapsed between the connection and the handshake/disconnection
- Peer *Peer // Connected remote node instance
- Ingress uint64 // Ingress count at the moment of the event
- Egress uint64 // Egress count at the moment of the event
-}
-
-// SubscribeMeteredPeerEvent registers a subscription for peer life-cycle events
-// if metrics collection is enabled.
-func SubscribeMeteredPeerEvent(ch chan<- MeteredPeerEvent) event.Subscription {
- return meteredPeerFeed.Subscribe(ch)
-}
-
// meteredConn is a wrapper around a net.Conn that meters both the
// inbound and outbound network traffic.
type meteredConn struct {
- net.Conn // Network connection to wrap with metering
-
- connected time.Time // Connection time of the peer
- addr *net.TCPAddr // TCP address of the peer
- peer *Peer // Peer instance
-
- // trafficMetered denotes if the peer is registered in the traffic registries.
- // Its value is true if the metered peer count doesn't reach the limit in the
- // moment of the peer's connection.
- trafficMetered bool
- ingressMeter metrics.Meter // Meter for the read bytes of the peer
- egressMeter metrics.Meter // Meter for the written bytes of the peer
-
- lock sync.RWMutex // Lock protecting the metered connection's internals
+ net.Conn
}
// newMeteredConn creates a new metered connection, bumps the ingress or egress
// connection meter and also increases the metered peer count. If the metrics
-// system is disabled or the IP address is unspecified, this function returns
-// the original object.
+// system is disabled, function returns the original connection.
func newMeteredConn(conn net.Conn, ingress bool, addr *net.TCPAddr) net.Conn {
// Short circuit if metrics are disabled
if !metrics.Enabled {
return conn
}
- if addr == nil || addr.IP.IsUnspecified() {
- log.Warn("Peer address is unspecified")
- return conn
- }
// Bump the connection counters and wrap the connection
if ingress {
ingressConnectMeter.Mark(1)
@@ -123,12 +58,7 @@ func newMeteredConn(conn net.Conn, ingress bool, addr *net.TCPAddr) net.Conn {
egressConnectMeter.Mark(1)
}
activePeerGauge.Inc(1)
-
- return &meteredConn{
- Conn: conn,
- addr: addr,
- connected: time.Now(),
- }
+ return &meteredConn{Conn: conn}
}
// Read delegates a network read to the underlying connection, bumping the common
@@ -136,11 +66,6 @@ func newMeteredConn(conn net.Conn, ingress bool, addr *net.TCPAddr) net.Conn {
func (c *meteredConn) Read(b []byte) (n int, err error) {
n, err = c.Conn.Read(b)
ingressTrafficMeter.Mark(int64(n))
- c.lock.RLock()
- if c.trafficMetered {
- c.ingressMeter.Mark(int64(n))
- }
- c.lock.RUnlock()
return n, err
}
@@ -149,84 +74,15 @@ func (c *meteredConn) Read(b []byte) (n int, err error) {
func (c *meteredConn) Write(b []byte) (n int, err error) {
n, err = c.Conn.Write(b)
egressTrafficMeter.Mark(int64(n))
- c.lock.RLock()
- if c.trafficMetered {
- c.egressMeter.Mark(int64(n))
- }
- c.lock.RUnlock()
return n, err
}
-// handshakeDone is called after the connection passes the handshake.
-func (c *meteredConn) handshakeDone(peer *Peer) {
- if atomic.AddInt32(&meteredPeerCount, 1) >= MeteredPeerLimit {
- // Don't register the peer in the traffic registries.
- atomic.AddInt32(&meteredPeerCount, -1)
- c.lock.Lock()
- c.peer, c.trafficMetered = peer, false
- c.lock.Unlock()
- log.Warn("Metered peer count reached the limit")
- } else {
- enode := peer.Node().String()
- c.lock.Lock()
- c.peer, c.trafficMetered = peer, true
- c.ingressMeter = metrics.NewRegisteredMeter(enode, PeerIngressRegistry)
- c.egressMeter = metrics.NewRegisteredMeter(enode, PeerEgressRegistry)
- c.lock.Unlock()
- }
- meteredPeerFeed.Send(MeteredPeerEvent{
- Type: PeerHandshakeSucceeded,
- Addr: c.addr.String(),
- Peer: peer,
- Elapsed: time.Since(c.connected),
- })
-}
-
// Close delegates a close operation to the underlying connection, unregisters
// the peer from the traffic registries and emits close event.
func (c *meteredConn) Close() error {
err := c.Conn.Close()
- c.lock.RLock()
- if c.peer == nil {
- // If the peer disconnects before/during the handshake.
- c.lock.RUnlock()
- meteredPeerFeed.Send(MeteredPeerEvent{
- Type: PeerHandshakeFailed,
- Addr: c.addr.String(),
- Elapsed: time.Since(c.connected),
- })
- activePeerGauge.Dec(1)
- return err
- }
- peer := c.peer
- if !c.trafficMetered {
- // If the peer isn't registered in the traffic registries.
- c.lock.RUnlock()
- meteredPeerFeed.Send(MeteredPeerEvent{
- Type: PeerDisconnected,
- Addr: c.addr.String(),
- Peer: peer,
- })
+ if err == nil {
activePeerGauge.Dec(1)
- return err
}
- ingress, egress, enode := uint64(c.ingressMeter.Count()), uint64(c.egressMeter.Count()), c.peer.Node().String()
- c.lock.RUnlock()
-
- // Decrement the metered peer count
- atomic.AddInt32(&meteredPeerCount, -1)
-
- // Unregister the peer from the traffic registries
- PeerIngressRegistry.Unregister(enode)
- PeerEgressRegistry.Unregister(enode)
-
- meteredPeerFeed.Send(MeteredPeerEvent{
- Type: PeerDisconnected,
- Addr: c.addr.String(),
- Peer: peer,
- Ingress: ingress,
- Egress: egress,
- })
- activePeerGauge.Dec(1)
return err
}
diff --git a/p2p/peer.go b/p2p/peer.go
index 9a9788bc17..4398ad0f23 100644
--- a/p2p/peer.go
+++ b/p2p/peer.go
@@ -302,7 +302,8 @@ func (p *Peer) handle(msg Msg) error {
return fmt.Errorf("msg code out of range: %v", msg.Code)
}
if metrics.Enabled {
- metrics.GetOrRegisterMeter(fmt.Sprintf("%s/%s/%d/%#02x", MetricsInboundTraffic, proto.Name, proto.Version, msg.Code-proto.offset), nil).Mark(int64(msg.meterSize))
+ m := fmt.Sprintf("%s/%s/%d/%#02x", ingressMeterName, proto.Name, proto.Version, msg.Code-proto.offset)
+ metrics.GetOrRegisterMeter(m, nil).Mark(int64(msg.meterSize))
}
select {
case proto.in <- msg:
diff --git a/p2p/peer_test.go b/p2p/peer_test.go
index a2393ba854..e40deb98f0 100644
--- a/p2p/peer_test.go
+++ b/p2p/peer_test.go
@@ -17,15 +17,20 @@
package p2p
import (
+ "encoding/binary"
"errors"
"fmt"
"math/rand"
"net"
"reflect"
+ "strconv"
+ "strings"
"testing"
"time"
"github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/p2p/enode"
+ "github.com/ethereum/go-ethereum/p2p/enr"
)
var discard = Protocol{
@@ -45,10 +50,45 @@ var discard = Protocol{
},
}
+// uintID encodes i into a node ID.
+func uintID(i uint16) enode.ID {
+ var id enode.ID
+ binary.BigEndian.PutUint16(id[:], i)
+ return id
+}
+
+// newNode creates a node record with the given address.
+func newNode(id enode.ID, addr string) *enode.Node {
+ var r enr.Record
+ if addr != "" {
+ // Set the port if present.
+ if strings.Contains(addr, ":") {
+ hs, ps, err := net.SplitHostPort(addr)
+ if err != nil {
+ panic(fmt.Errorf("invalid address %q", addr))
+ }
+ port, err := strconv.Atoi(ps)
+ if err != nil {
+ panic(fmt.Errorf("invalid port in %q", addr))
+ }
+ r.Set(enr.TCP(port))
+ r.Set(enr.UDP(port))
+ addr = hs
+ }
+ // Set the IP.
+ ip := net.ParseIP(addr)
+ if ip == nil {
+ panic(fmt.Errorf("invalid IP %q", addr))
+ }
+ r.Set(enr.IP(ip))
+ }
+ return enode.SignNull(&r, id)
+}
+
func testPeer(protos []Protocol) (func(), *conn, *Peer, <-chan error) {
fd1, fd2 := net.Pipe()
- c1 := &conn{fd: fd1, node: newNode(randomID(), nil), transport: newTestTransport(&newkey().PublicKey, fd1)}
- c2 := &conn{fd: fd2, node: newNode(randomID(), nil), transport: newTestTransport(&newkey().PublicKey, fd2)}
+ c1 := &conn{fd: fd1, node: newNode(randomID(), ""), transport: newTestTransport(&newkey().PublicKey, fd1)}
+ c2 := &conn{fd: fd2, node: newNode(randomID(), ""), transport: newTestTransport(&newkey().PublicKey, fd2)}
for _, p := range protos {
c1.caps = append(c1.caps, p.cap())
c2.caps = append(c2.caps, p.cap())
diff --git a/p2p/rlpx.go b/p2p/rlpx.go
index c9ca6ea42f..c134aec1de 100644
--- a/p2p/rlpx.go
+++ b/p2p/rlpx.go
@@ -595,7 +595,8 @@ func (rw *rlpxFrameRW) WriteMsg(msg Msg) error {
}
msg.meterSize = msg.Size
if metrics.Enabled && msg.meterCap.Name != "" { // don't meter non-subprotocol messages
- metrics.GetOrRegisterMeter(fmt.Sprintf("%s/%s/%d/%#02x", MetricsOutboundTraffic, msg.meterCap.Name, msg.meterCap.Version, msg.meterCode), nil).Mark(int64(msg.meterSize))
+ m := fmt.Sprintf("%s/%s/%d/%#02x", egressMeterName, msg.meterCap.Name, msg.meterCap.Version, msg.meterCode)
+ metrics.GetOrRegisterMeter(m, nil).Mark(int64(msg.meterSize))
}
// write header
headbuf := make([]byte, 32)
diff --git a/p2p/server.go b/p2p/server.go
index 2e88a7f500..3084a0e703 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -52,7 +52,6 @@ const (
discmixTimeout = 5 * time.Second
// Connectivity defaults.
- maxActiveDialTasks = 16
defaultMaxPendingPeers = 50
defaultDialRatio = 3
@@ -160,6 +159,8 @@ type Config struct {
DataDir string `toml:",omitempty"`
// Logger is a custom logger to use with the p2p.Server.
Logger log.Logger `toml:",omitempty"`
+
+ clock mclock.Clock
}
// Server manages all peer connections.
@@ -187,13 +188,10 @@ type Server struct {
ntab *discover.UDPv4
DiscV5 *discv5.Network
discmix *enode.FairMix
-
- staticNodeResolver nodeResolver
+ dialsched *dialScheduler
// Channels into the run loop.
quit chan struct{}
- addstatic chan *enode.Node
- removestatic chan *enode.Node
addtrusted chan *enode.Node
removetrusted chan *enode.Node
peerOp chan peerOpFunc
@@ -312,47 +310,57 @@ func (srv *Server) LocalNode() *enode.LocalNode {
// Peers returns all connected peers.
func (srv *Server) Peers() []*Peer {
var ps []*Peer
- select {
- // Note: We'd love to put this function into a variable but
- // that seems to cause a weird compiler error in some
- // environments.
- case srv.peerOp <- func(peers map[enode.ID]*Peer) {
+ srv.doPeerOp(func(peers map[enode.ID]*Peer) {
for _, p := range peers {
ps = append(ps, p)
}
- }:
- <-srv.peerOpDone
- case <-srv.quit:
- }
+ })
return ps
}
// PeerCount returns the number of connected peers.
func (srv *Server) PeerCount() int {
var count int
- select {
- case srv.peerOp <- func(ps map[enode.ID]*Peer) { count = len(ps) }:
- <-srv.peerOpDone
- case <-srv.quit:
- }
+ srv.doPeerOp(func(ps map[enode.ID]*Peer) {
+ count = len(ps)
+ })
return count
}
-// AddPeer connects to the given node and maintains the connection until the
-// server is shut down. If the connection fails for any reason, the server will
-// attempt to reconnect the peer.
+// AddPeer adds the given node to the static node set. When there is room in the peer set,
+// the server will connect to the node. If the connection fails for any reason, the server
+// will attempt to reconnect the peer.
func (srv *Server) AddPeer(node *enode.Node) {
- select {
- case srv.addstatic <- node:
- case <-srv.quit:
- }
+ srv.dialsched.addStatic(node)
}
-// RemovePeer disconnects from the given node
+// RemovePeer removes a node from the static node set. It also disconnects from the given
+// node if it is currently connected as a peer.
+//
+// This method blocks until all protocols have exited and the peer is removed. Do not use
+// RemovePeer in protocol implementations, call Disconnect on the Peer instead.
func (srv *Server) RemovePeer(node *enode.Node) {
- select {
- case srv.removestatic <- node:
- case <-srv.quit:
+ var (
+ ch chan *PeerEvent
+ sub event.Subscription
+ )
+ // Disconnect the peer on the main loop.
+ srv.doPeerOp(func(peers map[enode.ID]*Peer) {
+ srv.dialsched.removeStatic(node)
+ if peer := peers[node.ID()]; peer != nil {
+ ch = make(chan *PeerEvent, 1)
+ sub = srv.peerFeed.Subscribe(ch)
+ peer.Disconnect(DiscRequested)
+ }
+ })
+ // Wait for the peer connection to end.
+ if ch != nil {
+ defer sub.Unsubscribe()
+ for ev := range ch {
+ if ev.Peer == node.ID() && ev.Type == PeerEventTypeDrop {
+ return
+ }
+ }
}
}
@@ -447,6 +455,9 @@ func (srv *Server) Start() (err error) {
if srv.log == nil {
srv.log = log.Root()
}
+ if srv.clock == nil {
+ srv.clock = mclock.System{}
+ }
if srv.NoDial && srv.ListenAddr == "" {
srv.log.Warn("P2P server will be useless, neither dialing nor listening")
}
@@ -461,15 +472,10 @@ func (srv *Server) Start() (err error) {
if srv.listenFunc == nil {
srv.listenFunc = net.Listen
}
- if srv.Dialer == nil {
- srv.Dialer = TCPDialer{&net.Dialer{Timeout: defaultDialTimeout}}
- }
srv.quit = make(chan struct{})
srv.delpeer = make(chan peerDrop)
srv.checkpointPostHandshake = make(chan *conn)
srv.checkpointAddPeer = make(chan *conn)
- srv.addstatic = make(chan *enode.Node)
- srv.removestatic = make(chan *enode.Node)
srv.addtrusted = make(chan *enode.Node)
srv.removetrusted = make(chan *enode.Node)
srv.peerOp = make(chan peerOpFunc)
@@ -486,11 +492,10 @@ func (srv *Server) Start() (err error) {
if err := srv.setupDiscovery(); err != nil {
return err
}
+ srv.setupDialScheduler()
- dynPeers := srv.maxDialedConns()
- dialer := newDialState(srv.localnode.ID(), dynPeers, &srv.Config)
srv.loopWG.Add(1)
- go srv.run(dialer)
+ go srv.run()
return nil
}
@@ -593,7 +598,6 @@ func (srv *Server) setupDiscovery() error {
}
srv.ntab = ntab
srv.discmix.AddSource(ntab.RandomNodes())
- srv.staticNodeResolver = ntab
}
// Discovery V5
@@ -616,6 +620,47 @@ func (srv *Server) setupDiscovery() error {
return nil
}
+func (srv *Server) setupDialScheduler() {
+ config := dialConfig{
+ self: srv.localnode.ID(),
+ maxDialPeers: srv.maxDialedConns(),
+ maxActiveDials: srv.MaxPendingPeers,
+ log: srv.Logger,
+ netRestrict: srv.NetRestrict,
+ dialer: srv.Dialer,
+ clock: srv.clock,
+ }
+ if srv.ntab != nil {
+ config.resolver = srv.ntab
+ }
+ if config.dialer == nil {
+ config.dialer = tcpDialer{&net.Dialer{Timeout: defaultDialTimeout}}
+ }
+ srv.dialsched = newDialScheduler(config, srv.discmix, srv.SetupConn)
+ for _, n := range srv.StaticNodes {
+ srv.dialsched.addStatic(n)
+ }
+}
+
+func (srv *Server) maxInboundConns() int {
+ return srv.MaxPeers - srv.maxDialedConns()
+}
+
+func (srv *Server) maxDialedConns() (limit int) {
+ if srv.NoDial || srv.MaxPeers == 0 {
+ return 0
+ }
+ if srv.DialRatio == 0 {
+ limit = srv.MaxPeers / defaultDialRatio
+ } else {
+ limit = srv.MaxPeers / srv.DialRatio
+ }
+ if limit == 0 {
+ limit = 1
+ }
+ return limit
+}
+
func (srv *Server) setupListening() error {
// Launch the listener.
listener, err := srv.listenFunc("tcp", srv.ListenAddr)
@@ -642,26 +687,27 @@ func (srv *Server) setupListening() error {
return nil
}
-type dialer interface {
- newTasks(running int, peers map[enode.ID]*Peer, now time.Time) []task
- taskDone(task, time.Time)
- addStatic(*enode.Node)
- removeStatic(*enode.Node)
+// doPeerOp runs fn on the main loop.
+func (srv *Server) doPeerOp(fn peerOpFunc) {
+ select {
+ case srv.peerOp <- fn:
+ <-srv.peerOpDone
+ case <-srv.quit:
+ }
}
-func (srv *Server) run(dialstate dialer) {
+// run is the main loop of the server.
+func (srv *Server) run() {
srv.log.Info("Started P2P networking", "self", srv.localnode.Node().URLv4())
defer srv.loopWG.Done()
defer srv.nodedb.Close()
defer srv.discmix.Close()
+ defer srv.dialsched.stop()
var (
peers = make(map[enode.ID]*Peer)
inboundCount = 0
trusted = make(map[enode.ID]bool, len(srv.TrustedNodes))
- taskdone = make(chan task, maxActiveDialTasks)
- runningTasks []task
- queuedTasks []task // tasks that can't run yet
)
// Put trusted nodes into a map to speed up checks.
// Trusted peers are loaded on startup or added via AddTrustedPeer RPC.
@@ -669,79 +715,27 @@ func (srv *Server) run(dialstate dialer) {
trusted[n.ID()] = true
}
- // removes t from runningTasks
- delTask := func(t task) {
- for i := range runningTasks {
- if runningTasks[i] == t {
- runningTasks = append(runningTasks[:i], runningTasks[i+1:]...)
- break
- }
- }
- }
- // starts until max number of active tasks is satisfied
- startTasks := func(ts []task) (rest []task) {
- i := 0
- for ; len(runningTasks) < maxActiveDialTasks && i < len(ts); i++ {
- t := ts[i]
- srv.log.Trace("New dial task", "task", t)
- go func() { t.Do(srv); taskdone <- t }()
- runningTasks = append(runningTasks, t)
- }
- return ts[i:]
- }
- scheduleTasks := func() {
- // Start from queue first.
- queuedTasks = append(queuedTasks[:0], startTasks(queuedTasks)...)
- // Query dialer for new tasks and start as many as possible now.
- if len(runningTasks) < maxActiveDialTasks {
- nt := dialstate.newTasks(len(runningTasks)+len(queuedTasks), peers, time.Now())
- queuedTasks = append(queuedTasks, startTasks(nt)...)
- }
- }
-
running:
for {
- scheduleTasks()
-
select {
case <-srv.quit:
// The server was stopped. Run the cleanup logic.
break running
- case n := <-srv.addstatic:
- // This channel is used by AddPeer to add to the
- // ephemeral static peer list. Add it to the dialer,
- // it will keep the node connected.
- srv.log.Trace("Adding static node", "node", n)
- dialstate.addStatic(n)
-
- case n := <-srv.removestatic:
- // This channel is used by RemovePeer to send a
- // disconnect request to a peer and begin the
- // stop keeping the node connected.
- srv.log.Trace("Removing static node", "node", n)
- dialstate.removeStatic(n)
- if p, ok := peers[n.ID()]; ok {
- p.Disconnect(DiscRequested)
- }
-
case n := <-srv.addtrusted:
- // This channel is used by AddTrustedPeer to add an enode
+ // This channel is used by AddTrustedPeer to add a node
// to the trusted node set.
srv.log.Trace("Adding trusted node", "node", n)
trusted[n.ID()] = true
- // Mark any already-connected peer as trusted
if p, ok := peers[n.ID()]; ok {
p.rw.set(trustedConn, true)
}
case n := <-srv.removetrusted:
- // This channel is used by RemoveTrustedPeer to remove an enode
+ // This channel is used by RemoveTrustedPeer to remove a node
// from the trusted node set.
srv.log.Trace("Removing trusted node", "node", n)
delete(trusted, n.ID())
-
- // Unmark any already-connected peer as trusted
if p, ok := peers[n.ID()]; ok {
p.rw.set(trustedConn, false)
}
@@ -751,14 +745,6 @@ running:
op(peers)
srv.peerOpDone <- struct{}{}
- case t := <-taskdone:
- // A task got done. Tell dialstate about it so it
- // can update its state and remove it from the active
- // tasks list.
- srv.log.Trace("Dial task done", "task", t)
- dialstate.taskDone(t, time.Now())
- delTask(t)
-
case c := <-srv.checkpointPostHandshake:
// A connection has passed the encryption handshake so
// the remote identity is known (but hasn't been verified yet).
@@ -775,33 +761,22 @@ running:
err := srv.addPeerChecks(peers, inboundCount, c)
if err == nil {
// The handshakes are done and it passed all checks.
- p := newPeer(srv.log, c, srv.Protocols)
- // If message events are enabled, pass the peerFeed
- // to the peer
- if srv.EnableMsgEvents {
- p.events = &srv.peerFeed
- }
- name := truncateName(c.name)
- p.log.Debug("Adding p2p peer", "addr", p.RemoteAddr(), "peers", len(peers)+1, "name", name)
- go srv.runPeer(p)
+ p := srv.launchPeer(c)
peers[c.node.ID()] = p
+ srv.log.Debug("Adding p2p peer", "peercount", len(peers), "id", p.ID(), "conn", c.flags, "addr", p.RemoteAddr(), "name", truncateName(c.name))
+ srv.dialsched.peerAdded(c)
if p.Inbound() {
inboundCount++
}
- if conn, ok := c.fd.(*meteredConn); ok {
- conn.handshakeDone(p)
- }
}
- // The dialer logic relies on the assumption that
- // dial tasks complete after the peer has been added or
- // discarded. Unblock the task last.
c.cont <- err
case pd := <-srv.delpeer:
// A peer disconnected.
d := common.PrettyDuration(mclock.Now() - pd.created)
- pd.log.Debug("Removing p2p peer", "addr", pd.RemoteAddr(), "peers", len(peers)-1, "duration", d, "req", pd.requested, "err", pd.err)
delete(peers, pd.ID())
+ srv.log.Debug("Removing p2p peer", "peercount", len(peers), "id", pd.ID(), "duration", d, "req", pd.requested, "err", pd.err)
+ srv.dialsched.peerRemoved(pd.rw)
if pd.Inbound() {
inboundCount--
}
@@ -826,14 +801,14 @@ running:
// is closed.
for len(peers) > 0 {
p := <-srv.delpeer
- p.log.Trace("<-delpeer (spindown)", "remainingTasks", len(runningTasks))
+ p.log.Trace("<-delpeer (spindown)")
delete(peers, p.ID())
}
}
func (srv *Server) postHandshakeChecks(peers map[enode.ID]*Peer, inboundCount int, c *conn) error {
switch {
- case !c.is(trustedConn|staticDialedConn) && len(peers) >= srv.MaxPeers:
+ case !c.is(trustedConn) && len(peers) >= srv.MaxPeers:
return DiscTooManyPeers
case !c.is(trustedConn) && c.is(inboundConn) && inboundCount >= srv.maxInboundConns():
return DiscTooManyPeers
@@ -856,27 +831,12 @@ func (srv *Server) addPeerChecks(peers map[enode.ID]*Peer, inboundCount int, c *
return srv.postHandshakeChecks(peers, inboundCount, c)
}
-func (srv *Server) maxInboundConns() int {
- return srv.MaxPeers - srv.maxDialedConns()
-}
-
-func (srv *Server) maxDialedConns() int {
- if srv.NoDiscovery || srv.NoDial {
- return 0
- }
- r := srv.DialRatio
- if r == 0 {
- r = defaultDialRatio
- }
- return srv.MaxPeers / r
-}
-
// listenLoop runs in its own goroutine and accepts
// inbound connections.
func (srv *Server) listenLoop() {
- defer srv.loopWG.Done()
srv.log.Debug("TCP listener up", "addr", srv.listener.Addr())
+ // The slots channel limits accepts of new connections.
tokens := defaultMaxPendingPeers
if srv.MaxPendingPeers > 0 {
tokens = srv.MaxPendingPeers
@@ -886,6 +846,15 @@ func (srv *Server) listenLoop() {
slots <- struct{}{}
}
+ // Wait for slots to be returned on exit. This ensures all connection goroutines
+ // are down before listenLoop returns.
+ defer srv.loopWG.Done()
+ defer func() {
+ for i := 0; i < cap(slots); i++ {
+ <-slots
+ }
+ }()
+
for {
// Wait for a free slot before accepting.
<-slots
@@ -901,6 +870,7 @@ func (srv *Server) listenLoop() {
continue
} else if err != nil {
srv.log.Debug("Read error", "err", err)
+ slots <- struct{}{}
return
}
break
@@ -929,18 +899,20 @@ func (srv *Server) listenLoop() {
}
func (srv *Server) checkInboundConn(fd net.Conn, remoteIP net.IP) error {
- if remoteIP != nil {
- // Reject connections that do not match NetRestrict.
- if srv.NetRestrict != nil && !srv.NetRestrict.Contains(remoteIP) {
- return fmt.Errorf("not whitelisted in NetRestrict")
- }
- // Reject Internet peers that try too often.
- srv.inboundHistory.expire(time.Now())
- if !netutil.IsLAN(remoteIP) && srv.inboundHistory.contains(remoteIP.String()) {
- return fmt.Errorf("too many attempts")
- }
- srv.inboundHistory.add(remoteIP.String(), time.Now().Add(inboundThrottleTime))
+ if remoteIP == nil {
+ return nil
+ }
+ // Reject connections that do not match NetRestrict.
+ if srv.NetRestrict != nil && !srv.NetRestrict.Contains(remoteIP) {
+ return fmt.Errorf("not whitelisted in NetRestrict")
}
+ // Reject Internet peers that try too often.
+ now := srv.clock.Now()
+ srv.inboundHistory.expire(now, nil)
+ if !netutil.IsLAN(remoteIP) && srv.inboundHistory.contains(remoteIP.String()) {
+ return fmt.Errorf("too many attempts")
+ }
+ srv.inboundHistory.add(remoteIP.String(), now.Add(inboundThrottleTime))
return nil
}
@@ -952,7 +924,6 @@ func (srv *Server) SetupConn(fd net.Conn, flags connFlag, dialDest *enode.Node)
err := srv.setupConn(c, flags, dialDest)
if err != nil {
c.close(err)
- srv.log.Trace("Setting up connection failed", "addr", fd.RemoteAddr(), "err", err)
}
return err
}
@@ -971,7 +942,9 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
if dialDest != nil {
dialPubkey = new(ecdsa.PublicKey)
if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil {
- return errors.New("dial destination doesn't have a secp256k1 public key")
+ err = errors.New("dial destination doesn't have a secp256k1 public key")
+ srv.log.Trace("Setting up connection failed", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err)
+ return err
}
}
@@ -1038,11 +1011,6 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
//END - QUORUM Permissioning
- if conn, ok := c.fd.(*meteredConn); ok {
- p := newPeer(srv.log, c, srv.Protocols)
- conn.handshakeDone(p)
- }
-
err = srv.checkpoint(c, srv.checkpointPostHandshake)
if err != nil {
clog.Trace("Rejected peer", "err", err)
@@ -1052,7 +1020,7 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
// Run the capability negotiation handshake.
phs, err := c.doProtoHandshake(srv.ourHandshake)
if err != nil {
- clog.Trace("Failed proto handshake", "err", err)
+ clog.Trace("Failed p2p handshake", "err", err)
return err
}
if id := c.node.ID(); !bytes.Equal(crypto.Keccak256(phs.ID), id[:]) {
@@ -1066,9 +1034,6 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
return err
}
- // If the checks completed successfully, the connection has been added as a peer and
- // runPeer has been launched.
- clog.Trace("Connection set up", "inbound", dialDest == nil)
return nil
}
@@ -1100,15 +1065,22 @@ func (srv *Server) checkpoint(c *conn, stage chan<- *conn) error {
return <-c.cont
}
+func (srv *Server) launchPeer(c *conn) *Peer {
+ p := newPeer(srv.log, c, srv.Protocols)
+ if srv.EnableMsgEvents {
+ // If message events are enabled, pass the peerFeed
+ // to the peer.
+ p.events = &srv.peerFeed
+ }
+ go srv.runPeer(p)
+ return p
+}
+
// runPeer runs in its own goroutine for each peer.
-// it waits until the Peer logic returns and removes
-// the peer.
func (srv *Server) runPeer(p *Peer) {
if srv.newPeerHook != nil {
srv.newPeerHook(p)
}
-
- // broadcast peer add
srv.peerFeed.Send(&PeerEvent{
Type: PeerEventTypeAdd,
Peer: p.ID(),
@@ -1116,10 +1088,18 @@ func (srv *Server) runPeer(p *Peer) {
LocalAddress: p.LocalAddr().String(),
})
- // run the protocol
+ // Run the per-peer main loop.
remoteRequested, err := p.run()
- // broadcast peer drop
+ // Announce disconnect on the main loop to update the peer set.
+ // The main loop waits for existing peers to be sent on srv.delpeer
+ // before returning, so this send should not select on srv.quit.
+ srv.delpeer <- peerDrop{p, err, remoteRequested}
+
+ // Broadcast peer drop to external subscribers. This needs to be
+ // after the send to delpeer so subscribers have a consistent view of
+ // the peer set (i.e. Server.Peers() doesn't include the peer when the
+ // event is received).
srv.peerFeed.Send(&PeerEvent{
Type: PeerEventTypeDrop,
Peer: p.ID(),
@@ -1127,10 +1107,6 @@ func (srv *Server) runPeer(p *Peer) {
RemoteAddress: p.RemoteAddr().String(),
LocalAddress: p.LocalAddr().String(),
})
-
- // Note: run waits for existing peers to be sent on srv.delpeer
- // before returning, so this send should not select on srv.quit.
- srv.delpeer <- peerDrop{p, err, remoteRequested}
}
// NodeInfo represents a short summary of the information known about the host.
diff --git a/p2p/server_test.go b/p2p/server_test.go
index cae657443d..dce00a5092 100644
--- a/p2p/server_test.go
+++ b/p2p/server_test.go
@@ -39,10 +39,6 @@ import (
"golang.org/x/crypto/sha3"
)
-// func init() {
-// log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(false))))
-// }
-
type testTransport struct {
rpub *ecdsa.PublicKey
*rlpx
@@ -77,11 +73,12 @@ func (c *testTransport) close(err error) {
func startTestServer(t *testing.T, remoteKey *ecdsa.PublicKey, pf func(*Peer)) *Server {
config := Config{
- Name: "test",
- MaxPeers: 10,
- ListenAddr: "127.0.0.1:0",
- PrivateKey: newkey(),
- Logger: testlog.Logger(t, log.LvlTrace),
+ Name: "test",
+ MaxPeers: 10,
+ ListenAddr: "127.0.0.1:0",
+ NoDiscovery: true,
+ PrivateKey: newkey(),
+ Logger: testlog.Logger(t, log.LvlTrace),
}
server := &Server{
Config: config,
@@ -136,11 +133,10 @@ func TestServerDial(t *testing.T) {
t.Fatalf("could not setup listener: %v", err)
}
defer listener.Close()
- accepted := make(chan net.Conn)
+ accepted := make(chan net.Conn, 1)
go func() {
conn, err := listener.Accept()
if err != nil {
- t.Error("accept error:", err)
return
}
accepted <- conn
@@ -210,155 +206,38 @@ func TestServerDial(t *testing.T) {
}
}
-// This test checks that tasks generated by dialstate are
-// actually executed and taskdone is called for them.
-func TestServerTaskScheduling(t *testing.T) {
- var (
- done = make(chan *testTask)
- quit, returned = make(chan struct{}), make(chan struct{})
- tc = 0
- tg = taskgen{
- newFunc: func(running int, peers map[enode.ID]*Peer) []task {
- tc++
- return []task{&testTask{index: tc - 1}}
- },
- doneFunc: func(t task) {
- select {
- case done <- t.(*testTask):
- case <-quit:
- }
- },
- }
- )
+// This test checks that RemovePeer disconnects the peer if it is connected.
+func TestServerRemovePeerDisconnect(t *testing.T) {
+ srv1 := &Server{Config: Config{
+ PrivateKey: newkey(),
+ MaxPeers: 1,
+ NoDiscovery: true,
+ Logger: testlog.Logger(t, log.LvlTrace).New("server", "1"),
+ }}
+ srv2 := &Server{Config: Config{
+ PrivateKey: newkey(),
+ MaxPeers: 1,
+ NoDiscovery: true,
+ NoDial: true,
+ ListenAddr: "127.0.0.1:0",
+ Logger: testlog.Logger(t, log.LvlTrace).New("server", "2"),
+ }}
+ srv1.Start()
+ defer srv1.Stop()
+ srv2.Start()
+ defer srv2.Stop()
- // The Server in this test isn't actually running
- // because we're only interested in what run does.
- db, _ := enode.OpenDB("")
- srv := &Server{
- Config: Config{MaxPeers: 10},
- localnode: enode.NewLocalNode(db, newkey()),
- nodedb: db,
- discmix: enode.NewFairMix(0),
- quit: make(chan struct{}),
- running: true,
- log: log.New(),
- }
- srv.loopWG.Add(1)
- go func() {
- srv.run(tg)
- close(returned)
- }()
-
- var gotdone []*testTask
- for i := 0; i < 100; i++ {
- gotdone = append(gotdone, <-done)
+ if !syncAddPeer(srv1, srv2.Self()) {
+ t.Fatal("peer not connected")
}
- for i, task := range gotdone {
- if task.index != i {
- t.Errorf("task %d has wrong index, got %d", i, task.index)
- break
- }
- if !task.called {
- t.Errorf("task %d was not called", i)
- break
- }
- }
-
- close(quit)
- srv.Stop()
- select {
- case <-returned:
- case <-time.After(500 * time.Millisecond):
- t.Error("Server.run did not return within 500ms")
+ srv1.RemovePeer(srv2.Self())
+ if srv1.PeerCount() > 0 {
+ t.Fatal("removed peer still connected")
}
}
-// This test checks that Server doesn't drop tasks,
-// even if newTasks returns more than the maximum number of tasks.
-func TestServerManyTasks(t *testing.T) {
- alltasks := make([]task, 300)
- for i := range alltasks {
- alltasks[i] = &testTask{index: i}
- }
-
- var (
- db, _ = enode.OpenDB("")
- srv = &Server{
- quit: make(chan struct{}),
- localnode: enode.NewLocalNode(db, newkey()),
- nodedb: db,
- running: true,
- log: log.New(),
- discmix: enode.NewFairMix(0),
- }
- done = make(chan *testTask)
- start, end = 0, 0
- )
- defer srv.Stop()
- srv.loopWG.Add(1)
- go srv.run(taskgen{
- newFunc: func(running int, peers map[enode.ID]*Peer) []task {
- start, end = end, end+maxActiveDialTasks+10
- if end > len(alltasks) {
- end = len(alltasks)
- }
- return alltasks[start:end]
- },
- doneFunc: func(tt task) {
- done <- tt.(*testTask)
- },
- })
-
- doneset := make(map[int]bool)
- timeout := time.After(2 * time.Second)
- for len(doneset) < len(alltasks) {
- select {
- case tt := <-done:
- if doneset[tt.index] {
- t.Errorf("task %d got done more than once", tt.index)
- } else {
- doneset[tt.index] = true
- }
- case <-timeout:
- t.Errorf("%d of %d tasks got done within 2s", len(doneset), len(alltasks))
- for i := 0; i < len(alltasks); i++ {
- if !doneset[i] {
- t.Logf("task %d not done", i)
- }
- }
- return
- }
- }
-}
-
-type taskgen struct {
- newFunc func(running int, peers map[enode.ID]*Peer) []task
- doneFunc func(task)
-}
-
-func (tg taskgen) newTasks(running int, peers map[enode.ID]*Peer, now time.Time) []task {
- return tg.newFunc(running, peers)
-}
-func (tg taskgen) taskDone(t task, now time.Time) {
- tg.doneFunc(t)
-}
-func (tg taskgen) addStatic(*enode.Node) {
-}
-func (tg taskgen) removeStatic(*enode.Node) {
-}
-
-type testTask struct {
- index int
- called bool
-}
-
-func (t *testTask) Do(srv *Server) {
- t.called = true
-}
-
-// This test checks that connections are disconnected
-// just after the encryption handshake when the server is
-// at capacity. Trusted connections should still be accepted.
+// This test checks that connections are disconnected just after the encryption handshake
+// when the server is at capacity. Trusted connections should still be accepted.
func TestServerAtCap(t *testing.T) {
trustedNode := newkey()
trustedID := enode.PubkeyToIDV4(&trustedNode.PublicKey)
@@ -368,7 +247,8 @@ func TestServerAtCap(t *testing.T) {
MaxPeers: 10,
NoDial: true,
NoDiscovery: true,
- TrustedNodes: []*enode.Node{newNode(trustedID, nil)},
+ TrustedNodes: []*enode.Node{newNode(trustedID, "")},
+ Logger: testlog.Logger(t, log.LvlTrace),
},
}
if err := srv.Start(); err != nil {
@@ -406,14 +286,14 @@ func TestServerAtCap(t *testing.T) {
}
// Remove from trusted set and try again
- srv.RemoveTrustedPeer(newNode(trustedID, nil))
+ srv.RemoveTrustedPeer(newNode(trustedID, ""))
c = newconn(trustedID)
if err := srv.checkpoint(c, srv.checkpointPostHandshake); err != DiscTooManyPeers {
t.Error("wrong error for insert:", err)
}
// Add anotherID to trusted set and try again
- srv.AddTrustedPeer(newNode(anotherID, nil))
+ srv.AddTrustedPeer(newNode(anotherID, ""))
c = newconn(anotherID)
if err := srv.checkpoint(c, srv.checkpointPostHandshake); err != nil {
t.Error("unexpected error for trusted conn @posthandshake:", err)
@@ -444,9 +324,9 @@ func TestServerPeerLimits(t *testing.T) {
NoDial: true,
NoDiscovery: true,
Protocols: []Protocol{discard},
+ Logger: testlog.Logger(t, log.LvlTrace),
},
newTransport: func(fd net.Conn) transport { return tp },
- log: log.New(),
}
if err := srv.Start(); err != nil {
t.Fatalf("couldn't start server: %v", err)
@@ -743,7 +623,7 @@ func TestServerInboundThrottle(t *testing.T) {
conn.Close()
// Dial again. This time the server should close the connection immediately.
- connClosed := make(chan struct{})
+ connClosed := make(chan struct{}, 1)
conn, err = net.DialTimeout("tcp", srv.ListenAddr, timeout)
if err != nil {
t.Fatalf("could not dial: %v", err)
@@ -797,3 +677,23 @@ func (l *fakeAddrListener) Accept() (net.Conn, error) {
func (c *fakeAddrConn) RemoteAddr() net.Addr {
return c.remoteAddr
}
+
+func syncAddPeer(srv *Server, node *enode.Node) bool {
+ var (
+ ch = make(chan *PeerEvent)
+ sub = srv.SubscribeEvents(ch)
+ timeout = time.After(2 * time.Second)
+ )
+ defer sub.Unsubscribe()
+ srv.AddPeer(node)
+ for {
+ select {
+ case ev := <-ch:
+ if ev.Type == PeerEventTypeAdd && ev.Peer == node.ID() {
+ return true
+ }
+ case <-timeout:
+ return false
+ }
+ }
+}
diff --git a/p2p/simulations/adapters/exec.go b/p2p/simulations/adapters/exec.go
index 7c6ec94621..18ec9c69b8 100644
--- a/p2p/simulations/adapters/exec.go
+++ b/p2p/simulations/adapters/exec.go
@@ -287,7 +287,7 @@ func (n *ExecNode) Stop() error {
if err := n.Cmd.Process.Signal(syscall.SIGTERM); err != nil {
return n.Cmd.Process.Kill()
}
- waitErr := make(chan error)
+ waitErr := make(chan error, 1)
go func() {
waitErr <- n.Cmd.Wait()
}()
diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go
index 9787082e18..651d9546ae 100644
--- a/p2p/simulations/adapters/inproc.go
+++ b/p2p/simulations/adapters/inproc.go
@@ -17,6 +17,7 @@
package adapters
import (
+ "context"
"errors"
"fmt"
"math"
@@ -126,7 +127,7 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) {
// Dial implements the p2p.NodeDialer interface by connecting to the node using
// an in-memory net.Pipe
-func (s *SimAdapter) Dial(dest *enode.Node) (conn net.Conn, err error) {
+func (s *SimAdapter) Dial(ctx context.Context, dest *enode.Node) (conn net.Conn, err error) {
node, ok := s.GetNode(dest.ID())
if !ok {
return nil, fmt.Errorf("unknown node: %s", dest.ID())
diff --git a/p2p/util.go b/p2p/util.go
index 018cc40e98..3c5f6b8508 100644
--- a/p2p/util.go
+++ b/p2p/util.go
@@ -18,7 +18,8 @@ package p2p
import (
"container/heap"
- "time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
)
// expHeap tracks strings and their expiry time.
@@ -27,16 +28,16 @@ type expHeap []expItem
// expItem is an entry in addrHistory.
type expItem struct {
item string
- exp time.Time
+ exp mclock.AbsTime
}
// nextExpiry returns the next expiry time.
-func (h *expHeap) nextExpiry() time.Time {
+func (h *expHeap) nextExpiry() mclock.AbsTime {
return (*h)[0].exp
}
// add adds an item and sets its expiry time.
-func (h *expHeap) add(item string, exp time.Time) {
+func (h *expHeap) add(item string, exp mclock.AbsTime) {
heap.Push(h, expItem{item, exp})
}
@@ -51,15 +52,18 @@ func (h expHeap) contains(item string) bool {
}
// expire removes items with expiry time before 'now'.
-func (h *expHeap) expire(now time.Time) {
- for h.Len() > 0 && h.nextExpiry().Before(now) {
- heap.Pop(h)
+func (h *expHeap) expire(now mclock.AbsTime, onExp func(string)) {
+ for h.Len() > 0 && h.nextExpiry() < now {
+ item := heap.Pop(h)
+ if onExp != nil {
+ onExp(item.(expItem).item)
+ }
}
}
// heap.Interface boilerplate
func (h expHeap) Len() int { return len(h) }
-func (h expHeap) Less(i, j int) bool { return h[i].exp.Before(h[j].exp) }
+func (h expHeap) Less(i, j int) bool { return h[i].exp < h[j].exp }
func (h expHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *expHeap) Push(x interface{}) { *h = append(*h, x.(expItem)) }
func (h *expHeap) Pop() interface{} {
diff --git a/p2p/util_test.go b/p2p/util_test.go
index c9f2648dc9..cc0d2b215f 100644
--- a/p2p/util_test.go
+++ b/p2p/util_test.go
@@ -19,30 +19,32 @@ package p2p
import (
"testing"
"time"
+
+ "github.com/ethereum/go-ethereum/common/mclock"
)
func TestExpHeap(t *testing.T) {
var h expHeap
var (
- basetime = time.Unix(4000, 0)
+ basetime = mclock.AbsTime(10)
exptimeA = basetime.Add(2 * time.Second)
exptimeB = basetime.Add(3 * time.Second)
exptimeC = basetime.Add(4 * time.Second)
)
- h.add("a", exptimeA)
h.add("b", exptimeB)
+ h.add("a", exptimeA)
h.add("c", exptimeC)
- if !h.nextExpiry().Equal(exptimeA) {
+ if h.nextExpiry() != exptimeA {
t.Fatal("wrong nextExpiry")
}
if !h.contains("a") || !h.contains("b") || !h.contains("c") {
t.Fatal("heap doesn't contain all live items")
}
- h.expire(exptimeA.Add(1))
- if !h.nextExpiry().Equal(exptimeB) {
+ h.expire(exptimeA.Add(1), nil)
+ if h.nextExpiry() != exptimeB {
t.Fatal("wrong nextExpiry")
}
if h.contains("a") {
diff --git a/params/bootnodes.go b/params/bootnodes.go
index 967cba5bc4..f27e5b7d18 100644
--- a/params/bootnodes.go
+++ b/params/bootnodes.go
@@ -16,6 +16,8 @@
package params
+import "github.com/ethereum/go-ethereum/common"
+
// MainnetBootnodes are the enode URLs of the P2P bootstrap nodes running on
// the main Ethereum network.
var MainnetBootnodes = []string{
@@ -28,9 +30,6 @@ var MainnetBootnodes = []string{
"enode://103858bdb88756c71f15e9b5e09b56dc1be52f0a5021d46301dbbfb7e130029cc9d0d6f73f693bc29b665770fff7da4d34f3c6379fe12721b5d7a0bcb5ca1fc1@191.234.162.198:30303", // bootnode-azure-brazilsouth-001
"enode://715171f50508aba88aecd1250af392a45a330af91d7b90701c436b618c86aaa1589c9184561907bebbb56439b8f8787bc01f49a7c77276c58c1b09822d75e8e8@52.231.165.108:30303", // bootnode-azure-koreasouth-001
"enode://5d6d7cd20d6da4bb83a1d28cadb5d409b64edf314c0335df658c1a54e32c7c4a7ab7823d57c39b6a757556e68ff1df17c748b698544a55cb488b52479a92b60f@104.42.217.25:30303", // bootnode-azure-westus-001
-
- // Ethereum Foundation C++ Bootnodes
- "enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303", // DE
}
// TestnetBootnodes are the enode URLs of the P2P bootstrap nodes running on the
@@ -61,7 +60,7 @@ var GoerliBootnodes = []string{
"enode://f4a9c6ee28586009fb5a96c8af13a58ed6d8315a9eee4772212c1d4d9cebe5a8b8a78ea4434f318726317d04a3f531a1ef0420cf9752605a562cfe858c46e263@213.186.16.82:30303",
// Ethereum Foundation bootnode
- "enode://573b6607cd59f241e30e4c4943fd50e99e2b6f42f9bd5ca111659d309c06741247f4f1e93843ad3e8c8c18b6e2d94c161b7ef67479b3938780a97134b618b5ce@52.56.136.200:30303",
+ "enode://a61215641fb8714a373c80edbfa0ea8878243193f57c96eeb44d0bc019ef295abd4e044fd619bfc4c59731a73fb79afe84e9ab6da0c743ceb479cbb6d263fa91@3.11.147.67:30303",
}
// DiscoveryV5Bootnodes are the enode URLs of the P2P bootstrap nodes for the
@@ -72,3 +71,14 @@ var DiscoveryV5Bootnodes = []string{
"enode://1c7a64d76c0334b0418c004af2f67c50e36a3be60b5e4790bdac0439d21603469a85fad36f2473c9a80eb043ae60936df905fa28f1ff614c3e5dc34f15dcd2dc@40.118.3.223:30306",
"enode://85c85d7143ae8bb96924f2b54f1b3e70d8c4d367af305325d30a61385a432f247d2c75c45c6b4a60335060d072d7f5b35dd1d4c45f76941f62a4f83b6e75daaf@40.118.3.223:30307",
}
+
+const dnsPrefix = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@"
+
+// These DNS names provide bootstrap connectivity for public testnets and the mainnet.
+// See https://github.com/ethereum/discv4-dns-lists for more information.
+var KnownDNSNetworks = map[common.Hash]string{
+ MainnetGenesisHash: dnsPrefix + "all.mainnet.ethdisco.net",
+ TestnetGenesisHash: dnsPrefix + "all.ropsten.ethdisco.net",
+ RinkebyGenesisHash: dnsPrefix + "all.rinkeby.ethdisco.net",
+ GoerliGenesisHash: dnsPrefix + "all.goerli.ethdisco.net",
+}
diff --git a/params/config.go b/params/config.go
index 42a77b0b63..08d9f315da 100644
--- a/params/config.go
+++ b/params/config.go
@@ -67,15 +67,16 @@ var (
ConstantinopleBlock: big.NewInt(7280000),
PetersburgBlock: big.NewInt(7280000),
IstanbulBlock: big.NewInt(9069000),
+ MuirGlacierBlock: big.NewInt(9200000),
Ethash: new(EthashConfig),
}
// MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network.
MainnetTrustedCheckpoint = &TrustedCheckpoint{
- SectionIndex: 270,
- SectionHead: common.HexToHash("0xb67c33d838a60c282c2fb49b188fbbac1ef8565ffb4a1c4909b0a05885e72e40"),
- CHTRoot: common.HexToHash("0x781daa4607782300da85d440df3813ba38a1262585231e35e9480726de81dbfc"),
- BloomRoot: common.HexToHash("0xfd8951fa6d779cbc981df40dc31056ed1a549db529349d7dfae016f9d96cae72"),
+ SectionIndex: 289,
+ SectionHead: common.HexToHash("0x5a95eed1a6e01d58b59f86c754cda88e8d6bede65428530eb0bec03267cda6a9"),
+ CHTRoot: common.HexToHash("0x6d4abf2b0f3c015952e6a3cbd5cc9885aacc29b8e55d4de662d29783c74a62bf"),
+ BloomRoot: common.HexToHash("0x1af2a8abbaca8048136b02f782cb6476ab546313186a1d1bd2b02df88ea48e7e"),
}
// MainnetCheckpointOracle contains a set of configs for the main network oracle.
@@ -105,15 +106,16 @@ var (
ConstantinopleBlock: big.NewInt(4230000),
PetersburgBlock: big.NewInt(4939394),
IstanbulBlock: big.NewInt(6485846),
+ MuirGlacierBlock: big.NewInt(7117117),
Ethash: new(EthashConfig),
}
// TestnetTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network.
TestnetTrustedCheckpoint = &TrustedCheckpoint{
- SectionIndex: 204,
- SectionHead: common.HexToHash("0xa39168b51c3205456f30ce6a91f3590a43295b15a1c8c2ab86bb8c06b8ad1808"),
- CHTRoot: common.HexToHash("0x9a3654147b79882bfc4e16fbd3421512aa7e4dfadc6c511923980e0877bdf3b4"),
- BloomRoot: common.HexToHash("0xe72b979522d94fa45c1331639316da234a9bb85062d64d72e13afe1d3f5c17d5"),
+ SectionIndex: 223,
+ SectionHead: common.HexToHash("0x9aa51ca383f5075f816e0b8ce7125075cd562b918839ee286c03770722147661"),
+ CHTRoot: common.HexToHash("0x755c6a5931b7bd36e55e47f3f1e81fa79c930ae15c55682d3a85931eedaf8cf2"),
+ BloomRoot: common.HexToHash("0xabc37762d11b29dc7dde11b89846e2308ba681eeb015b6a202ef5e242bc107e8"),
}
// TestnetCheckpointOracle contains a set of configs for the Ropsten test network oracle.
@@ -151,10 +153,10 @@ var (
// RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network.
RinkebyTrustedCheckpoint = &TrustedCheckpoint{
- SectionIndex: 163,
- SectionHead: common.HexToHash("0x36e5deaa46f258bece94b05d8e10f1ef68f422fb62ed47a2b6e616aa26e84997"),
- CHTRoot: common.HexToHash("0x829b9feca1c2cdf5a4cf3efac554889e438ee4df8718c2ce3e02555a02d9e9e5"),
- BloomRoot: common.HexToHash("0x58c01de24fdae7c082ebbe7665f189d0aa4d90ee10e72086bf56651c63269e54"),
+ SectionIndex: 181,
+ SectionHead: common.HexToHash("0xdda275f3e9ecadf4834a6a682db1ca3db6945fa4014c82dadcad032fc5c1aefa"),
+ CHTRoot: common.HexToHash("0x0fdfdbdb12e947e838fe26dd3ada4cc3092d6fa22aefec719b83f16004b5e596"),
+ BloomRoot: common.HexToHash("0xfd8dc404a438eaa5cf93dd58dbaeed648aa49d563b511892262acff77c5db7db"),
}
// RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle.
@@ -190,10 +192,10 @@ var (
// GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network.
GoerliTrustedCheckpoint = &TrustedCheckpoint{
- SectionIndex: 47,
- SectionHead: common.HexToHash("0x00c5b54c6c9a73660501fd9273ccdb4c5bbdbe5d7b8b650e28f881ec9d2337f6"),
- CHTRoot: common.HexToHash("0xef35caa155fd659f57167e7d507de2f8132cbb31f771526481211d8a977d704c"),
- BloomRoot: common.HexToHash("0xbda330402f66008d52e7adc748da28535b1212a7912a21244acd2ba77ff0ff06"),
+ SectionIndex: 66,
+ SectionHead: common.HexToHash("0xeea3a7b2cb275956f3049dd27e6cdacd8a6ef86738d593d556efee5361019475"),
+ CHTRoot: common.HexToHash("0x11712af50b4083dc5910e452ca69fbfc0f2940770b9846200a573f87a0af94e6"),
+ BloomRoot: common.HexToHash("0x331b7a7b273e81daeac8cafb9952a16669d7facc7be3b0ebd3a792b4d8b95cc5"),
}
// GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle.
@@ -214,19 +216,19 @@ var (
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil, nil, false, 32, 35, big.NewInt(0), big.NewInt(0), nil, nil}
+ AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil, nil, false, 32, 35, big.NewInt(0), big.NewInt(0), nil, nil}
// AllCliqueProtocolChanges contains every protocol change (EIPs) introduced
// and accepted by the Ethereum core developers into the Clique consensus.
//
// This configuration is intentionally not using keyed fields to force anyone
// adding flags to the config to also have to set these fields.
- AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}, nil, false, 32, 32, big.NewInt(0), big.NewInt(0), nil, nil}
+ AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}, nil, false, 32, 32, big.NewInt(0), big.NewInt(0), nil, nil}
- TestChainConfig = &ChainConfig{big.NewInt(10), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil, nil, false, 32, 32, big.NewInt(0), big.NewInt(0), nil, nil}
+ TestChainConfig = &ChainConfig{big.NewInt(10), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil, nil, false, 32, 32, big.NewInt(0), big.NewInt(0), nil, nil}
TestRules = TestChainConfig.Rules(new(big.Int))
- QuorumTestChainConfig = &ChainConfig{big.NewInt(10), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, new(EthashConfig), nil, nil, true, 64, 32, big.NewInt(0), big.NewInt(0), nil, big.NewInt(0)}
+ QuorumTestChainConfig = &ChainConfig{big.NewInt(10), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil, nil, true, 64, 32, big.NewInt(0), big.NewInt(0), nil, big.NewInt(0)}
)
// TrustedCheckpoint represents a set of post-processed trie roots (CHT and
@@ -300,6 +302,7 @@ type ChainConfig struct {
ConstantinopleBlock *big.Int `json:"constantinopleBlock,omitempty"` // Constantinople switch block (nil = no fork, 0 = already activated)
PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople)
IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul)
+ MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated)
EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated)
// Various consensus engines
@@ -318,7 +321,6 @@ type ChainConfig struct {
// to track multiple changes to maxCodeSize
MaxCodeSizeConfig []MaxCodeConfigStruct `json:"maxCodeSizeConfig,omitempty"`
// Quorum
-
PrivacyEnhancementsBlock *big.Int `json:"privacyEnhancementsBlock,omitempty"`
}
@@ -367,7 +369,7 @@ func (c *ChainConfig) String() string {
default:
engine = "unknown"
}
- return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v IsQuorum: %v Constantinople: %v TransactionSizeLimit: %v MaxCodeSize: %v Petersburg: %v Istanbul: %v PrivacyEnhancements: %v Engine: %v}",
+ return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v IsQuorum: %v Constantinople: %v TransactionSizeLimit: %v MaxCodeSize: %v Petersburg: %v Istanbul: %v Muir Glacier: %v PrivacyEnhancements: %v Engine: %v}",
c.ChainID,
c.HomesteadBlock,
c.DAOForkBlock,
@@ -382,6 +384,7 @@ func (c *ChainConfig) String() string {
c.MaxCodeSize,
c.PetersburgBlock,
c.IstanbulBlock,
+ c.MuirGlacierBlock,
c.PrivacyEnhancementsBlock,
engine,
)
@@ -436,6 +439,11 @@ func (c *ChainConfig) IsConstantinople(num *big.Int) bool {
return isForked(c.ConstantinopleBlock, num)
}
+// IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater.
+func (c *ChainConfig) IsMuirGlacier(num *big.Int) bool {
+ return isForked(c.MuirGlacierBlock, num)
+}
+
// IsPetersburg returns whether num is either
// - equal to or greater than the PetersburgBlock fork block,
// - OR is nil, and Constantinople is active
@@ -622,6 +630,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error {
{"constantinopleBlock", c.ConstantinopleBlock},
{"petersburgBlock", c.PetersburgBlock},
{"istanbulBlock", c.IstanbulBlock},
+ {"muirGlacierBlock", c.MuirGlacierBlock},
} {
if lastFork.name != "" {
// Next one must be higher number
@@ -678,6 +687,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int, isQuor
if isForkIncompatible(c.IstanbulBlock, newcfg.IstanbulBlock, head) {
return newCompatError("Istanbul fork block", c.IstanbulBlock, newcfg.IstanbulBlock)
}
+ if isForkIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, head) {
+ return newCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock)
+ }
if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) {
return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock)
}
diff --git a/params/version.go b/params/version.go
index 7f224ecb3e..12095889d0 100644
--- a/params/version.go
+++ b/params/version.go
@@ -23,7 +23,7 @@ import (
const (
VersionMajor = 1 // Major version component of the current release
VersionMinor = 9 // Minor version component of the current release
- VersionPatch = 8 // Patch version component of the current release
+ VersionPatch = 11 // Patch version component of the current release
VersionMeta = "stable" // Version metadata to append to the version string
QuorumVersionMajor = 21
diff --git a/raft/handler_test.go b/raft/handler_test.go
index 7afa795a20..f3eecf9f34 100644
--- a/raft/handler_test.go
+++ b/raft/handler_test.go
@@ -155,9 +155,9 @@ func prepareServiceContext(key *ecdsa.PrivateKey) (ctx *node.ServiceContext, cfg
EventMux: new(event.TypeMux),
}
// config is private field so we need some workaround to set the value
- configField := reflect.ValueOf(ctx).Elem().FieldByName("config")
+ configField := reflect.ValueOf(ctx).Elem().FieldByName("Config")
configField = reflect.NewAt(configField.Type(), unsafe.Pointer(configField.UnsafeAddr())).Elem()
- configField.Set(reflect.ValueOf(cfg))
+ configField.Set(reflect.ValueOf(*cfg))
return
}
diff --git a/rpc/client.go b/rpc/client.go
index e17b6bc453..f467ccd197 100644
--- a/rpc/client.go
+++ b/rpc/client.go
@@ -276,6 +276,9 @@ func (c *Client) Call(result interface{}, method string, args ...interface{}) er
// The result must be a pointer so that package json can unmarshal into it. You
// can also pass nil, in which case the result is ignored.
func (c *Client) CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error {
+ if result != nil && reflect.TypeOf(result).Kind() != reflect.Ptr {
+ return fmt.Errorf("call result parameter must be pointer or nil interface: %v", result)
+ }
msg, err := c.newMessage(method, args...)
if err != nil {
return err
diff --git a/rpc/client_test.go b/rpc/client_test.go
index 01e86a567c..ceedc33df6 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -50,6 +50,23 @@ func TestClientRequest(t *testing.T) {
}
}
+func TestClientResponseType(t *testing.T) {
+ server := newTestServer()
+ defer server.Stop()
+ client := DialInProc(server)
+ defer client.Close()
+
+ if err := client.Call(nil, "test_echo", "hello", 10, &echoArgs{"world"}); err != nil {
+ t.Errorf("Passing nil as result should be fine, but got an error: %v", err)
+ }
+ var resultVar echoResult
+ // Note: passing the var, not a ref
+ err := client.Call(resultVar, "test_echo", "hello", 10, &echoArgs{"world"})
+ if err == nil {
+ t.Error("Passing a var as result should be an error")
+ }
+}
+
func TestClientBatchRequest(t *testing.T) {
server := newTestServer()
defer server.Stop()
@@ -281,7 +298,7 @@ func TestClientSubscribeClose(t *testing.T) {
var (
nc = make(chan int)
- errc = make(chan error)
+ errc = make(chan error, 1)
sub *ClientSubscription
err error
)
diff --git a/rpc/endpoints.go b/rpc/endpoints.go
index 988e2a8854..82d87fbafd 100644
--- a/rpc/endpoints.go
+++ b/rpc/endpoints.go
@@ -26,9 +26,30 @@ import (
"github.com/ethereum/go-ethereum/plugin/security"
)
+// checkModuleAvailability checks that all names given in modules are actually
+// available API services.
+func checkModuleAvailability(modules []string, apis []API) (bad, available []string) {
+ availableSet := make(map[string]struct{})
+ for _, api := range apis {
+ if _, ok := availableSet[api.Namespace]; !ok {
+ availableSet[api.Namespace] = struct{}{}
+ available = append(available, api.Namespace)
+ }
+ }
+ for _, name := range modules {
+ if _, ok := availableSet[name]; !ok {
+ bad = append(bad, name)
+ }
+ }
+ return bad, available
+}
+
// StartHTTPEndpoint starts the HTTP RPC endpoint, configured with cors/vhosts/modules
// Quorum: tlsConfigSource and authManager are introduced to secure the HTTP endpoint
func StartHTTPEndpoint(endpoint string, apis []API, modules []string, cors []string, vhosts []string, timeouts HTTPTimeouts, tlsConfigSource security.TLSConfigurationSource, authManager security.AuthenticationManager) (net.Listener, *Server, bool, error) {
+ if bad, available := checkModuleAvailability(modules, apis); len(bad) > 0 {
+ log.Error("Unavailable modules in HTTP API list", "unavailable", bad, "available", available)
+ }
// Generate the whitelist based on the allowed modules
whitelist := make(map[string]bool)
for _, module := range modules {
@@ -88,7 +109,9 @@ func startListener(endpoint string, tlsConfigSource security.TLSConfigurationSou
// StartWSEndpoint starts a websocket endpoint
// Quorum: tlsConfigSource and authManager are introduced to secure the WS endpoint
func StartWSEndpoint(endpoint string, apis []API, modules []string, wsOrigins []string, exposeAll bool, tlsConfigSource security.TLSConfigurationSource, authManager security.AuthenticationManager) (net.Listener, *Server, bool, error) {
-
+ if bad, available := checkModuleAvailability(modules, apis); len(bad) > 0 {
+ log.Error("Unavailable modules in WS API list", "unavailable", bad, "available", available)
+ }
// Generate the whitelist based on the allowed modules
whitelist := make(map[string]bool)
for _, module := range modules {
diff --git a/rpc/websocket.go b/rpc/websocket.go
index 7bed8e7238..535c07bbcb 100644
--- a/rpc/websocket.go
+++ b/rpc/websocket.go
@@ -128,12 +128,12 @@ func (e wsHandshakeError) Error() string {
return s
}
-// DialWebsocket creates a new RPC client that communicates with a JSON-RPC server
-// that is listening on the given endpoint.
+// DialWebsocketWithDialer creates a new RPC client that communicates with a JSON-RPC server
+// that is listening on the given endpoint using the provided dialer.
//
// The context is used for the initial connection establishment. It does not
// affect subsequent interactions with the client.
-func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error) {
+func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, dialer websocket.Dialer) (*Client, error) {
return DialWebsocketWithCustomTLS(ctx, endpoint, origin, nil)
}
@@ -146,18 +146,20 @@ func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error
// The context is used for the initial connection establishment. It does not
// affect subsequent interactions with the client.
func DialWebsocketWithCustomTLS(ctx context.Context, endpoint, origin string, tlsConfig *tls.Config) (*Client, error) {
- endpoint, header, err := wsClientHeaders(endpoint, origin)
- if err != nil {
- return nil, err
- }
dialer := websocket.Dialer{
ReadBufferSize: wsReadBuffer,
WriteBufferSize: wsWriteBuffer,
WriteBufferPool: wsBufferPool,
}
+
+ endpoint, header, err := wsClientHeaders(endpoint, origin)
+ if err != nil {
+ return nil, err
+ }
if tlsConfig != nil {
dialer.TLSClientConfig = tlsConfig
}
+
credProviderFunc, hasCredProviderFunc := ctx.Value(CtxCredentialsProvider).(HttpCredentialsProviderFunc)
return newClient(ctx, func(ctx context.Context) (ServerCodec, error) {
if hasCredProviderFunc {
@@ -180,6 +182,20 @@ func DialWebsocketWithCustomTLS(ctx context.Context, endpoint, origin string, tl
})
}
+// DialWebsocket creates a new RPC client that communicates with a JSON-RPC server
+// that is listening on the given endpoint.
+//
+// The context is used for the initial connection establishment. It does not
+// affect subsequent interactions with the client.
+func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error) {
+ dialer := websocket.Dialer{
+ ReadBufferSize: wsReadBuffer,
+ WriteBufferSize: wsWriteBuffer,
+ WriteBufferPool: wsBufferPool,
+ }
+ return DialWebsocketWithDialer(ctx, endpoint, origin, dialer)
+}
+
func wsClientHeaders(endpoint, origin string) (string, http.Header, error) {
endpointURL, err := url.Parse(endpoint)
if err != nil {
diff --git a/signer/core/signed_data.go b/signer/core/signed_data.go
index f512be7cea..3a827afa2d 100644
--- a/signer/core/signed_data.go
+++ b/signer/core/signed_data.go
@@ -923,7 +923,9 @@ func isPrimitiveTypeValid(primitiveType string) bool {
primitiveType == "bytes30" ||
primitiveType == "bytes30[]" ||
primitiveType == "bytes31" ||
- primitiveType == "bytes31[]" {
+ primitiveType == "bytes31[]" ||
+ primitiveType == "bytes32" ||
+ primitiveType == "bytes32[]" {
return true
}
if primitiveType == "int" ||
diff --git a/signer/fourbyte/abi.go b/signer/fourbyte/abi.go
index 585eae1cd8..796086d415 100644
--- a/signer/fourbyte/abi.go
+++ b/signer/fourbyte/abi.go
@@ -137,7 +137,7 @@ func parseCallData(calldata []byte, abidata string) (*decodedCallData, error) {
}
values, err := method.Inputs.UnpackValues(argdata)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("signature %q matches, but arguments mismatch: %v", method.String(), err)
}
// Everything valid, assemble the call infos for the signer
decoded := decodedCallData{signature: method.Sig(), name: method.RawName}
diff --git a/signer/fourbyte/validation.go b/signer/fourbyte/validation.go
index 4d042d240f..fd13e0a630 100644
--- a/signer/fourbyte/validation.go
+++ b/signer/fourbyte/validation.go
@@ -74,13 +74,13 @@ func (db *Database) ValidateTransaction(selector *string, tx *core.SendTxArgs) (
messages.Crit("Transaction recipient is the zero address")
}
// Semantic fields validated, try to make heads or tails of the call data
- db.validateCallData(selector, data, messages)
+ db.ValidateCallData(selector, data, messages)
return messages, nil
}
-// validateCallData checks if the ABI call-data + method selector (if given) can
+// ValidateCallData checks if the ABI call-data + method selector (if given) can
// be parsed and seems to match.
-func (db *Database) validateCallData(selector *string, data []byte, messages *core.ValidationMessages) {
+func (db *Database) ValidateCallData(selector *string, data []byte, messages *core.ValidationMessages) {
// If the data is empty, we have a plain value transfer, nothing more to do
if len(data) == 0 {
return
@@ -110,7 +110,7 @@ func (db *Database) validateCallData(selector *string, data []byte, messages *co
return
}
if info, err := verifySelector(embedded, data); err != nil {
- messages.Warn(fmt.Sprintf("Transaction contains data, but provided ABI signature could not be varified: %v", err))
+ messages.Warn(fmt.Sprintf("Transaction contains data, but provided ABI signature could not be verified: %v", err))
} else {
messages.Info(info.String())
}
diff --git a/signer/rules/rules.go b/signer/rules/rules.go
index cb375a62ad..03e5136730 100644
--- a/signer/rules/rules.go
+++ b/signer/rules/rules.go
@@ -22,12 +22,12 @@ import (
"os"
"strings"
+ "github.com/dop251/goja"
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/signer/core"
"github.com/ethereum/go-ethereum/signer/rules/deps"
"github.com/ethereum/go-ethereum/signer/storage"
- "github.com/robertkrimen/otto"
)
var (
@@ -36,13 +36,13 @@ var (
// consoleOutput is an override for the console.log and console.error methods to
// stream the output into the configured output stream instead of stdout.
-func consoleOutput(call otto.FunctionCall) otto.Value {
+func consoleOutput(call goja.FunctionCall) goja.Value {
output := []string{"JS:> "}
- for _, argument := range call.ArgumentList {
+ for _, argument := range call.Arguments {
output = append(output, fmt.Sprintf("%v", argument))
}
fmt.Fprintln(os.Stderr, strings.Join(output, " "))
- return otto.Value{}
+ return goja.Undefined()
}
// rulesetUI provides an implementation of UIClientAPI that evaluates a javascript
@@ -70,45 +70,47 @@ func (r *rulesetUI) Init(javascriptRules string) error {
r.jsRules = javascriptRules
return nil
}
-func (r *rulesetUI) execute(jsfunc string, jsarg interface{}) (otto.Value, error) {
+func (r *rulesetUI) execute(jsfunc string, jsarg interface{}) (goja.Value, error) {
// Instantiate a fresh vm engine every time
- vm := otto.New()
+ vm := goja.New()
// Set the native callbacks
- consoleObj, _ := vm.Get("console")
- consoleObj.Object().Set("log", consoleOutput)
- consoleObj.Object().Set("error", consoleOutput)
+ consoleObj := vm.NewObject()
+ consoleObj.Set("log", consoleOutput)
+ consoleObj.Set("error", consoleOutput)
+ vm.Set("console", consoleObj)
- vm.Set("storage", struct{}{})
- storageObj, _ := vm.Get("storage")
- storageObj.Object().Set("put", func(call otto.FunctionCall) otto.Value {
+ storageObj := vm.NewObject()
+ storageObj.Set("put", func(call goja.FunctionCall) goja.Value {
key, val := call.Argument(0).String(), call.Argument(1).String()
if val == "" {
r.storage.Del(key)
} else {
r.storage.Put(key, val)
}
- return otto.NullValue()
+ return goja.Null()
})
- storageObj.Object().Set("get", func(call otto.FunctionCall) otto.Value {
+ storageObj.Set("get", func(call goja.FunctionCall) goja.Value {
goval, _ := r.storage.Get(call.Argument(0).String())
- jsval, _ := otto.ToValue(goval)
+ jsval := vm.ToValue(goval)
return jsval
})
+ vm.Set("storage", storageObj)
+
// Load bootstrap libraries
- script, err := vm.Compile("bignumber.js", BigNumber_JS)
+ script, err := goja.Compile("bignumber.js", string(BigNumber_JS), true)
if err != nil {
log.Warn("Failed loading libraries", "err", err)
- return otto.UndefinedValue(), err
+ return goja.Undefined(), err
}
- vm.Run(script)
+ vm.RunProgram(script)
// Run the actual rule implementation
- _, err = vm.Run(r.jsRules)
+ _, err = vm.RunString(r.jsRules)
if err != nil {
log.Warn("Execution failed", "err", err)
- return otto.UndefinedValue(), err
+ return goja.Undefined(), err
}
// And the actual call
@@ -119,7 +121,7 @@ func (r *rulesetUI) execute(jsfunc string, jsarg interface{}) (otto.Value, error
jsonbytes, err := json.Marshal(jsarg)
if err != nil {
log.Warn("failed marshalling data", "data", jsarg)
- return otto.UndefinedValue(), err
+ return goja.Undefined(), err
}
// Now, we call foobar(JSON.parse()).
var call string
@@ -128,7 +130,7 @@ func (r *rulesetUI) execute(jsfunc string, jsarg interface{}) (otto.Value, error
} else {
call = fmt.Sprintf("%v()", jsfunc)
}
- return vm.Run(call)
+ return vm.RunString(call)
}
func (r *rulesetUI) checkApproval(jsfunc string, jsarg []byte, err error) (bool, error) {
@@ -140,11 +142,7 @@ func (r *rulesetUI) checkApproval(jsfunc string, jsarg []byte, err error) (bool,
log.Info("error occurred during execution", "error", err)
return false, err
}
- result, err := v.ToString()
- if err != nil {
- log.Info("error occurred during response unmarshalling", "error", err)
- return false, err
- }
+ result := v.ToString().String()
if result == "Approve" {
log.Info("Op approved")
return true, nil
diff --git a/signer/rules/rules_test.go b/signer/rules/rules_test.go
index c030ed47ce..510c57e67f 100644
--- a/signer/rules/rules_test.go
+++ b/signer/rules/rules_test.go
@@ -337,7 +337,7 @@ func TestStorage(t *testing.T) {
if err != nil {
t.Errorf("Unexpected error %v", err)
}
- retval, err := v.ToString()
+ retval := v.ToString().String()
if err != nil {
t.Errorf("Unexpected error %v", err)
diff --git a/tests/fuzzers/README.md b/tests/fuzzers/README.md
new file mode 100644
index 0000000000..fd8c4ec57f
--- /dev/null
+++ b/tests/fuzzers/README.md
@@ -0,0 +1,45 @@
+## Fuzzers
+
+To run a fuzzer locally, you need [go-fuzz](https://github.com/dvyukov/go-fuzz) installed.
+
+First build a fuzzing-binary out of the selected package:
+
+```
+(cd ./rlp && CGO_ENABLED=0 go-fuzz-build .)
+```
+That command should generate a `rlp-fuzz.zip` in the `rlp/` directory. If you are already in that directory, you can do
+
+```
+[user@work rlp]$ go-fuzz
+2019/11/26 13:36:54 workers: 6, corpus: 3 (3s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 0, uptime: 3s
+2019/11/26 13:36:57 workers: 6, corpus: 3 (6s ago), crashers: 0, restarts: 1/0, execs: 0 (0/sec), cover: 1054, uptime: 6s
+2019/11/26 13:37:00 workers: 6, corpus: 3 (9s ago), crashers: 0, restarts: 1/8358, execs: 25074 (2786/sec), cover: 1054, uptime: 9s
+2019/11/26 13:37:03 workers: 6, corpus: 3 (12s ago), crashers: 0, restarts: 1/8497, execs: 50986 (4249/sec), cover: 1054, uptime: 12s
+2019/11/26 13:37:06 workers: 6, corpus: 3 (15s ago), crashers: 0, restarts: 1/9330, execs: 74640 (4976/sec), cover: 1054, uptime: 15s
+2019/11/26 13:37:09 workers: 6, corpus: 3 (18s ago), crashers: 0, restarts: 1/9948, execs: 99482 (5527/sec), cover: 1054, uptime: 18s
+2019/11/26 13:37:12 workers: 6, corpus: 3 (21s ago), crashers: 0, restarts: 1/9428, execs: 122568 (5836/sec), cover: 1054, uptime: 21s
+2019/11/26 13:37:15 workers: 6, corpus: 3 (24s ago), crashers: 0, restarts: 1/9676, execs: 145152 (6048/sec), cover: 1054, uptime: 24s
+2019/11/26 13:37:18 workers: 6, corpus: 3 (27s ago), crashers: 0, restarts: 1/9855, execs: 167538 (6205/sec), cover: 1054, uptime: 27s
+2019/11/26 13:37:21 workers: 6, corpus: 3 (30s ago), crashers: 0, restarts: 1/9645, execs: 192901 (6430/sec), cover: 1054, uptime: 30s
+2019/11/26 13:37:24 workers: 6, corpus: 3 (33s ago), crashers: 0, restarts: 1/9967, execs: 219294 (6645/sec), cover: 1054, uptime: 33s
+
+```
+Otherwise:
+```
+go-fuzz -bin ./rlp/rlp-fuzz.zip
+```
+
+### Notes
+
+Once a 'crasher' is found, the fuzzer tries to avoid reporting the same vector twice, so stores the fault in the `suppressions` folder. Thus, if you
+e.g. make changes to fix a bug, you should _remove_ all data from the `suppressions`-folder, to verify that the issue is indeed resolved.
+
+Also, if you have only one and the same exit-point for multiple different types of test, the suppression can make the fuzzer hide differnent types of errors. So make
+sure that each type of failure is unique (for an example, see the rlp fuzzer, where a counter `i` is used to differentiate between failures:
+
+```golang
+ if !bytes.Equal(input, output) {
+ panic(fmt.Sprintf("case %d: encode-decode is not equal, \ninput : %x\noutput: %x", i, input, output))
+ }
+```
+
diff --git a/tests/fuzzers/keystore/corpus/0176eaf52ed014ec5c91cf4afa070dd3fd469077-1 b/tests/fuzzers/keystore/corpus/0176eaf52ed014ec5c91cf4afa070dd3fd469077-1
new file mode 100644
index 0000000000..1c0ecf5250
--- /dev/null
+++ b/tests/fuzzers/keystore/corpus/0176eaf52ed014ec5c91cf4afa070dd3fd469077-1
@@ -0,0 +1 @@
+ns��,��
\ No newline at end of file
diff --git a/tests/fuzzers/keystore/keystore-fuzzer.go b/tests/fuzzers/keystore/keystore-fuzzer.go
new file mode 100644
index 0000000000..704f29dc48
--- /dev/null
+++ b/tests/fuzzers/keystore/keystore-fuzzer.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package keystore
+
+import (
+ "os"
+
+ "github.com/ethereum/go-ethereum/accounts/keystore"
+)
+
+func Fuzz(input []byte) int {
+ ks := keystore.NewKeyStore("/tmp/ks", keystore.LightScryptN, keystore.LightScryptP)
+
+ a, err := ks.NewAccount(string(input))
+ if err != nil {
+ panic(err)
+ }
+ if err := ks.Unlock(a, string(input)); err != nil {
+ panic(err)
+ }
+ os.Remove(a.URL.Path)
+ return 0
+}
diff --git a/tests/fuzzers/rlp/corpus/block_with_uncle.rlp b/tests/fuzzers/rlp/corpus/block_with_uncle.rlp
new file mode 100644
index 0000000000..1b49fe6a09
Binary files /dev/null and b/tests/fuzzers/rlp/corpus/block_with_uncle.rlp differ
diff --git a/tests/fuzzers/rlp/corpus/r.bin b/tests/fuzzers/rlp/corpus/r.bin
new file mode 100644
index 0000000000..cb98a76a8a
--- /dev/null
+++ b/tests/fuzzers/rlp/corpus/r.bin
@@ -0,0 +1 @@
+ˀ����������
\ No newline at end of file
diff --git a/tests/fuzzers/rlp/corpus/transaction.rlp b/tests/fuzzers/rlp/corpus/transaction.rlp
new file mode 100644
index 0000000000..80eea1aec6
--- /dev/null
+++ b/tests/fuzzers/rlp/corpus/transaction.rlp
@@ -0,0 +1,2 @@
+�N�������
+���a����P?-'�{�ЋD�Y���f�j\�E��~읕��F?1(�ij6�@�v�L��ڑ�
\ No newline at end of file
diff --git a/tests/fuzzers/rlp/rlp_fuzzer.go b/tests/fuzzers/rlp/rlp_fuzzer.go
new file mode 100644
index 0000000000..534540476c
--- /dev/null
+++ b/tests/fuzzers/rlp/rlp_fuzzer.go
@@ -0,0 +1,127 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package rlp
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+func decodeEncode(input []byte, val interface{}, i int) {
+ if err := rlp.DecodeBytes(input, val); err == nil {
+ output, err := rlp.EncodeToBytes(val)
+ if err != nil {
+ panic(err)
+ }
+ if !bytes.Equal(input, output) {
+ panic(fmt.Sprintf("case %d: encode-decode is not equal, \ninput : %x\noutput: %x", i, input, output))
+ }
+ }
+}
+
+func Fuzz(input []byte) int {
+ var i int
+ {
+ if len(input) > 0 {
+ rlp.Split(input)
+ }
+ }
+ {
+ if len(input) > 0 {
+ if elems, _, err := rlp.SplitList(input); err == nil {
+ rlp.CountValues(elems)
+ }
+ }
+ }
+
+ {
+ rlp.NewStream(bytes.NewReader(input), 0).Decode(new(interface{}))
+ }
+
+ {
+ decodeEncode(input, new(interface{}), i)
+ i++
+ }
+ {
+ var v struct {
+ Int uint
+ String string
+ Bytes []byte
+ }
+ decodeEncode(input, &v, i)
+ i++
+ }
+
+ {
+ type Types struct {
+ Bool bool
+ Raw rlp.RawValue
+ Slice []*Types
+ Iface []interface{}
+ }
+ var v Types
+ decodeEncode(input, &v, i)
+ i++
+ }
+ {
+ type AllTypes struct {
+ Int uint
+ String string
+ Bytes []byte
+ Bool bool
+ Raw rlp.RawValue
+ Slice []*AllTypes
+ Array [3]*AllTypes
+ Iface []interface{}
+ }
+ var v AllTypes
+ decodeEncode(input, &v, i)
+ i++
+ }
+ {
+ decodeEncode(input, [10]byte{}, i)
+ i++
+ }
+ {
+ var v struct {
+ Byte [10]byte
+ Rool [10]bool
+ }
+ decodeEncode(input, &v, i)
+ i++
+ }
+ {
+ var h types.Header
+ decodeEncode(input, &h, i)
+ i++
+ var b types.Block
+ decodeEncode(input, &b, i)
+ i++
+ var t types.Transaction
+ decodeEncode(input, &t, i)
+ i++
+ var txs types.Transactions
+ decodeEncode(input, &txs, i)
+ i++
+ var rs types.Receipts
+ decodeEncode(input, &rs, i)
+ }
+ return 0
+}
diff --git a/tests/fuzzers/trie/corpus/data b/tests/fuzzers/trie/corpus/data
new file mode 100644
index 0000000000..c4a4839cb8
--- /dev/null
+++ b/tests/fuzzers/trie/corpus/data
@@ -0,0 +1 @@
+asdlfkjasf23oiejfasdfadkfqlkjfasdlkfjalwk4jfalsdkfjawlefkjsadlfkjasldkfjwalefkjasdlfkjM
\ No newline at end of file
diff --git a/tests/fuzzers/trie/trie-fuzzer.go b/tests/fuzzers/trie/trie-fuzzer.go
new file mode 100644
index 0000000000..9818838053
--- /dev/null
+++ b/tests/fuzzers/trie/trie-fuzzer.go
@@ -0,0 +1,189 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/ethdb/memorydb"
+ "github.com/ethereum/go-ethereum/trie"
+)
+
+// randTest performs random trie operations.
+// Instances of this test are created by Generate.
+type randTest []randTestStep
+
+type randTestStep struct {
+ op int
+ key []byte // for opUpdate, opDelete, opGet
+ value []byte // for opUpdate
+ err error // for debugging
+}
+
+type proofDb struct{}
+
+func (proofDb) Put(key []byte, value []byte) error {
+ return nil
+}
+
+func (proofDb) Delete(key []byte) error {
+ return nil
+}
+
+const (
+ opUpdate = iota
+ opDelete
+ opGet
+ opCommit
+ opHash
+ opReset
+ opItercheckhash
+ opProve
+ opMax // boundary value, not an actual op
+)
+
+type dataSource struct {
+ input []byte
+ reader *bytes.Reader
+}
+
+func newDataSource(input []byte) *dataSource {
+ return &dataSource{
+ input, bytes.NewReader(input),
+ }
+}
+func (ds *dataSource) ReadByte() byte {
+ if b, err := ds.reader.ReadByte(); err != nil {
+ return 0
+ } else {
+ return b
+ }
+}
+func (ds *dataSource) Read(buf []byte) (int, error) {
+ return ds.reader.Read(buf)
+}
+func (ds *dataSource) Ended() bool {
+ return ds.reader.Len() == 0
+}
+
+func Generate(input []byte) randTest {
+
+ var allKeys [][]byte
+ r := newDataSource(input)
+ genKey := func() []byte {
+
+ if len(allKeys) < 2 || r.ReadByte() < 0x0f {
+ // new key
+ key := make([]byte, r.ReadByte()%50)
+ r.Read(key)
+ allKeys = append(allKeys, key)
+ return key
+ }
+ // use existing key
+ return allKeys[int(r.ReadByte())%len(allKeys)]
+ }
+
+ var steps randTest
+
+ for i := 0; !r.Ended(); i++ {
+
+ step := randTestStep{op: int(r.ReadByte()) % opMax}
+ switch step.op {
+ case opUpdate:
+ step.key = genKey()
+ step.value = make([]byte, 8)
+ binary.BigEndian.PutUint64(step.value, uint64(i))
+ case opGet, opDelete, opProve:
+ step.key = genKey()
+ }
+ steps = append(steps, step)
+ if len(steps) > 500 {
+ break
+ }
+ }
+
+ return steps
+}
+
+func Fuzz(input []byte) int {
+ program := Generate(input)
+ if len(program) == 0 {
+ return -1
+ }
+ if err := runRandTest(program); err != nil {
+ panic(err)
+ }
+ return 0
+}
+
+func runRandTest(rt randTest) error {
+
+ triedb := trie.NewDatabase(memorydb.New())
+
+ tr, _ := trie.New(common.Hash{}, triedb)
+ values := make(map[string]string) // tracks content of the trie
+
+ for i, step := range rt {
+ switch step.op {
+ case opUpdate:
+ tr.Update(step.key, step.value)
+ values[string(step.key)] = string(step.value)
+ case opDelete:
+ tr.Delete(step.key)
+ delete(values, string(step.key))
+ case opGet:
+ v := tr.Get(step.key)
+ want := values[string(step.key)]
+ if string(v) != want {
+ rt[i].err = fmt.Errorf("mismatch for key 0x%x, got 0x%x want 0x%x", step.key, v, want)
+ }
+ case opCommit:
+ _, rt[i].err = tr.Commit(nil)
+ case opHash:
+ tr.Hash()
+ case opReset:
+ hash, err := tr.Commit(nil)
+ if err != nil {
+ return err
+ }
+ newtr, err := trie.New(hash, triedb)
+ if err != nil {
+ return err
+ }
+ tr = newtr
+ case opItercheckhash:
+ checktr, _ := trie.New(common.Hash{}, triedb)
+ it := trie.NewIterator(tr.NodeIterator(nil))
+ for it.Next() {
+ checktr.Update(it.Key, it.Value)
+ }
+ if tr.Hash() != checktr.Hash() {
+ return fmt.Errorf("hash mismatch in opItercheckhash")
+ }
+ case opProve:
+ rt[i].err = tr.Prove(step.key, 0, proofDb{})
+ }
+ // Abort the test on error.
+ if rt[i].err != nil {
+ return rt[i].err
+ }
+ }
+ return nil
+}
diff --git a/tests/fuzzers/txfetcher/corpus/0151ee1d0db4c74d3bcdfa4f7396a4c8538748c9-2 b/tests/fuzzers/txfetcher/corpus/0151ee1d0db4c74d3bcdfa4f7396a4c8538748c9-2
new file mode 100644
index 0000000000..2c75e9c7a7
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/0151ee1d0db4c74d3bcdfa4f7396a4c8538748c9-2
@@ -0,0 +1 @@
+��
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/020dd7b492a6eb34ff0b7d8ee46189422c37e4a7-6 b/tests/fuzzers/txfetcher/corpus/020dd7b492a6eb34ff0b7d8ee46189422c37e4a7-6
new file mode 100644
index 0000000000..8d3b57789e
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/020dd7b492a6eb34ff0b7d8ee46189422c37e4a7-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/021d1144e359233c496e22c3250609b11b213e9f-4 b/tests/fuzzers/txfetcher/corpus/021d1144e359233c496e22c3250609b11b213e9f-4
new file mode 100644
index 0000000000..73731899d5
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/021d1144e359233c496e22c3250609b11b213e9f-4
@@ -0,0 +1,12 @@
+ TESTING KEY-----
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iAJm2gsvvZhIrCHS3l6afab4pZB
+l2+XsDlrKBxKKtDrGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTtqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tuV6ef6anZzus1s1Y1Clb6HbnWWF/wbZGOpet
+3m4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKZTXtdZrh+k7hx0nTP8Jcb
+uqFk541awmMogY/EfbWd6IOkp+4xqjlFBEDytgbIECQQDvH/6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz84SHEg1Ak/7KCxmD/sfgS5TeuNi8DoUBEmiSJwm7FX
+ftxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su43sjXNueLKH8+ph2UfQuU9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCzjA0CQQDU
+y2pGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtI�o�X
+qUn3Xh9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JMhNRcVFMO8dDaFo
+f9Oeos0UotgiDktdQHxdNEwLjQlJBz+OtwwA=---E RATTIEY-
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/0d28327b1fb52c1ba02a6eb96675c31633921bb2-2 b/tests/fuzzers/txfetcher/corpus/0d28327b1fb52c1ba02a6eb96675c31633921bb2-2
new file mode 100644
index 0000000000..8cc3039cb8
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/0d28327b1fb52c1ba02a6eb96675c31633921bb2-2
@@ -0,0 +1,15 @@
+�&^��o�ȗ-----BEGIN RSA TESTING KEY-----
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZhIrCHS3l6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX
+qyUBnu3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo
+f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA==
+-----END RSA TESTING KEY-----Q_
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/0fcd827b57ded58e91f7ba2ac2b7ea4d25ebedca-7 b/tests/fuzzers/txfetcher/corpus/0fcd827b57ded58e91f7ba2ac2b7ea4d25ebedca-7
new file mode 100644
index 0000000000..8ceee16af1
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/0fcd827b57ded58e91f7ba2ac2b7ea4d25ebedca-7
@@ -0,0 +1 @@
+�ap�������������������������V�������#��&��
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/109bc9b8fd4fef63493e104c703c79bc4a5e8d34-6 b/tests/fuzzers/txfetcher/corpus/109bc9b8fd4fef63493e104c703c79bc4a5e8d34-6
new file mode 100644
index 0000000000..df9b986af1
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/109bc9b8fd4fef63493e104c703c79bc4a5e8d34-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/163785ab002746452619f31e8dfcb4549e6f8b6e-6 b/tests/fuzzers/txfetcher/corpus/163785ab002746452619f31e8dfcb4549e6f8b6e-6
new file mode 100644
index 0000000000..55467373d4
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/163785ab002746452619f31e8dfcb4549e6f8b6e-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1adfa6b9ddf5766220c8ff7ede2926ca241bb947-3 b/tests/fuzzers/txfetcher/corpus/1adfa6b9ddf5766220c8ff7ede2926ca241bb947-3
new file mode 100644
index 0000000000..4a593aa28d
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/1adfa6b9ddf5766220c8ff7ede2926ca241bb947-3
@@ -0,0 +1,11 @@
+TAKBgDuLnQA3gey3VBznB39JUtxjeE6myuDkM/uGlfjb
+S1w4iA5sBzzh8uxEbi4nW91IJm2gsvvZhICHS3l6ab4pZB
+l2DulrKBxKKtD1rGxlG4LncabFn9vLZad2bSysqz/qTAUSTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Z4vMXc7jpTLryzTQIvVdfQbRc6+MUVeLKZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk54MogxEcfbWd6IOkp+4xqFLBEDtgbIECnk+hgN4H
+qzzxxr397vWrjrIgbJpQvBv8QeeuNi8DoUBEmiSJwa7FXY
+FUtxuvL7XvjwjN5B30pEbc6Iuyt7y4MQJBAIt21su4b3sjphy2tuUE9xblTu14qgHZ6+AiZovGKU--FfYAqVXVlxtIX
+qyU3X9ps8ZfjLZ45l6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo
+f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA==
+-----END RSA T
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/1b9a02e9a48fea1d2fc3fb77946ada278e152079-4 b/tests/fuzzers/txfetcher/corpus/1b9a02e9a48fea1d2fc3fb77946ada278e152079-4
new file mode 100644
index 0000000000..4a56f93d3b
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/1b9a02e9a48fea1d2fc3fb77946ada278e152079-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1e14c7ea1faef92890988061b5abe96db7190f98-7 b/tests/fuzzers/txfetcher/corpus/1e14c7ea1faef92890988061b5abe96db7190f98-7
new file mode 100644
index 0000000000..d2442fc5a6
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/1e14c7ea1faef92890988061b5abe96db7190f98-7
@@ -0,0 +1 @@
+0000000000000000000000000000000000000000000000000000000000000000000000000
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/1e7d05f00e99cbf3ff0ef1cd7ea8dd07ad6dff23-6 b/tests/fuzzers/txfetcher/corpus/1e7d05f00e99cbf3ff0ef1cd7ea8dd07ad6dff23-6
new file mode 100644
index 0000000000..1c342ff53a
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/1e7d05f00e99cbf3ff0ef1cd7ea8dd07ad6dff23-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1ec95e347fd522e6385b5091aa81aa2485be4891-4 b/tests/fuzzers/txfetcher/corpus/1ec95e347fd522e6385b5091aa81aa2485be4891-4
new file mode 100644
index 0000000000..b0c776bd4d
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/1ec95e347fd522e6385b5091aa81aa2485be4891-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1fbfa5d214060d2a0905846a589fd6f78d411451-4 b/tests/fuzzers/txfetcher/corpus/1fbfa5d214060d2a0905846a589fd6f78d411451-4
new file mode 100644
index 0000000000..75de835c98
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/1fbfa5d214060d2a0905846a589fd6f78d411451-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/1fd84ee194e791783a7f18f0a6deab8efe05fc04-2 b/tests/fuzzers/txfetcher/corpus/1fd84ee194e791783a7f18f0a6deab8efe05fc04-2
new file mode 100644
index 0000000000..3b6d2560ae
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/1fd84ee194e791783a7f18f0a6deab8efe05fc04-2
@@ -0,0 +1 @@
+�&
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/21e76b9fca21d94d97f860c1c82f40697a83471b-8 b/tests/fuzzers/txfetcher/corpus/21e76b9fca21d94d97f860c1c82f40697a83471b-8
new file mode 100644
index 0000000000..1d4620f49f
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/21e76b9fca21d94d97f860c1c82f40697a83471b-8
@@ -0,0 +1,3 @@
+DtQvfQ+MULKZTXk78c
+/fWkpxlQQ/+hgNzVtx9vWgJsafG7b0dA4AFjwVbFLmQcj2PprIMmPNQrooX
+L
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/220a87fed0c92474923054094eb7aff14289cf5e-4 b/tests/fuzzers/txfetcher/corpus/220a87fed0c92474923054094eb7aff14289cf5e-4
new file mode 100644
index 0000000000..175f74fd5a
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/220a87fed0c92474923054094eb7aff14289cf5e-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/23ddcd66aa92fe3d78b7f5b6e7cddb1b55c5f5df-3 b/tests/fuzzers/txfetcher/corpus/23ddcd66aa92fe3d78b7f5b6e7cddb1b55c5f5df-3
new file mode 100644
index 0000000000..95892c7b00
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/23ddcd66aa92fe3d78b7f5b6e7cddb1b55c5f5df-3
@@ -0,0 +1,12 @@
+4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJm2gsvvZeIrCHS3l6afab4pZB
+l2+XsDlrKBxKKtD1rGxlG4jncdabFn9gvLZad2bSysqz/qTAUSTvqJQIDAQAB
+AoGAGRzwwXvBOAy5tM/uV6e+Zf6aZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Z4vD6Mc7pLryzTQIVdfQbRc6+MUVeLKZaTXtdZru+Jk70PJJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+gN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQ2PprIMPcQroo8vpjSHg1Ev14KxmQeDydfsgeuN8UBESJwm7F
+UtuL7Xvjw50pNEbc6Iuyty4QJA21su4sjXNueLQphy2U
+fQtuUE9txblTu14qN7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6ARYiZPYj1oGUFfYAVVxtI
+qyBnu3X9pfLZOAkEAlT4R5Yl6cJQYZHOde3JEhNRcVFMO8dJFo
+f9Oeos0UUhgiDkQxdEwLjQf7lJJz5OtwC=
+-NRSA TESINGKEY-Q_
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/2441d249faf9a859e38c49f6e305b394280c6ea5-1 b/tests/fuzzers/txfetcher/corpus/2441d249faf9a859e38c49f6e305b394280c6ea5-1
new file mode 100644
index 0000000000..d76207e992
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/2441d249faf9a859e38c49f6e305b394280c6ea5-1 differ
diff --git a/tests/fuzzers/txfetcher/corpus/2da1f0635e11283b1927974f418aadd8837ad31e-7 b/tests/fuzzers/txfetcher/corpus/2da1f0635e11283b1927974f418aadd8837ad31e-7
new file mode 100644
index 0000000000..73ae705701
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/2da1f0635e11283b1927974f418aadd8837ad31e-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/2e1853fbf8efe40098b1583224fe3b5f335e7037-6 b/tests/fuzzers/txfetcher/corpus/2e1853fbf8efe40098b1583224fe3b5f335e7037-6
new file mode 100644
index 0000000000..692981e614
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/2e1853fbf8efe40098b1583224fe3b5f335e7037-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/2f25490dc49c103d653843ed47324b310ee7105e-7 b/tests/fuzzers/txfetcher/corpus/2f25490dc49c103d653843ed47324b310ee7105e-7
new file mode 100644
index 0000000000..5cf7da75df
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/2f25490dc49c103d653843ed47324b310ee7105e-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/30494b85bb60ad7f099fa49d427007a761620d8f-5 b/tests/fuzzers/txfetcher/corpus/30494b85bb60ad7f099fa49d427007a761620d8f-5
new file mode 100644
index 0000000000..7ff9d39752
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/30494b85bb60ad7f099fa49d427007a761620d8f-5
@@ -0,0 +1,10 @@
+jXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6Yj013sovGKUFfYAqVXVlxtIX
+qyUBnu3Xh9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dDaFeo
+f9Oeos0UotgiDktdQHxdNEwLjQfl
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/316024ca3aaf09c1de5258733ff5fe3d799648d3-4 b/tests/fuzzers/txfetcher/corpus/316024ca3aaf09c1de5258733ff5fe3d799648d3-4
new file mode 100644
index 0000000000..61f7d78f34
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/316024ca3aaf09c1de5258733ff5fe3d799648d3-4
@@ -0,0 +1,15 @@
+�^�o�ȗ----BEGIN RA TTING KEY-----
+IIXgIBAAKBQDuLnQI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8QPuxEbi4nW91IJmgsvvZhrCHSl6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4LjcdabF9gvLZad2bSysqz/qTAUStTvqJQDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Z4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj043sovGKUFfYAqVXVlxtIX
+qyUBnu3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFeo
+f9Oeos0UUothgiDktdQHxdNEwLjQf7lJJBzV+5OtwswCWA==
+-----END RSA TESTING KEY-----Q_
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/32a089e2c439a91f4c1b67a13d52429bcded0dd9-7 b/tests/fuzzers/txfetcher/corpus/32a089e2c439a91f4c1b67a13d52429bcded0dd9-7
new file mode 100644
index 0000000000..a986a9d8e7
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/32a089e2c439a91f4c1b67a13d52429bcded0dd9-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/33ec1dc0bfeb93d16edee3c07125fec6ac1aa17d-2 b/tests/fuzzers/txfetcher/corpus/33ec1dc0bfeb93d16edee3c07125fec6ac1aa17d-2
new file mode 100644
index 0000000000..d41771b86c
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/33ec1dc0bfeb93d16edee3c07125fec6ac1aa17d-2
@@ -0,0 +1 @@
+�
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/37a0d207700b52caa005ec8aeb344dcb13150ed2-5 b/tests/fuzzers/txfetcher/corpus/37a0d207700b52caa005ec8aeb344dcb13150ed2-5
new file mode 100644
index 0000000000..2f09c6e28f
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/37a0d207700b52caa005ec8aeb344dcb13150ed2-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/382f59c66d0ddb6747d3177263279789ca15c2db-5 b/tests/fuzzers/txfetcher/corpus/382f59c66d0ddb6747d3177263279789ca15c2db-5
new file mode 100644
index 0000000000..84441ac374
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/382f59c66d0ddb6747d3177263279789ca15c2db-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/3a010483a4ad8d7215447ce27e0fac3791235c99-4 b/tests/fuzzers/txfetcher/corpus/3a010483a4ad8d7215447ce27e0fac3791235c99-4
new file mode 100644
index 0000000000..28f5d99b98
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/3a010483a4ad8d7215447ce27e0fac3791235c99-4
@@ -0,0 +1,7 @@
+
+lGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/3a3b717fcfe7ffb000b906e5a76f32248a576bf7-6 b/tests/fuzzers/txfetcher/corpus/3a3b717fcfe7ffb000b906e5a76f32248a576bf7-6
new file mode 100644
index 0000000000..022de3c61d
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/3a3b717fcfe7ffb000b906e5a76f32248a576bf7-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/3c37f6d58b8029971935f127f53e6aaeba558445-6 b/tests/fuzzers/txfetcher/corpus/3c37f6d58b8029971935f127f53e6aaeba558445-6
new file mode 100644
index 0000000000..9f3bf093ad
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/3c37f6d58b8029971935f127f53e6aaeba558445-6
@@ -0,0 +1,2 @@
+���w������������ �
+���
������������������� �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/��0
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/3c73b63bafa9f535c882ec17189adaf02b58f432-6 b/tests/fuzzers/txfetcher/corpus/3c73b63bafa9f535c882ec17189adaf02b58f432-6
new file mode 100644
index 0000000000..0dfbc46993
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/3c73b63bafa9f535c882ec17189adaf02b58f432-6
@@ -0,0 +1 @@
+LvhaJQHOe3EhRcdaFofeoogkjQfJB
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/3d11500c4f66b20c73bbdfb1a7bddd7bbf92b29c-5 b/tests/fuzzers/txfetcher/corpus/3d11500c4f66b20c73bbdfb1a7bddd7bbf92b29c-5
new file mode 100644
index 0000000000..b19fc7f458
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/3d11500c4f66b20c73bbdfb1a7bddd7bbf92b29c-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/3d8b5bf36c80d6f65802280039f85421f32b5055-6 b/tests/fuzzers/txfetcher/corpus/3d8b5bf36c80d6f65802280039f85421f32b5055-6
new file mode 100644
index 0000000000..eacd269f31
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/3d8b5bf36c80d6f65802280039f85421f32b5055-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/3f99c546a3962256176d566c19e3fffb62072078-1 b/tests/fuzzers/txfetcher/corpus/3f99c546a3962256176d566c19e3fffb62072078-1
new file mode 100644
index 0000000000..9e90183d6b
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/3f99c546a3962256176d566c19e3fffb62072078-1
@@ -0,0 +1 @@
+�&^��o�
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/408ec46539af27acd82b3d01e863597030882458-8 b/tests/fuzzers/txfetcher/corpus/408ec46539af27acd82b3d01e863597030882458-8
new file mode 100644
index 0000000000..65d55437e5
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/408ec46539af27acd82b3d01e863597030882458-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/436154e5bb6487673f6642e6d2a582c01b083c08-8 b/tests/fuzzers/txfetcher/corpus/436154e5bb6487673f6642e6d2a582c01b083c08-8
new file mode 100644
index 0000000000..28e519c125
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/436154e5bb6487673f6642e6d2a582c01b083c08-8
@@ -0,0 +1 @@
+�apfffffffffffffffffffffffffffffffebadce6f48a0�_3bbfd2364
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/45f565cd14b8de1ba2e925047ce776c2682b4b8d-3 b/tests/fuzzers/txfetcher/corpus/45f565cd14b8de1ba2e925047ce776c2682b4b8d-3
new file mode 100644
index 0000000000..9f03a095b9
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/45f565cd14b8de1ba2e925047ce776c2682b4b8d-3 differ
diff --git a/tests/fuzzers/txfetcher/corpus/4a0a12f5b033c8c160cc3b5133692ea1e92c6cdf-7 b/tests/fuzzers/txfetcher/corpus/4a0a12f5b033c8c160cc3b5133692ea1e92c6cdf-7
new file mode 100644
index 0000000000..e50b5494c9
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/4a0a12f5b033c8c160cc3b5133692ea1e92c6cdf-7
@@ -0,0 +1,3 @@
+DtQvfQ+MULKZTXk78c
+/fWkpxlyEQQ/+hgNzVtx9vWgJsafG7b0dA4AFjwVbFLmQcj2PprIMmPNQg1Ak/7KCxmDgS5TDEmSJwFX
+txLjbt4xTgeXVlXsjLZ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/550f15ef65230cc4dcfab7fea67de212d9212ff8-8 b/tests/fuzzers/txfetcher/corpus/550f15ef65230cc4dcfab7fea67de212d9212ff8-8
new file mode 100644
index 0000000000..34005f43cb
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/550f15ef65230cc4dcfab7fea67de212d9212ff8-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/5552213d659fef900a194c52718ffeffdc72d043-3 b/tests/fuzzers/txfetcher/corpus/5552213d659fef900a194c52718ffeffdc72d043-3
new file mode 100644
index 0000000000..7346ff1955
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/5552213d659fef900a194c52718ffeffdc72d043-3 differ
diff --git a/tests/fuzzers/txfetcher/corpus/5570ef82893a9b9b9158572d43a7de7537121d2d-1 b/tests/fuzzers/txfetcher/corpus/5570ef82893a9b9b9158572d43a7de7537121d2d-1
new file mode 100644
index 0000000000..feffcebca0
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/5570ef82893a9b9b9158572d43a7de7537121d2d-1
@@ -0,0 +1 @@
+����ٯ0,1,2,3,4,5,6,7,-3420794409,(2,a)
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/5e10f734f8af4116fbd164d96eec67aa53e6228c-5 b/tests/fuzzers/txfetcher/corpus/5e10f734f8af4116fbd164d96eec67aa53e6228c-5
new file mode 100644
index 0000000000..0eacd0b59a
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/5e10f734f8af4116fbd164d96eec67aa53e6228c-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/608200b402488b3989ec8ec5f4190ccb537b8ea4-4 b/tests/fuzzers/txfetcher/corpus/608200b402488b3989ec8ec5f4190ccb537b8ea4-4
new file mode 100644
index 0000000000..d37b018515
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/608200b402488b3989ec8ec5f4190ccb537b8ea4-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/61e89c3fbdf9eff74bd250ea73cc2e61f8ca0d97-5 b/tests/fuzzers/txfetcher/corpus/61e89c3fbdf9eff74bd250ea73cc2e61f8ca0d97-5
new file mode 100644
index 0000000000..155744bccc
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/61e89c3fbdf9eff74bd250ea73cc2e61f8ca0d97-5
@@ -0,0 +1 @@
+88242871'392752200424491531672177074144720616417147514758635765020556616�
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/62817a48c78fbf2c12fcdc5ca58e2ca60c43543a-7 b/tests/fuzzers/txfetcher/corpus/62817a48c78fbf2c12fcdc5ca58e2ca60c43543a-7
new file mode 100644
index 0000000000..795608a789
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/62817a48c78fbf2c12fcdc5ca58e2ca60c43543a-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/6782da8f1a432a77306d60d2ac2470c35b98004f-3 b/tests/fuzzers/txfetcher/corpus/6782da8f1a432a77306d60d2ac2470c35b98004f-3
new file mode 100644
index 0000000000..f44949e6ae
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/6782da8f1a432a77306d60d2ac2470c35b98004f-3
@@ -0,0 +1 @@
+21888242871'392752200424452601091531672177074144720616417147514758635765020556616��
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/68fb55290cb9d6da5b259017c34bcecf96c944aa-5 b/tests/fuzzers/txfetcher/corpus/68fb55290cb9d6da5b259017c34bcecf96c944aa-5
new file mode 100644
index 0000000000..23d905b827
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/68fb55290cb9d6da5b259017c34bcecf96c944aa-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/6a5059bc86872526241d21ab5dae9f0afd3b9ae1-3 b/tests/fuzzers/txfetcher/corpus/6a5059bc86872526241d21ab5dae9f0afd3b9ae1-3
new file mode 100644
index 0000000000..b71d5dff51
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/6a5059bc86872526241d21ab5dae9f0afd3b9ae1-3
@@ -0,0 +1 @@
+��
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/717928e0e2d478c680c6409b173552ca98469ba5-6 b/tests/fuzzers/txfetcher/corpus/717928e0e2d478c680c6409b173552ca98469ba5-6
new file mode 100644
index 0000000000..dce5106115
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/717928e0e2d478c680c6409b173552ca98469ba5-6
@@ -0,0 +1 @@
+LvhaJcdaFofenogkjQfJB
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/71d22f25419543e437f249ca437823b87ac926b1-6 b/tests/fuzzers/txfetcher/corpus/71d22f25419543e437f249ca437823b87ac926b1-6
new file mode 100644
index 0000000000..d07a6c2f32
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/71d22f25419543e437f249ca437823b87ac926b1-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/7312a0f31ae5d773ed4fd74abc7521eb14754683-8 b/tests/fuzzers/txfetcher/corpus/7312a0f31ae5d773ed4fd74abc7521eb14754683-8
new file mode 100644
index 0000000000..3593ce2e19
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/7312a0f31ae5d773ed4fd74abc7521eb14754683-8
@@ -0,0 +1,2 @@
+DtQvfQ+MULKZTXk78c
+/fWkpxlyEQQ/+hgNzVtx9vWgJsafG7b0dA4AFjwVbFLmQcj2PprIMmPNQg1AkS5TDEmSJwFVlXsjLZ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/76e413a50dc8861e3756e556f796f1737bec2675-4 b/tests/fuzzers/txfetcher/corpus/76e413a50dc8861e3756e556f796f1737bec2675-4
new file mode 100644
index 0000000000..623fcf9601
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/76e413a50dc8861e3756e556f796f1737bec2675-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/78480977d5c07386b06e9b37f5c82f5ed86c2f09-3 b/tests/fuzzers/txfetcher/corpus/78480977d5c07386b06e9b37f5c82f5ed86c2f09-3
new file mode 100644
index 0000000000..e92863a1c7
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/78480977d5c07386b06e9b37f5c82f5ed86c2f09-3
@@ -0,0 +1,14 @@
+ TESTING KEY-----
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iAJm2gsvvZhIrCHS3l6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj013sovGKUFfYAqVXVlxtIX
+qyUBnu3Xh9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JEMhNRcVFMO8dDaFeo
+f9Oeos0UotgiDktdQHxdNEwLjQflJJBzV+5OtwswCA=----EN RATESTI EY-----Q
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/7a113cd3c178934cdb64353af86d51462d7080a4-5 b/tests/fuzzers/txfetcher/corpus/7a113cd3c178934cdb64353af86d51462d7080a4-5
new file mode 100644
index 0000000000..16818128ae
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/7a113cd3c178934cdb64353af86d51462d7080a4-5
@@ -0,0 +1,10 @@
+l6afab4pZB
+l2+XsDlrKBxKKtDrGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTtqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tuV6ef6anZzus1s1Y1Clb6HbnWWF/wbZGOpet
+3m4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKZTXtdZrh+k7hx0nTP8Jcb
+uqFk541awmMogY/EfbWd6IOkp+4xqjlFBEDytgbIECQQDvH/6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz84SHEg1Ak/7KCxmD/sfgS5TeuNi8DoUBEmiSJwm7FX
+ftxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su43sjXNueLKH8+ph2UfQuU9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCzjA0CQQDU
+y2pGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6PYj13sovGKUFfYAqVXVlxtI�o�X
+qUn3X9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYZHOde3JMhNRcVFMO8dDaFo
+f9Oeos0UotgiDktdQHxdNEwLjQlJBz+OtwwA=---E ATTIEY-
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/7ea9f71020f3eb783f743f744eba8d8ca4b2582f-3 b/tests/fuzzers/txfetcher/corpus/7ea9f71020f3eb783f743f744eba8d8ca4b2582f-3
new file mode 100644
index 0000000000..08f5bb99f5
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/7ea9f71020f3eb783f743f744eba8d8ca4b2582f-3
@@ -0,0 +1,9 @@
+
+l2+DulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprIMmPcQrooz8vp
+jy4SHEg1AkEA/v13/5M47K9vCxmb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
diff --git a/tests/fuzzers/txfetcher/corpus/84f8c275f3ffbaf8c32c21782af13de10e7de28b-3 b/tests/fuzzers/txfetcher/corpus/84f8c275f3ffbaf8c32c21782af13de10e7de28b-3
new file mode 100644
index 0000000000..2d6060c406
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/84f8c275f3ffbaf8c32c21782af13de10e7de28b-3
@@ -0,0 +1 @@
+KKtDlbjVeLKwZatTXtdZrhu+Jk7hx0xxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLQcmPcQETT YQ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/85dfe7ddee0e52aa19115c0ebb9ed28a14e488c6-5 b/tests/fuzzers/txfetcher/corpus/85dfe7ddee0e52aa19115c0ebb9ed28a14e488c6-5
new file mode 100644
index 0000000000..9b6fe78029
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/85dfe7ddee0e52aa19115c0ebb9ed28a14e488c6-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/87bba5b1e3da38fed8cb5a9bc5c8baa819e83d05-5 b/tests/fuzzers/txfetcher/corpus/87bba5b1e3da38fed8cb5a9bc5c8baa819e83d05-5
new file mode 100644
index 0000000000..ef091f0be2
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/87bba5b1e3da38fed8cb5a9bc5c8baa819e83d05-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/8a9ebedfbfec584d8b22761e6121dc1ca0248548-4 b/tests/fuzzers/txfetcher/corpus/8a9ebedfbfec584d8b22761e6121dc1ca0248548-4
new file mode 100644
index 0000000000..953be79201
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/8a9ebedfbfec584d8b22761e6121dc1ca0248548-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/8ff3bd49f93079e5e1c7f8f2461ba7ee612900c3-5 b/tests/fuzzers/txfetcher/corpus/8ff3bd49f93079e5e1c7f8f2461ba7ee612900c3-5
new file mode 100644
index 0000000000..a86a66593b
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/8ff3bd49f93079e5e1c7f8f2461ba7ee612900c3-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/9034aaf45143996a2b14465c352ab0c6fa26b221-2 b/tests/fuzzers/txfetcher/corpus/9034aaf45143996a2b14465c352ab0c6fa26b221-2
new file mode 100644
index 0000000000..9c95a6ba6a
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/9034aaf45143996a2b14465c352ab0c6fa26b221-2
@@ -0,0 +1 @@
+�
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/92cefdc6251d04896349a464b29be03d6bb04c3d-2 b/tests/fuzzers/txfetcher/corpus/92cefdc6251d04896349a464b29be03d6bb04c3d-2
new file mode 100644
index 0000000000..9b78e45707
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/92cefdc6251d04896349a464b29be03d6bb04c3d-2
@@ -0,0 +1 @@
+�39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319��
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/9613e580ccb69df7c9074f0e2f6886ac6b34ca55-5 b/tests/fuzzers/txfetcher/corpus/9613e580ccb69df7c9074f0e2f6886ac6b34ca55-5
new file mode 100644
index 0000000000..681adc6a9c
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/9613e580ccb69df7c9074f0e2f6886ac6b34ca55-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/98afc8970a680fdc4aee0b5d48784f650c566b75-6 b/tests/fuzzers/txfetcher/corpus/98afc8970a680fdc4aee0b5d48784f650c566b75-6
new file mode 100644
index 0000000000..c82defc243
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/98afc8970a680fdc4aee0b5d48784f650c566b75-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/9dfc92f4ca2ece0167096fca6751ff314765f08b-8 b/tests/fuzzers/txfetcher/corpus/9dfc92f4ca2ece0167096fca6751ff314765f08b-8
new file mode 100644
index 0000000000..be75c25fec
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/9dfc92f4ca2ece0167096fca6751ff314765f08b-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/9ebcbbfdaf0e98c87652e57226a4d8a35170c67d-4 b/tests/fuzzers/txfetcher/corpus/9ebcbbfdaf0e98c87652e57226a4d8a35170c67d-4
new file mode 100644
index 0000000000..ab036767db
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/9ebcbbfdaf0e98c87652e57226a4d8a35170c67d-4
@@ -0,0 +1,5 @@
+l2+DulrKBxKKtD1rGxlG4LjncdabFn9gvLZad2bSysqz/qTAUStTvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1Y1ClbjbE6HXbnWWF/wbZGOpwVbFLmQet
+3Zm4vD6MXc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hgN4H
+qzzVtxxr397vWrjr
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/9ff520eb8b8319a5fdafbe4d1cbb02a75058d93b-7 b/tests/fuzzers/txfetcher/corpus/9ff520eb8b8319a5fdafbe4d1cbb02a75058d93b-7
new file mode 100644
index 0000000000..d91a13138c
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/9ff520eb8b8319a5fdafbe4d1cbb02a75058d93b-7
@@ -0,0 +1,2 @@
+&��w������������ �
+���
��������������������� �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/��0
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/a0b57a12e25ac5adcedb2a5c45915f0f62aee869-4 b/tests/fuzzers/txfetcher/corpus/a0b57a12e25ac5adcedb2a5c45915f0f62aee869-4
new file mode 100644
index 0000000000..78243163a8
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a0b57a12e25ac5adcedb2a5c45915f0f62aee869-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/a2684adccf16e036b051c12f283734fa803746e8-6 b/tests/fuzzers/txfetcher/corpus/a2684adccf16e036b051c12f283734fa803746e8-6
new file mode 100644
index 0000000000..4e12af2da8
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a2684adccf16e036b051c12f283734fa803746e8-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/a37305974cf477ecfe65fa92f37b1f51dea25910-4 b/tests/fuzzers/txfetcher/corpus/a37305974cf477ecfe65fa92f37b1f51dea25910-4
new file mode 100644
index 0000000000..75cb14e8d9
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a37305974cf477ecfe65fa92f37b1f51dea25910-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/a7eb43926bd14b1f62a66a33107776e487434d32-7 b/tests/fuzzers/txfetcher/corpus/a7eb43926bd14b1f62a66a33107776e487434d32-7
new file mode 100644
index 0000000000..88e6127355
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a7eb43926bd14b1f62a66a33107776e487434d32-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/a8f7c254eb64a40fd2a77b79979c7bbdac6a760c-4 b/tests/fuzzers/txfetcher/corpus/a8f7c254eb64a40fd2a77b79979c7bbdac6a760c-4
new file mode 100644
index 0000000000..da61777c22
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/a8f7c254eb64a40fd2a77b79979c7bbdac6a760c-4
@@ -0,0 +1,2 @@
+lxtIX
+qyU3X9ps8ZfjLZ45l6cGhaJQYZHOde3JEMhNRcVFMO8dJDaFe
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/a9a8f287d6af24e47d8db468e8f967aa44fb5a1f-7 b/tests/fuzzers/txfetcher/corpus/a9a8f287d6af24e47d8db468e8f967aa44fb5a1f-7
new file mode 100644
index 0000000000..7811921b79
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/a9a8f287d6af24e47d8db468e8f967aa44fb5a1f-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/aa7444d8e326158046862590a0db993c07aef372-7 b/tests/fuzzers/txfetcher/corpus/aa7444d8e326158046862590a0db993c07aef372-7
new file mode 100644
index 0000000000..870e12ffbc
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/aa7444d8e326158046862590a0db993c07aef372-7
@@ -0,0 +1 @@
+00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000@0000000000000
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/ae4593626d8796e079a358c2395a4f6c9ddd6a44-6 b/tests/fuzzers/txfetcher/corpus/ae4593626d8796e079a358c2395a4f6c9ddd6a44-6
new file mode 100644
index 0000000000..845deedd0e
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/ae4593626d8796e079a358c2395a4f6c9ddd6a44-6
@@ -0,0 +1,8 @@
+9pmM gY/xEcfbWd6IOkp+4xqjlFLBEDytgbparsing /E6nk+hgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLmQcj2PprLANGcQrooz8vp
+jy4SHEg1AkEA/v13/@M47K9vCxb8QeD/asydfsgS5TeuNi8DoUBEmiSJwma7FXY
+fFUtxuvL7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4b3sjXNueLKH85Q+phy2U
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xl/DoCz� jA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYiZ6Yj013sovGKUFfYAqVXVlxtIX
+qyUBnu3Xh9ps8ZfjLZO7BAkEAlT4R5Yl6cGhaJQYFZHOde3JEMhNRcVFMO8dDaFeo
+f9Oeos0Uot
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/b2942d4413a66939cda7db93020dee79eb17788c-9 b/tests/fuzzers/txfetcher/corpus/b2942d4413a66939cda7db93020dee79eb17788c-9
new file mode 100644
index 0000000000..10aca65121
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/b2942d4413a66939cda7db93020dee79eb17788c-9 differ
diff --git a/tests/fuzzers/txfetcher/corpus/b4614117cdfd147d38f4e8a4d85f5a2bb99a6a4f-5 b/tests/fuzzers/txfetcher/corpus/b4614117cdfd147d38f4e8a4d85f5a2bb99a6a4f-5
new file mode 100644
index 0000000000..af69eef9b0
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/b4614117cdfd147d38f4e8a4d85f5a2bb99a6a4f-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/b631ef3291fa405cd6517d11f4d1b9b6d02912d4-2 b/tests/fuzzers/txfetcher/corpus/b631ef3291fa405cd6517d11f4d1b9b6d02912d4-2
new file mode 100644
index 0000000000..a6b8858b40
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/b631ef3291fa405cd6517d11f4d1b9b6d02912d4-2
@@ -0,0 +1 @@
+&�o�
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/b7a91e338cc11f50ebdb2c414610efc4d5be3137-4 b/tests/fuzzers/txfetcher/corpus/b7a91e338cc11f50ebdb2c414610efc4d5be3137-4
new file mode 100644
index 0000000000..9709a1fcb8
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/b7a91e338cc11f50ebdb2c414610efc4d5be3137-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/b858cb282617fb0956d960215c8e84d1ccf909c6-2 b/tests/fuzzers/txfetcher/corpus/b858cb282617fb0956d960215c8e84d1ccf909c6-2
new file mode 100644
index 0000000000..0519ecba6e
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/b858cb282617fb0956d960215c8e84d1ccf909c6-2
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/bc9d570aacf3acd39600feda8e72a293a4667da4-1 b/tests/fuzzers/txfetcher/corpus/bc9d570aacf3acd39600feda8e72a293a4667da4-1
new file mode 100644
index 0000000000..aab27c5909
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/bc9d570aacf3acd39600feda8e72a293a4667da4-1
@@ -0,0 +1 @@
+�
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/be7eed35b245b5d5d2adcdb4c67f07794eb86b24-3 b/tests/fuzzers/txfetcher/corpus/be7eed35b245b5d5d2adcdb4c67f07794eb86b24-3
new file mode 100644
index 0000000000..47c996d33f
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/be7eed35b245b5d5d2adcdb4c67f07794eb86b24-3
@@ -0,0 +1,2 @@
+4LZmbRc6+MUVeLKXtdZr+Jk7hhgN4H
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLQcmPcQ SN_
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/c010b0cd70c7edbc5bd332fc9e2e91c6a1cbcdc4-5 b/tests/fuzzers/txfetcher/corpus/c010b0cd70c7edbc5bd332fc9e2e91c6a1cbcdc4-5
new file mode 100644
index 0000000000..474f14d89b
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/c010b0cd70c7edbc5bd332fc9e2e91c6a1cbcdc4-5
@@ -0,0 +1,4 @@
+
+Xc7jpTLryzTQIvVdfQbRc6+MUVeLKwZatTXtdZrhu+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbWd6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nhgN4H
+qzzVtxx7vWrjrIgPbJpvfb
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/c1690698607eb0f4c4244e9f9629968be4beb6bc-8 b/tests/fuzzers/txfetcher/corpus/c1690698607eb0f4c4244e9f9629968be4beb6bc-8
new file mode 100644
index 0000000000..d184a2d8a4
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/c1690698607eb0f4c4244e9f9629968be4beb6bc-8
@@ -0,0 +1,2 @@
+&Ƚ�� �
+���
��������������������� �!�"�#�$�%�&�'�(�)�*�+�,�-�.�/��0
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/c1f435e4f53a9a17578d9e8c4789860f962a1379-6 b/tests/fuzzers/txfetcher/corpus/c1f435e4f53a9a17578d9e8c4789860f962a1379-6
new file mode 100644
index 0000000000..f2a68ec3de
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/c1f435e4f53a9a17578d9e8c4789860f962a1379-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/c298a75334c3acf04bd129a8867447a25c8bacf8-7 b/tests/fuzzers/txfetcher/corpus/c298a75334c3acf04bd129a8867447a25c8bacf8-7
new file mode 100644
index 0000000000..0b437f2260
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/c298a75334c3acf04bd129a8867447a25c8bacf8-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/c42287c7d225e530e822f23bbbba6819a9e48f38-6 b/tests/fuzzers/txfetcher/corpus/c42287c7d225e530e822f23bbbba6819a9e48f38-6
new file mode 100644
index 0000000000..91818f5634
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/c42287c7d225e530e822f23bbbba6819a9e48f38-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/c4cdbb891f3ee76476b7375d5ed51691fed95421-10 b/tests/fuzzers/txfetcher/corpus/c4cdbb891f3ee76476b7375d5ed51691fed95421-10
new file mode 100644
index 0000000000..e365cc5262
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/c4cdbb891f3ee76476b7375d5ed51691fed95421-10 differ
diff --git a/tests/fuzzers/txfetcher/corpus/cc9572d72dfa2937074b1766dcbcff9cc58d1137-4 b/tests/fuzzers/txfetcher/corpus/cc9572d72dfa2937074b1766dcbcff9cc58d1137-4
new file mode 100644
index 0000000000..b72a78f529
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/cc9572d72dfa2937074b1766dcbcff9cc58d1137-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/cd1d73b4e101bc7b979e3f6f135cb12d4594d348-5 b/tests/fuzzers/txfetcher/corpus/cd1d73b4e101bc7b979e3f6f135cb12d4594d348-5
new file mode 100644
index 0000000000..3079de5557
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/cd1d73b4e101bc7b979e3f6f135cb12d4594d348-5
@@ -0,0 +1 @@
+822452601031714757585602556
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/d0acdc8fca32bbd58d368eeac3bd9eaa46f59d27-5 b/tests/fuzzers/txfetcher/corpus/d0acdc8fca32bbd58d368eeac3bd9eaa46f59d27-5
new file mode 100644
index 0000000000..794d5d86c6
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/d0acdc8fca32bbd58d368eeac3bd9eaa46f59d27-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/d0e43b715fd00953f7bdd6dfad95811985e81396-4 b/tests/fuzzers/txfetcher/corpus/d0e43b715fd00953f7bdd6dfad95811985e81396-4
new file mode 100644
index 0000000000..742db5fb3b
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/d0e43b715fd00953f7bdd6dfad95811985e81396-4 differ
diff --git a/tests/fuzzers/txfetcher/corpus/d925fbd22c8bc0de34d6a9d1258ce3d2928d0927-8 b/tests/fuzzers/txfetcher/corpus/d925fbd22c8bc0de34d6a9d1258ce3d2928d0927-8
new file mode 100644
index 0000000000..5920dfe601
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/d925fbd22c8bc0de34d6a9d1258ce3d2928d0927-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/d9ba78cb7425724185d5fa300cd5c03aec2683bb-7 b/tests/fuzzers/txfetcher/corpus/d9ba78cb7425724185d5fa300cd5c03aec2683bb-7
new file mode 100644
index 0000000000..c4df1cf210
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/d9ba78cb7425724185d5fa300cd5c03aec2683bb-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709 b/tests/fuzzers/txfetcher/corpus/da39a3ee5e6b4b0d3255bfef95601890afd80709
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/fuzzers/txfetcher/corpus/dcdb7758b87648b5d766b1b341a65834420cf621-7 b/tests/fuzzers/txfetcher/corpus/dcdb7758b87648b5d766b1b341a65834420cf621-7
new file mode 100644
index 0000000000..78cf11ae21
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/dcdb7758b87648b5d766b1b341a65834420cf621-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/dd441bd24581332c9ce19e008260a69287aa3cbc-6 b/tests/fuzzers/txfetcher/corpus/dd441bd24581332c9ce19e008260a69287aa3cbc-6
new file mode 100644
index 0000000000..4e0c14006e
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/dd441bd24581332c9ce19e008260a69287aa3cbc-6
@@ -0,0 +1,2 @@
+Dtf1nWk78c
+/fWklyEQQ/+hgNzVtxxmDgS5TDETgeXVlXsjLZ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/def879fe0fd637a745c00c8f1da340518db8688c-2 b/tests/fuzzers/txfetcher/corpus/def879fe0fd637a745c00c8f1da340518db8688c-2
new file mode 100644
index 0000000000..555752f0ed
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/def879fe0fd637a745c00c8f1da340518db8688c-2
@@ -0,0 +1 @@
+� �
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/df6c30a9781b93bd6d2f5e97e5592d5945210003-7 b/tests/fuzzers/txfetcher/corpus/df6c30a9781b93bd6d2f5e97e5592d5945210003-7
new file mode 100644
index 0000000000..2a7adb093b
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/df6c30a9781b93bd6d2f5e97e5592d5945210003-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/dfc1c3a2e3ccdaf6f88c515fd00e8ad08421e431-6 b/tests/fuzzers/txfetcher/corpus/dfc1c3a2e3ccdaf6f88c515fd00e8ad08421e431-6
new file mode 100644
index 0000000000..59f3442c05
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/dfc1c3a2e3ccdaf6f88c515fd00e8ad08421e431-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/e1dcc4e7ead6dfd1139ece7bf57d776cb9dac72d-7 b/tests/fuzzers/txfetcher/corpus/e1dcc4e7ead6dfd1139ece7bf57d776cb9dac72d-7
new file mode 100644
index 0000000000..5ba489f99d
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/e1dcc4e7ead6dfd1139ece7bf57d776cb9dac72d-7 differ
diff --git a/tests/fuzzers/txfetcher/corpus/e39c2de2c8937d2cbd4339b13d6a0ce94d94f8d2-8 b/tests/fuzzers/txfetcher/corpus/e39c2de2c8937d2cbd4339b13d6a0ce94d94f8d2-8
new file mode 100644
index 0000000000..0e9508938e
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/e39c2de2c8937d2cbd4339b13d6a0ce94d94f8d2-8 differ
diff --git a/tests/fuzzers/txfetcher/corpus/e72f76b9579c792e545d02fe405d9186f0d6c39b-6 b/tests/fuzzers/txfetcher/corpus/e72f76b9579c792e545d02fe405d9186f0d6c39b-6
new file mode 100644
index 0000000000..c4d34b1732
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/e72f76b9579c792e545d02fe405d9186f0d6c39b-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/eb70814d6355a4498b8f301ba8dbc34f895a9947-5 b/tests/fuzzers/txfetcher/corpus/eb70814d6355a4498b8f301ba8dbc34f895a9947-5
new file mode 100644
index 0000000000..bd57a22fb1
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/eb70814d6355a4498b8f301ba8dbc34f895a9947-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/ebdc17efe343e412634dca57cecd5a0e1ce1c1c7-5 b/tests/fuzzers/txfetcher/corpus/ebdc17efe343e412634dca57cecd5a0e1ce1c1c7-5
new file mode 100644
index 0000000000..aaa3f695ab
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/ebdc17efe343e412634dca57cecd5a0e1ce1c1c7-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/ec0a25eba8966b8f628d821b3cfbdf2dfd4bbb4c-3 b/tests/fuzzers/txfetcher/corpus/ec0a25eba8966b8f628d821b3cfbdf2dfd4bbb4c-3
new file mode 100644
index 0000000000..65cf0df801
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/ec0a25eba8966b8f628d821b3cfbdf2dfd4bbb4c-3
@@ -0,0 +1,13 @@
+�&^��o�ȗ-----BEGIN RSA TESTING KEY-----
+MIICXgIBAAKBgQDuLnQAI3mDgey3VBzWnB2L39JUU4txjeVE6myuDqkM/uGlfjb9
+SjY1bIw4iA5sBBZzHi3z0h1YV8PuxEbi4nW91IJm2gsvvZhIrHS3l6afab4pZB
+l2+XsDulrKBxKKtD1rGxlG4Ljncdabn9vLZad2bSysqz/qTAUStvqJQIDAQAB
+AoGAGRzwwir7XvBOAy5tM/uV6e+Zf6anZzus1s1K1ClbjbE6HXbnWWF/wbZGOpet
+3Zm4vD6MXc7jpTLryzQIvVdfQbRc6+MUVeLKwZatTXtZru+Jk7hx0nTPy8Jcb
+uJqFk541aEw+mMogY/xEcfbW6IOkp+4xqjlFLBEDytgbIECQQDvH/E6nk+hg4
+qzzVtxxr397vWrjrIgPbJpQvBsafG7b0dA4AFjwVbFLcj2pIMPQroozvjg1AkEA/v13/5M47K9vCxmb8QeD/aydfsgS5TeuNi8DoUBEmiSJwmaXY
+fFUtxv7XvjwjN5B30pNEbc6Iuyt7y4MQJBAIt21su4bjeLKH8Q+ph2
+fQtuUE9txblTu14q3N7gHRZB4ZMhFYyDy8CKrN2cPg/Fvyt0Xlp/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+AYiZ6PYj013sovGKFYqVXVlxtIX
+qyUBnu3X9s8ZfjZO7BAkl4R5Yl6cGhaJQYZHOe3JEMhVFaFf9Oes0UUothgiDktdQxdNLj7+5CWA==
+-----END RSASQ
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/eebe3b76aeba6deed965d17d2b024f7eae1a43f1-5 b/tests/fuzzers/txfetcher/corpus/eebe3b76aeba6deed965d17d2b024f7eae1a43f1-5
new file mode 100644
index 0000000000..20d62e15b3
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/eebe3b76aeba6deed965d17d2b024f7eae1a43f1-5 differ
diff --git a/tests/fuzzers/txfetcher/corpus/ef8741a9faf030794d98ff113f556c68a24719a5-6 b/tests/fuzzers/txfetcher/corpus/ef8741a9faf030794d98ff113f556c68a24719a5-6
new file mode 100644
index 0000000000..09fcd86d77
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/ef8741a9faf030794d98ff113f556c68a24719a5-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/efb7410d02418befeba25a43d676cc6124129125-4 b/tests/fuzzers/txfetcher/corpus/efb7410d02418befeba25a43d676cc6124129125-4
new file mode 100644
index 0000000000..2191a7324a
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/efb7410d02418befeba25a43d676cc6124129125-4
@@ -0,0 +1 @@
+88242871'392752200424452601091531672177074144720616417147514758635765020556616�
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/f6f97d781a5a749903790e07db8619866cb7c3a1-6 b/tests/fuzzers/txfetcher/corpus/f6f97d781a5a749903790e07db8619866cb7c3a1-6
new file mode 100644
index 0000000000..219a8d3682
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/f6f97d781a5a749903790e07db8619866cb7c3a1-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/f7a3cd00fa0e57742e7dbbb8283dcaea067eaf7b-5 b/tests/fuzzers/txfetcher/corpus/f7a3cd00fa0e57742e7dbbb8283dcaea067eaf7b-5
new file mode 100644
index 0000000000..f01ccd89ef
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/f7a3cd00fa0e57742e7dbbb8283dcaea067eaf7b-5
@@ -0,0 +1,2 @@
+Xyt0Xl/DoCzjA0CQQDU
+y2ptGsuSmgUtWj3NM9xuwYPm+Z/F84K6+ARYi
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/f94d60a6c556ce485ab60088291760b8be25776c-6 b/tests/fuzzers/txfetcher/corpus/f94d60a6c556ce485ab60088291760b8be25776c-6
new file mode 100644
index 0000000000..58d841ff03
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/f94d60a6c556ce485ab60088291760b8be25776c-6
@@ -0,0 +1,2 @@
+HZB4cQZde3JMNRcVFMO8dDFo
+f9OeosiDdQQl
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/f9e627b2cb82ffa1ea5e0c6d7f2802f3000b18a8-6 b/tests/fuzzers/txfetcher/corpus/f9e627b2cb82ffa1ea5e0c6d7f2802f3000b18a8-6
new file mode 100644
index 0000000000..b5dfecc1e9
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/f9e627b2cb82ffa1ea5e0c6d7f2802f3000b18a8-6 differ
diff --git a/tests/fuzzers/txfetcher/corpus/fb3775aa24e5667e658920c05ba4b7b19ff256fb-5 b/tests/fuzzers/txfetcher/corpus/fb3775aa24e5667e658920c05ba4b7b19ff256fb-5
new file mode 100644
index 0000000000..6f4927d822
--- /dev/null
+++ b/tests/fuzzers/txfetcher/corpus/fb3775aa24e5667e658920c05ba4b7b19ff256fb-5
@@ -0,0 +1 @@
+HZB4c2cPclieoverpGsumgUtWj3NMYPZ/F8t�5YlNR8dDFoiDdQQl
\ No newline at end of file
diff --git a/tests/fuzzers/txfetcher/corpus/fd6386548e119a50db96b2fa406e54924c45a2d5-6 b/tests/fuzzers/txfetcher/corpus/fd6386548e119a50db96b2fa406e54924c45a2d5-6
new file mode 100644
index 0000000000..6fff60edd4
Binary files /dev/null and b/tests/fuzzers/txfetcher/corpus/fd6386548e119a50db96b2fa406e54924c45a2d5-6 differ
diff --git a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
new file mode 100644
index 0000000000..10c7eb9424
--- /dev/null
+++ b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go
@@ -0,0 +1,199 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package txfetcher
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+ "math/rand"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/common/mclock"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/eth/fetcher"
+)
+
+var (
+ peers []string
+ txs []*types.Transaction
+)
+
+func init() {
+ // Random is nice, but we need it deterministic
+ rand := rand.New(rand.NewSource(0x3a29))
+
+ peers = make([]string, 10)
+ for i := 0; i < len(peers); i++ {
+ peers[i] = fmt.Sprintf("Peer #%d", i)
+ }
+ txs = make([]*types.Transaction, 65536) // We need to bump enough to hit all the limits
+ for i := 0; i < len(txs); i++ {
+ txs[i] = types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil)
+ }
+}
+
+func Fuzz(input []byte) int {
+ // Don't generate insanely large test cases, not much value in them
+ if len(input) > 16*1024 {
+ return -1
+ }
+ r := bytes.NewReader(input)
+
+ // Reduce the problem space for certain fuzz runs. Small tx space is better
+ // for testing clashes and in general the fetcher, but we should still run
+ // some tests with large spaces to hit potential issues on limits.
+ limit, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ switch limit % 4 {
+ case 0:
+ txs = txs[:4]
+ case 1:
+ txs = txs[:256]
+ case 2:
+ txs = txs[:4096]
+ case 3:
+ // Full run
+ }
+ // Create a fetcher and hook into it's simulated fields
+ clock := new(mclock.Simulated)
+ rand := rand.New(rand.NewSource(0x3a29)) // Same used in package tests!!!
+
+ f := fetcher.NewTxFetcherForTests(
+ func(common.Hash) bool { return false },
+ func(txs []*types.Transaction) []error {
+ return make([]error, len(txs))
+ },
+ func(string, []common.Hash) error { return nil },
+ clock, rand,
+ )
+ f.Start()
+ defer f.Stop()
+
+ // Try to throw random junk at the fetcher
+ for {
+ // Read the next command and abort if we're done
+ cmd, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ switch cmd % 4 {
+ case 0:
+ // Notify a new set of transactions:
+ // Byte 1: Peer index to announce with
+ // Byte 2: Number of hashes to announce
+ // Byte 3-4, 5-6, etc: Transaction indices (2 byte) to announce
+ peerIdx, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ peer := peers[int(peerIdx)%len(peers)]
+
+ announceCnt, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ announce := int(announceCnt) % (2 * len(txs)) // No point in generating too many duplicates
+
+ var (
+ announceIdxs = make([]int, announce)
+ announces = make([]common.Hash, announce)
+ )
+ for i := 0; i < len(announces); i++ {
+ annBuf := make([]byte, 2)
+ if n, err := r.Read(annBuf); err != nil || n != 2 {
+ return 0
+ }
+ announceIdxs[i] = (int(annBuf[0])*256 + int(annBuf[1])) % len(txs)
+ announces[i] = txs[announceIdxs[i]].Hash()
+ }
+ fmt.Println("Notify", peer, announceIdxs)
+ if err := f.Notify(peer, announces); err != nil {
+ panic(err)
+ }
+
+ case 1:
+ // Deliver a new set of transactions:
+ // Byte 1: Peer index to announce with
+ // Byte 2: Number of hashes to announce
+ // Byte 3-4, 5-6, etc: Transaction indices (2 byte) to announce
+ peerIdx, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ peer := peers[int(peerIdx)%len(peers)]
+
+ deliverCnt, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ deliver := int(deliverCnt) % (2 * len(txs)) // No point in generating too many duplicates
+
+ var (
+ deliverIdxs = make([]int, deliver)
+ deliveries = make([]*types.Transaction, deliver)
+ )
+ for i := 0; i < len(deliveries); i++ {
+ deliverBuf := make([]byte, 2)
+ if n, err := r.Read(deliverBuf); err != nil || n != 2 {
+ return 0
+ }
+ deliverIdxs[i] = (int(deliverBuf[0])*256 + int(deliverBuf[1])) % len(txs)
+ deliveries[i] = txs[deliverIdxs[i]]
+ }
+ directFlag, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ direct := (directFlag % 2) == 0
+
+ fmt.Println("Enqueue", peer, deliverIdxs, direct)
+ if err := f.Enqueue(peer, deliveries, direct); err != nil {
+ panic(err)
+ }
+
+ case 2:
+ // Drop a peer:
+ // Byte 1: Peer index to drop
+ peerIdx, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ peer := peers[int(peerIdx)%len(peers)]
+
+ fmt.Println("Drop", peer)
+ if err := f.Drop(peer); err != nil {
+ panic(err)
+ }
+
+ case 3:
+ // Move the simulated clock forward
+ // Byte 1: 100ms increment to move forward
+ tickCnt, err := r.ReadByte()
+ if err != nil {
+ return 0
+ }
+ tick := time.Duration(tickCnt) * 100 * time.Millisecond
+
+ fmt.Println("Sleep", tick)
+ clock.Run(tick)
+ }
+ }
+}
diff --git a/tests/fuzzers/whisperv6/corpus/009c5adfa4fd685caef58e1ce932fa7fb209730a b/tests/fuzzers/whisperv6/corpus/009c5adfa4fd685caef58e1ce932fa7fb209730a
new file mode 100644
index 0000000000..af2f082673
Binary files /dev/null and b/tests/fuzzers/whisperv6/corpus/009c5adfa4fd685caef58e1ce932fa7fb209730a differ
diff --git a/tests/fuzzers/whisperv6/whisper-fuzzer.go b/tests/fuzzers/whisperv6/whisper-fuzzer.go
new file mode 100644
index 0000000000..379e4224fd
--- /dev/null
+++ b/tests/fuzzers/whisperv6/whisper-fuzzer.go
@@ -0,0 +1,90 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package whisperv6
+
+import (
+ "bytes"
+
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/rlp"
+ "github.com/ethereum/go-ethereum/whisper/whisperv6"
+)
+
+type MessageParams struct {
+ Topic whisperv6.TopicType
+ WorkTime uint32
+ TTL uint32
+ KeySym []byte
+ Payload []byte
+}
+
+//export fuzzer_entry
+func Fuzz(input []byte) int {
+
+ var paramsDecoded MessageParams
+ err := rlp.DecodeBytes(input, ¶msDecoded)
+ if err != nil {
+ return 0
+ }
+ var params whisperv6.MessageParams
+ params.KeySym = make([]byte, 32)
+ if len(paramsDecoded.KeySym) <= 32 {
+ copy(params.KeySym, paramsDecoded.KeySym)
+ }
+ if input[0] == 255 {
+ params.PoW = 0.01
+ params.WorkTime = 1
+ } else {
+ params.PoW = 0
+ params.WorkTime = 0
+ }
+ params.TTL = paramsDecoded.TTL
+ params.Payload = paramsDecoded.Payload
+ text := make([]byte, 0, 512)
+ text = append(text, params.Payload...)
+ params.Topic = paramsDecoded.Topic
+ params.Src, err = crypto.GenerateKey()
+ if err != nil {
+ return 0
+ }
+ msg, err := whisperv6.NewSentMessage(¶ms)
+ if err != nil {
+ panic(err)
+ //return
+ }
+ env, err := msg.Wrap(¶ms)
+ if err != nil {
+ panic(err)
+ }
+ decrypted, err := env.OpenSymmetric(params.KeySym)
+ if err != nil {
+ panic(err)
+ }
+ if !decrypted.ValidateAndParse() {
+ panic("ValidateAndParse failed")
+ }
+ if !bytes.Equal(text, decrypted.Payload) {
+ panic("text != decrypted.Payload")
+ }
+ if len(decrypted.Signature) != 65 {
+ panic("Unexpected signature length")
+ }
+ if !whisperv6.IsPubKeyEqual(decrypted.Src, ¶ms.Src.PublicKey) {
+ panic("Unexpected public key")
+ }
+ return 0
+}
diff --git a/trie/committer.go b/trie/committer.go
new file mode 100644
index 0000000000..eacefdff11
--- /dev/null
+++ b/trie/committer.go
@@ -0,0 +1,279 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package trie
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/rlp"
+ "golang.org/x/crypto/sha3"
+)
+
+// leafChanSize is the size of the leafCh. It's a pretty arbitrary number, to allow
+// some paralellism but not incur too much memory overhead.
+const leafChanSize = 200
+
+// leaf represents a trie leaf value
+type leaf struct {
+ size int // size of the rlp data (estimate)
+ hash common.Hash // hash of rlp data
+ node node // the node to commit
+ vnodes bool // set to true if the node (possibly) contains a valueNode
+}
+
+// committer is a type used for the trie Commit operation. A committer has some
+// internal preallocated temp space, and also a callback that is invoked when
+// leaves are committed. The leafs are passed through the `leafCh`, to allow
+// some level of paralellism.
+// By 'some level' of parallelism, it's still the case that all leaves will be
+// processed sequentially - onleaf will never be called in parallel or out of order.
+type committer struct {
+ tmp sliceBuffer
+ sha keccakState
+
+ onleaf LeafCallback
+ leafCh chan *leaf
+}
+
+// committers live in a global sync.Pool
+var committerPool = sync.Pool{
+ New: func() interface{} {
+ return &committer{
+ tmp: make(sliceBuffer, 0, 550), // cap is as large as a full fullNode.
+ sha: sha3.NewLegacyKeccak256().(keccakState),
+ }
+ },
+}
+
+// newCommitter creates a new committer or picks one from the pool.
+func newCommitter() *committer {
+ return committerPool.Get().(*committer)
+}
+
+func returnCommitterToPool(h *committer) {
+ h.onleaf = nil
+ h.leafCh = nil
+ committerPool.Put(h)
+}
+
+// commitNeeded returns 'false' if the given node is already in sync with db
+func (c *committer) commitNeeded(n node) bool {
+ hash, dirty := n.cache()
+ return hash == nil || dirty
+}
+
+// commit collapses a node down into a hash node and inserts it into the database
+func (c *committer) Commit(n node, db *Database) (hashNode, error) {
+ if db == nil {
+ return nil, errors.New("no db provided")
+ }
+ h, err := c.commit(n, db, true)
+ if err != nil {
+ return nil, err
+ }
+ return h.(hashNode), nil
+}
+
+// commit collapses a node down into a hash node and inserts it into the database
+func (c *committer) commit(n node, db *Database, force bool) (node, error) {
+ // if this path is clean, use available cached data
+ hash, dirty := n.cache()
+ if hash != nil && !dirty {
+ return hash, nil
+ }
+ // Commit children, then parent, and remove remove the dirty flag.
+ switch cn := n.(type) {
+ case *shortNode:
+ // Commit child
+ collapsed := cn.copy()
+ if _, ok := cn.Val.(valueNode); !ok {
+ if childV, err := c.commit(cn.Val, db, false); err != nil {
+ return nil, err
+ } else {
+ collapsed.Val = childV
+ }
+ }
+ // The key needs to be copied, since we're delivering it to database
+ collapsed.Key = hexToCompact(cn.Key)
+ hashedNode := c.store(collapsed, db, force, true)
+ if hn, ok := hashedNode.(hashNode); ok {
+ return hn, nil
+ } else {
+ return collapsed, nil
+ }
+ case *fullNode:
+ hashedKids, hasVnodes, err := c.commitChildren(cn, db, force)
+ if err != nil {
+ return nil, err
+ }
+ collapsed := cn.copy()
+ collapsed.Children = hashedKids
+
+ hashedNode := c.store(collapsed, db, force, hasVnodes)
+ if hn, ok := hashedNode.(hashNode); ok {
+ return hn, nil
+ } else {
+ return collapsed, nil
+ }
+ case valueNode:
+ return c.store(cn, db, force, false), nil
+ // hashnodes aren't stored
+ case hashNode:
+ return cn, nil
+ }
+ return hash, nil
+}
+
+// commitChildren commits the children of the given fullnode
+func (c *committer) commitChildren(n *fullNode, db *Database, force bool) ([17]node, bool, error) {
+ var children [17]node
+ var hasValueNodeChildren = false
+ for i, child := range n.Children {
+ if child == nil {
+ continue
+ }
+ hnode, err := c.commit(child, db, false)
+ if err != nil {
+ return children, false, err
+ }
+ children[i] = hnode
+ if _, ok := hnode.(valueNode); ok {
+ hasValueNodeChildren = true
+ }
+ }
+ return children, hasValueNodeChildren, nil
+}
+
+// store hashes the node n and if we have a storage layer specified, it writes
+// the key/value pair to it and tracks any node->child references as well as any
+// node->external trie references.
+func (c *committer) store(n node, db *Database, force bool, hasVnodeChildren bool) node {
+ // Larger nodes are replaced by their hash and stored in the database.
+ var (
+ hash, _ = n.cache()
+ size int
+ )
+ if hash == nil {
+ if vn, ok := n.(valueNode); ok {
+ c.tmp.Reset()
+ if err := rlp.Encode(&c.tmp, vn); err != nil {
+ panic("encode error: " + err.Error())
+ }
+ size = len(c.tmp)
+ if size < 32 && !force {
+ return n // Nodes smaller than 32 bytes are stored inside their parent
+ }
+ hash = c.makeHashNode(c.tmp)
+ } else {
+ // This was not generated - must be a small node stored in the parent
+ // No need to do anything here
+ return n
+ }
+ } else {
+ // We have the hash already, estimate the RLP encoding-size of the node.
+ // The size is used for mem tracking, does not need to be exact
+ size = estimateSize(n)
+ }
+ // If we're using channel-based leaf-reporting, send to channel.
+ // The leaf channel will be active only when there an active leaf-callback
+ if c.leafCh != nil {
+ c.leafCh <- &leaf{
+ size: size,
+ hash: common.BytesToHash(hash),
+ node: n,
+ vnodes: hasVnodeChildren,
+ }
+ } else if db != nil {
+ // No leaf-callback used, but there's still a database. Do serial
+ // insertion
+ db.lock.Lock()
+ db.insert(common.BytesToHash(hash), size, n)
+ db.lock.Unlock()
+ }
+ return hash
+}
+
+// commitLoop does the actual insert + leaf callback for nodes
+func (c *committer) commitLoop(db *Database) {
+ for item := range c.leafCh {
+ var (
+ hash = item.hash
+ size = item.size
+ n = item.node
+ hasVnodes = item.vnodes
+ )
+ // We are pooling the trie nodes into an intermediate memory cache
+ db.lock.Lock()
+ db.insert(hash, size, n)
+ db.lock.Unlock()
+ if c.onleaf != nil && hasVnodes {
+ switch n := n.(type) {
+ case *shortNode:
+ if child, ok := n.Val.(valueNode); ok {
+ c.onleaf(child, hash)
+ }
+ case *fullNode:
+ for i := 0; i < 16; i++ {
+ if child, ok := n.Children[i].(valueNode); ok {
+ c.onleaf(child, hash)
+ }
+ }
+ }
+ }
+ }
+}
+
+func (c *committer) makeHashNode(data []byte) hashNode {
+ n := make(hashNode, c.sha.Size())
+ c.sha.Reset()
+ c.sha.Write(data)
+ c.sha.Read(n)
+ return n
+}
+
+// estimateSize estimates the size of an rlp-encoded node, without actually
+// rlp-encoding it (zero allocs). This method has been experimentally tried, and with a trie
+// with 1000 leafs, the only errors above 1% are on small shortnodes, where this
+// method overestimates by 2 or 3 bytes (e.g. 37 instead of 35)
+func estimateSize(n node) int {
+ switch n := n.(type) {
+ case *shortNode:
+ // A short node contains a compacted key, and a value.
+ return 3 + len(n.Key) + estimateSize(n.Val)
+ case *fullNode:
+ // A full node contains up to 16 hashes (some nils), and a key
+ s := 3
+ for i := 0; i < 16; i++ {
+ if child := n.Children[i]; child != nil {
+ s += estimateSize(child)
+ } else {
+ s += 1
+ }
+ }
+ return s
+ case valueNode:
+ return 1 + len(n)
+ case hashNode:
+ return 1 + len(n)
+ default:
+ panic(fmt.Sprintf("node type %T", n))
+
+ }
+}
diff --git a/trie/database.go b/trie/database.go
index 2fa9a9ebc6..522a5d1a7a 100644
--- a/trie/database.go
+++ b/trie/database.go
@@ -17,7 +17,6 @@
package trie
import (
- "encoding/binary"
"errors"
"fmt"
"io"
@@ -39,6 +38,11 @@ var (
memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)
+ memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
+ memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
+ memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
+ memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)
+
memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)
@@ -179,35 +183,31 @@ func (n *cachedNode) obj(hash common.Hash) node {
return expandNode(hash[:], n.node)
}
-// childs returns all the tracked children of this node, both the implicit ones
-// from inside the node as well as the explicit ones from outside the node.
-func (n *cachedNode) childs() []common.Hash {
- children := make([]common.Hash, 0, 16)
+// forChilds invokes the callback for all the tracked children of this node,
+// both the implicit ones from inside the node as well as the explicit ones
+//from outside the node.
+func (n *cachedNode) forChilds(onChild func(hash common.Hash)) {
for child := range n.children {
- children = append(children, child)
+ onChild(child)
}
if _, ok := n.node.(rawNode); !ok {
- gatherChildren(n.node, &children)
+ forGatherChildren(n.node, onChild)
}
- return children
}
-// gatherChildren traverses the node hierarchy of a collapsed storage node and
-// retrieves all the hashnode children.
-func gatherChildren(n node, children *[]common.Hash) {
+// forGatherChildren traverses the node hierarchy of a collapsed storage node and
+// invokes the callback for all the hashnode children.
+func forGatherChildren(n node, onChild func(hash common.Hash)) {
switch n := n.(type) {
case *rawShortNode:
- gatherChildren(n.Val, children)
-
+ forGatherChildren(n.Val, onChild)
case rawFullNode:
for i := 0; i < 16; i++ {
- gatherChildren(n[i], children)
+ forGatherChildren(n[i], onChild)
}
case hashNode:
- *children = append(*children, common.BytesToHash(n))
-
+ onChild(common.BytesToHash(n))
case valueNode, nil:
-
default:
panic(fmt.Sprintf("unknown node type: %T", n))
}
@@ -275,19 +275,6 @@ func expandNode(hash hashNode, n node) node {
}
}
-// trienodeHasher is a struct to be used with BigCache, which uses a Hasher to
-// determine which shard to place an entry into. It's not a cryptographic hash,
-// just to provide a bit of anti-collision (default is FNV64a).
-//
-// Since trie keys are already hashes, we can just use the key directly to
-// map shard id.
-type trienodeHasher struct{}
-
-// Sum64 implements the bigcache.Hasher interface.
-func (t trienodeHasher) Sum64(key string) uint64 {
- return binary.BigEndian.Uint64([]byte(key))
-}
-
// NewDatabase creates a new trie database to store ephemeral trie content before
// its written out to disk or garbage collected. No read cache is created, so all
// data retrievals will hit the underlying disk database.
@@ -326,29 +313,31 @@ func (db *Database) InsertBlob(hash common.Hash, blob []byte) {
db.lock.Lock()
defer db.lock.Unlock()
- db.insert(hash, blob, rawNode(blob))
+ db.insert(hash, len(blob), rawNode(blob))
}
// insert inserts a collapsed trie node into the memory database. This method is
// a more generic version of InsertBlob, supporting both raw blob insertions as
-// well ex trie node insertions. The blob must always be specified to allow proper
+// well ex trie node insertions. The blob size must be specified to allow proper
// size tracking.
-func (db *Database) insert(hash common.Hash, blob []byte, node node) {
+func (db *Database) insert(hash common.Hash, size int, node node) {
// If the node's already cached, skip
if _, ok := db.dirties[hash]; ok {
return
}
+ memcacheDirtyWriteMeter.Mark(int64(size))
+
// Create the cached entry for this node
entry := &cachedNode{
node: simplifyNode(node),
- size: uint16(len(blob)),
+ size: uint16(size),
flushPrev: db.newest,
}
- for _, child := range entry.childs() {
+ entry.forChilds(func(child common.Hash) {
if c := db.dirties[child]; c != nil {
c.parents++
}
- }
+ })
db.dirties[hash] = entry
// Update the flush-list endpoints
@@ -389,8 +378,12 @@ func (db *Database) node(hash common.Hash) node {
db.lock.RUnlock()
if dirty != nil {
+ memcacheDirtyHitMeter.Mark(1)
+ memcacheDirtyReadMeter.Mark(int64(dirty.size))
return dirty.obj(hash)
}
+ memcacheDirtyMissMeter.Mark(1)
+
// Content unavailable in memory, attempt to retrieve from disk
enc, err := db.diskdb.Get(hash[:])
if err != nil || enc == nil {
@@ -425,8 +418,12 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) {
db.lock.RUnlock()
if dirty != nil {
+ memcacheDirtyHitMeter.Mark(1)
+ memcacheDirtyReadMeter.Mark(int64(dirty.size))
return dirty.rlp(), nil
}
+ memcacheDirtyMissMeter.Mark(1)
+
// Content unavailable in memory, attempt to retrieve from disk
enc, err := db.diskdb.Get(hash[:])
if err == nil && enc != nil {
@@ -572,9 +569,9 @@ func (db *Database) dereference(child common.Hash, parent common.Hash) {
db.dirties[node.flushNext].flushPrev = node.flushPrev
}
// Dereference all children and delete the node
- for _, hash := range node.childs() {
+ node.forChilds(func(hash common.Hash) {
db.dereference(hash, child)
- }
+ })
delete(db.dirties, child)
db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
if node.children != nil {
@@ -768,10 +765,14 @@ func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleane
if !ok {
return nil
}
- for _, child := range node.childs() {
- if err := db.commit(child, batch, uncacher); err != nil {
- return err
+ var err error
+ node.forChilds(func(child common.Hash) {
+ if err == nil {
+ err = db.commit(child, batch, uncacher)
}
+ })
+ if err != nil {
+ return err
}
if err := batch.Put(hash[:], node.rlp()); err != nil {
return err
@@ -829,6 +830,7 @@ func (c *cleaner) Put(key []byte, rlp []byte) error {
// Move the flushed node into the clean cache to prevent insta-reloads
if c.db.cleans != nil {
c.db.cleans.Set(hash[:], rlp)
+ memcacheCleanWriteMeter.Mark(int64(len(rlp)))
}
return nil
}
diff --git a/trie/hasher.go b/trie/hasher.go
index 54f6a9de2b..8e8eec9f61 100644
--- a/trie/hasher.go
+++ b/trie/hasher.go
@@ -1,4 +1,4 @@
-// Copyright 2016 The go-ethereum Authors
+// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -20,17 +20,10 @@ import (
"hash"
"sync"
- "github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/rlp"
"golang.org/x/crypto/sha3"
)
-type hasher struct {
- tmp sliceBuffer
- sha keccakState
- onleaf LeafCallback
-}
-
// keccakState wraps sha3.state. In addition to the usual hash methods, it also supports
// Read to get a variable amount of data from the hash state. Read is faster than Sum
// because it doesn't copy the internal state, but also modifies the internal state.
@@ -50,7 +43,15 @@ func (b *sliceBuffer) Reset() {
*b = (*b)[:0]
}
-// hashers live in a global db.
+// hasher is a type used for the trie Hash operation. A hasher has some
+// internal preallocated temp space
+type hasher struct {
+ sha keccakState
+ tmp sliceBuffer
+ parallel bool // Whether to use paralallel threads when hashing
+}
+
+// hasherPool holds pureHashers
var hasherPool = sync.Pool{
New: func() interface{} {
return &hasher{
@@ -60,9 +61,9 @@ var hasherPool = sync.Pool{
},
}
-func newHasher(onleaf LeafCallback) *hasher {
+func newHasher(parallel bool) *hasher {
h := hasherPool.Get().(*hasher)
- h.onleaf = onleaf
+ h.parallel = parallel
return h
}
@@ -72,144 +73,143 @@ func returnHasherToPool(h *hasher) {
// hash collapses a node down into a hash node, also returning a copy of the
// original node initialized with the computed hash to replace the original one.
-func (h *hasher) hash(n node, db *Database, force bool) (node, node, error) {
- // If we're not storing the node, just hashing, use available cached data
- if hash, dirty := n.cache(); hash != nil {
- if db == nil {
- return hash, n, nil
- }
- if !dirty {
- switch n.(type) {
- case *fullNode, *shortNode:
- return hash, hash, nil
- default:
- return hash, n, nil
- }
- }
+func (h *hasher) hash(n node, force bool) (hashed node, cached node) {
+ // We're not storing the node, just hashing, use available cached data
+ if hash, _ := n.cache(); hash != nil {
+ return hash, n
}
// Trie not processed yet or needs storage, walk the children
- collapsed, cached, err := h.hashChildren(n, db)
- if err != nil {
- return hashNode{}, n, err
- }
- hashed, err := h.store(collapsed, db, force)
- if err != nil {
- return hashNode{}, n, err
- }
- // Cache the hash of the node for later reuse and remove
- // the dirty flag in commit mode. It's fine to assign these values directly
- // without copying the node first because hashChildren copies it.
- cachedHash, _ := hashed.(hashNode)
- switch cn := cached.(type) {
+ switch n := n.(type) {
case *shortNode:
- cn.flags.hash = cachedHash
- if db != nil {
- cn.flags.dirty = false
+ collapsed, cached := h.hashShortNodeChildren(n)
+ hashed := h.shortnodeToHash(collapsed, force)
+ // We need to retain the possibly _not_ hashed node, in case it was too
+ // small to be hashed
+ if hn, ok := hashed.(hashNode); ok {
+ cached.flags.hash = hn
+ } else {
+ cached.flags.hash = nil
}
+ return hashed, cached
case *fullNode:
- cn.flags.hash = cachedHash
- if db != nil {
- cn.flags.dirty = false
+ collapsed, cached := h.hashFullNodeChildren(n)
+ hashed = h.fullnodeToHash(collapsed, force)
+ if hn, ok := hashed.(hashNode); ok {
+ cached.flags.hash = hn
+ } else {
+ cached.flags.hash = nil
}
+ return hashed, cached
+ default:
+ // Value and hash nodes don't have children so they're left as were
+ return n, n
}
- return hashed, cached, nil
}
-// hashChildren replaces the children of a node with their hashes if the encoded
-// size of the child is larger than a hash, returning the collapsed node as well
-// as a replacement for the original node with the child hashes cached in.
-func (h *hasher) hashChildren(original node, db *Database) (node, node, error) {
- var err error
-
- switch n := original.(type) {
- case *shortNode:
- // Hash the short node's child, caching the newly hashed subtree
- collapsed, cached := n.copy(), n.copy()
- collapsed.Key = hexToCompact(n.Key)
- cached.Key = common.CopyBytes(n.Key)
-
- if _, ok := n.Val.(valueNode); !ok {
- collapsed.Val, cached.Val, err = h.hash(n.Val, db, false)
- if err != nil {
- return original, original, err
- }
- }
- return collapsed, cached, nil
-
- case *fullNode:
- // Hash the full node's children, caching the newly hashed subtrees
- collapsed, cached := n.copy(), n.copy()
+// hashShortNodeChildren collapses the short node. The returned collapsed node
+// holds a live reference to the Key, and must not be modified.
+// The cached
+func (h *hasher) hashShortNodeChildren(n *shortNode) (collapsed, cached *shortNode) {
+ // Hash the short node's child, caching the newly hashed subtree
+ collapsed, cached = n.copy(), n.copy()
+ // Previously, we did copy this one. We don't seem to need to actually
+ // do that, since we don't overwrite/reuse keys
+ //cached.Key = common.CopyBytes(n.Key)
+ collapsed.Key = hexToCompact(n.Key)
+ // Unless the child is a valuenode or hashnode, hash it
+ switch n.Val.(type) {
+ case *fullNode, *shortNode:
+ collapsed.Val, cached.Val = h.hash(n.Val, false)
+ }
+ return collapsed, cached
+}
+func (h *hasher) hashFullNodeChildren(n *fullNode) (collapsed *fullNode, cached *fullNode) {
+ // Hash the full node's children, caching the newly hashed subtrees
+ cached = n.copy()
+ collapsed = n.copy()
+ if h.parallel {
+ var wg sync.WaitGroup
+ wg.Add(16)
for i := 0; i < 16; i++ {
- if n.Children[i] != nil {
- collapsed.Children[i], cached.Children[i], err = h.hash(n.Children[i], db, false)
- if err != nil {
- return original, original, err
+ go func(i int) {
+ hasher := newHasher(false)
+ if child := n.Children[i]; child != nil {
+ collapsed.Children[i], cached.Children[i] = hasher.hash(child, false)
+ } else {
+ collapsed.Children[i] = nilValueNode
}
+ returnHasherToPool(hasher)
+ wg.Done()
+ }(i)
+ }
+ wg.Wait()
+ } else {
+ for i := 0; i < 16; i++ {
+ if child := n.Children[i]; child != nil {
+ collapsed.Children[i], cached.Children[i] = h.hash(child, false)
+ } else {
+ collapsed.Children[i] = nilValueNode
}
}
- cached.Children[16] = n.Children[16]
- return collapsed, cached, nil
-
- default:
- // Value and hash nodes don't have children so they're left as were
- return n, original, nil
}
+ return collapsed, cached
}
-// store hashes the node n and if we have a storage layer specified, it writes
-// the key/value pair to it and tracks any node->child references as well as any
-// node->external trie references.
-func (h *hasher) store(n node, db *Database, force bool) (node, error) {
- // Don't store hashes or empty nodes.
- if _, isHash := n.(hashNode); n == nil || isHash {
- return n, nil
- }
- // Generate the RLP encoding of the node
+// shortnodeToHash creates a hashNode from a shortNode. The supplied shortnode
+// should have hex-type Key, which will be converted (without modification)
+// into compact form for RLP encoding.
+// If the rlp data is smaller than 32 bytes, `nil` is returned.
+func (h *hasher) shortnodeToHash(n *shortNode, force bool) node {
h.tmp.Reset()
if err := rlp.Encode(&h.tmp, n); err != nil {
panic("encode error: " + err.Error())
}
+
if len(h.tmp) < 32 && !force {
- return n, nil // Nodes smaller than 32 bytes are stored inside their parent
- }
- // Larger nodes are replaced by their hash and stored in the database.
- hash, _ := n.cache()
- if hash == nil {
- hash = h.makeHashNode(h.tmp)
+ return n // Nodes smaller than 32 bytes are stored inside their parent
}
+ return h.hashData(h.tmp)
+}
- if db != nil {
- // We are pooling the trie nodes into an intermediate memory cache
- hash := common.BytesToHash(hash)
-
- db.lock.Lock()
- db.insert(hash, h.tmp, n)
- db.lock.Unlock()
+// fullnodeToHash is used to create a hashNode from a set of hashNodes (which
+// may contain nil values)
+func (h *hasher) fullnodeToHash(n *fullNode, force bool) node {
+ h.tmp.Reset()
+ // Generate the RLP encoding of the node
+ if err := n.EncodeRLP(&h.tmp); err != nil {
+ panic("encode error: " + err.Error())
+ }
- // Track external references from account->storage trie
- if h.onleaf != nil {
- switch n := n.(type) {
- case *shortNode:
- if child, ok := n.Val.(valueNode); ok {
- h.onleaf(child, hash)
- }
- case *fullNode:
- for i := 0; i < 16; i++ {
- if child, ok := n.Children[i].(valueNode); ok {
- h.onleaf(child, hash)
- }
- }
- }
- }
+ if len(h.tmp) < 32 && !force {
+ return n // Nodes smaller than 32 bytes are stored inside their parent
}
- return hash, nil
+ return h.hashData(h.tmp)
}
-func (h *hasher) makeHashNode(data []byte) hashNode {
- n := make(hashNode, h.sha.Size())
+// hashData hashes the provided data
+func (h *hasher) hashData(data []byte) hashNode {
+ n := make(hashNode, 32)
h.sha.Reset()
h.sha.Write(data)
h.sha.Read(n)
return n
}
+
+// proofHash is used to construct trie proofs, and returns the 'collapsed'
+// node (for later RLP encoding) as well as the hashed node -- unless the
+// node is smaller than 32 bytes, in which case it will be returned as is.
+// This method does not do anything on value- or hash-nodes.
+func (h *hasher) proofHash(original node) (collapsed, hashed node) {
+ switch n := original.(type) {
+ case *shortNode:
+ sn, _ := h.hashShortNodeChildren(n)
+ return sn, h.shortnodeToHash(sn, false)
+ case *fullNode:
+ fn, _ := h.hashFullNodeChildren(n)
+ return fn, h.fullnodeToHash(fn, false)
+ default:
+ // Value and hash nodes don't have children so they're left as were
+ return n, n
+ }
+}
diff --git a/trie/iterator.go b/trie/iterator.go
index 8e84dee3b6..bb4025d8f3 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -182,15 +182,13 @@ func (it *nodeIterator) LeafBlob() []byte {
func (it *nodeIterator) LeafProof() [][]byte {
if len(it.stack) > 0 {
if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
- hasher := newHasher(nil)
+ hasher := newHasher(false)
defer returnHasherToPool(hasher)
-
proofs := make([][]byte, 0, len(it.stack))
for i, item := range it.stack[:len(it.stack)-1] {
// Gather nodes that end up as hash nodes (or the root)
- node, _, _ := hasher.hashChildren(item.node, nil)
- hashed, _ := hasher.store(node, nil, false)
+ node, hashed := hasher.proofHash(item.node)
if _, ok := hashed.(hashNode); ok || i == 0 {
enc, _ := rlp.EncodeToBytes(node)
proofs = append(proofs, enc)
diff --git a/trie/node_test.go b/trie/node_test.go
new file mode 100644
index 0000000000..52720f1c77
--- /dev/null
+++ b/trie/node_test.go
@@ -0,0 +1,94 @@
+// Copyright 2019 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package trie
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/ethereum/go-ethereum/rlp"
+)
+
+func newTestFullNode(v []byte) []interface{} {
+ fullNodeData := []interface{}{}
+ for i := 0; i < 16; i++ {
+ k := bytes.Repeat([]byte{byte(i + 1)}, 32)
+ fullNodeData = append(fullNodeData, k)
+ }
+ fullNodeData = append(fullNodeData, v)
+ return fullNodeData
+}
+
+func TestDecodeNestedNode(t *testing.T) {
+ fullNodeData := newTestFullNode([]byte("fullnode"))
+
+ data := [][]byte{}
+ for i := 0; i < 16; i++ {
+ data = append(data, nil)
+ }
+ data = append(data, []byte("subnode"))
+ fullNodeData[15] = data
+
+ buf := bytes.NewBuffer([]byte{})
+ rlp.Encode(buf, fullNodeData)
+
+ if _, err := decodeNode([]byte("testdecode"), buf.Bytes()); err != nil {
+ t.Fatalf("decode nested full node err: %v", err)
+ }
+}
+
+func TestDecodeFullNodeWrongSizeChild(t *testing.T) {
+ fullNodeData := newTestFullNode([]byte("wrongsizechild"))
+ fullNodeData[0] = []byte("00")
+ buf := bytes.NewBuffer([]byte{})
+ rlp.Encode(buf, fullNodeData)
+
+ _, err := decodeNode([]byte("testdecode"), buf.Bytes())
+ if _, ok := err.(*decodeError); !ok {
+ t.Fatalf("decodeNode returned wrong err: %v", err)
+ }
+}
+
+func TestDecodeFullNodeWrongNestedFullNode(t *testing.T) {
+ fullNodeData := newTestFullNode([]byte("fullnode"))
+
+ data := [][]byte{}
+ for i := 0; i < 16; i++ {
+ data = append(data, []byte("123456"))
+ }
+ data = append(data, []byte("subnode"))
+ fullNodeData[15] = data
+
+ buf := bytes.NewBuffer([]byte{})
+ rlp.Encode(buf, fullNodeData)
+
+ _, err := decodeNode([]byte("testdecode"), buf.Bytes())
+ if _, ok := err.(*decodeError); !ok {
+ t.Fatalf("decodeNode returned wrong err: %v", err)
+ }
+}
+
+func TestDecodeFullNode(t *testing.T) {
+ fullNodeData := newTestFullNode([]byte("decodefullnode"))
+ buf := bytes.NewBuffer([]byte{})
+ rlp.Encode(buf, fullNodeData)
+
+ _, err := decodeNode([]byte("testdecode"), buf.Bytes())
+ if err != nil {
+ t.Fatalf("decode full node err: %v", err)
+ }
+}
diff --git a/trie/proof.go b/trie/proof.go
index 9985e730dd..58ca69c680 100644
--- a/trie/proof.go
+++ b/trie/proof.go
@@ -64,26 +64,24 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e
panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
}
}
- hasher := newHasher(nil)
+ hasher := newHasher(false)
defer returnHasherToPool(hasher)
for i, n := range nodes {
- // Don't bother checking for errors here since hasher panics
- // if encoding doesn't work and we're not writing to any database.
- n, _, _ = hasher.hashChildren(n, nil)
- hn, _ := hasher.store(n, nil, false)
+ if fromLevel > 0 {
+ fromLevel--
+ continue
+ }
+ var hn node
+ n, hn = hasher.proofHash(n)
if hash, ok := hn.(hashNode); ok || i == 0 {
// If the node's database encoding is a hash (or is the
// root node), it becomes a proof element.
- if fromLevel > 0 {
- fromLevel--
- } else {
- enc, _ := rlp.EncodeToBytes(n)
- if !ok {
- hash = hasher.makeHashNode(enc)
- }
- proofDb.Put(hash, enc)
+ enc, _ := rlp.EncodeToBytes(n)
+ if !ok {
+ hash = hasher.hashData(enc)
}
+ proofDb.Put(hash, enc)
}
}
return nil
diff --git a/trie/secure_trie.go b/trie/secure_trie.go
index fbc591ed10..955771495b 100644
--- a/trie/secure_trie.go
+++ b/trie/secure_trie.go
@@ -176,7 +176,7 @@ func (t *SecureTrie) NodeIterator(start []byte) NodeIterator {
// The caller must not hold onto the return value because it will become
// invalid on the next call to hashKey or secKey.
func (t *SecureTrie) hashKey(key []byte) []byte {
- h := newHasher(nil)
+ h := newHasher(false)
h.sha.Reset()
h.sha.Write(key)
buf := h.sha.Sum(t.hashKeyBuf[:0])
diff --git a/trie/trie.go b/trie/trie.go
index 920e331fd6..78e2eff534 100644
--- a/trie/trie.go
+++ b/trie/trie.go
@@ -20,6 +20,7 @@ package trie
import (
"bytes"
"fmt"
+ "sync"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
@@ -47,6 +48,10 @@ type LeafCallback func(leaf []byte, parent common.Hash) error
type Trie struct {
db *Database
root node
+	// Keep track of the number of leaves which have been inserted since the last
+ // hashing operation. This number will not directly map to the number of
+ // actually unhashed nodes
+ unhashed int
}
// newFlag returns the cache flag value for a newly created node.
@@ -162,6 +167,7 @@ func (t *Trie) Update(key, value []byte) {
//
// If a node was not found in the database, a MissingNodeError is returned.
func (t *Trie) TryUpdate(key, value []byte) error {
+ t.unhashed++
k := keybytesToHex(key)
if len(value) != 0 {
_, n, err := t.insert(t.root, nil, k, valueNode(value))
@@ -258,6 +264,7 @@ func (t *Trie) Delete(key []byte) {
// TryDelete removes any existing value for key from the trie.
// If a node was not found in the database, a MissingNodeError is returned.
func (t *Trie) TryDelete(key []byte) error {
+ t.unhashed++
k := keybytesToHex(key)
_, n, err := t.delete(t.root, nil, k)
if err != nil {
@@ -404,7 +411,7 @@ func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) {
// Hash returns the root hash of the trie. It does not write to the
// database and can be used even if the trie doesn't have one.
func (t *Trie) Hash() common.Hash {
- hash, cached, _ := t.hashRoot(nil, nil)
+ hash, cached, _ := t.hashRoot(nil)
t.root = cached
return common.BytesToHash(hash.(hashNode))
}
@@ -415,19 +422,54 @@ func (t *Trie) Commit(onleaf LeafCallback) (root common.Hash, err error) {
if t.db == nil {
panic("commit called on trie with nil database")
}
- hash, cached, err := t.hashRoot(t.db, onleaf)
+ if t.root == nil {
+ return emptyRoot, nil
+ }
+ rootHash := t.Hash()
+ h := newCommitter()
+ defer returnCommitterToPool(h)
+ // Do a quick check if we really need to commit, before we spin
+ // up goroutines. This can happen e.g. if we load a trie for reading storage
+ // values, but don't write to it.
+ if !h.commitNeeded(t.root) {
+ return rootHash, nil
+ }
+ var wg sync.WaitGroup
+ if onleaf != nil {
+ h.onleaf = onleaf
+ h.leafCh = make(chan *leaf, leafChanSize)
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ h.commitLoop(t.db)
+ }()
+ }
+ var newRoot hashNode
+ newRoot, err = h.Commit(t.root, t.db)
+ if onleaf != nil {
+ // The leafch is created in newCommitter if there was an onleaf callback
+ // provided. The commitLoop only _reads_ from it, and the commit
+ // operation was the sole writer. Therefore, it's safe to close this
+ // channel here.
+ close(h.leafCh)
+ wg.Wait()
+ }
if err != nil {
return common.Hash{}, err
}
- t.root = cached
- return common.BytesToHash(hash.(hashNode)), nil
+ t.root = newRoot
+ return rootHash, nil
}
-func (t *Trie) hashRoot(db *Database, onleaf LeafCallback) (node, node, error) {
+// hashRoot calculates the root hash of the given trie
+func (t *Trie) hashRoot(db *Database) (node, node, error) {
if t.root == nil {
return hashNode(emptyRoot.Bytes()), nil, nil
}
- h := newHasher(onleaf)
+ // If the number of changes is below 100, we let one thread handle it
+ h := newHasher(t.unhashed >= 100)
defer returnHasherToPool(h)
- return h.hash(t.root, db, true)
+ hashed, cached := h.hash(t.root, true)
+ t.unhashed = 0
+ return hashed, cached, nil
}
diff --git a/trie/trie_test.go b/trie/trie_test.go
index e53ac568e9..172572dddc 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -161,7 +161,7 @@ func TestInsert(t *testing.T) {
exp := common.HexToHash("8aad789dff2f538bca5d8ea56e8abe10f4c7ba3a5dea95fea4cd6e7c3a1168d3")
root := trie.Hash()
if root != exp {
- t.Errorf("exp %x got %x", exp, root)
+ t.Errorf("case 1: exp %x got %x", exp, root)
}
trie = newEmpty()
@@ -173,7 +173,7 @@ func TestInsert(t *testing.T) {
t.Fatalf("commit error: %v", err)
}
if root != exp {
- t.Errorf("exp %x got %x", exp, root)
+ t.Errorf("case 2: exp %x got %x", exp, root)
}
}
@@ -316,6 +316,40 @@ func TestLargeValue(t *testing.T) {
trie.Hash()
}
+// TestRandomCases tests some cases that were found via random fuzzing
+func TestRandomCases(t *testing.T) {
+ var rt []randTestStep = []randTestStep{
+ {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 0
+ {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 1
+ {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000002")}, // step 2
+ {op: 2, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("")}, // step 3
+ {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 4
+ {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 5
+ {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 6
+ {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 7
+ {op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000008")}, // step 8
+ {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000009")}, // step 9
+ {op: 2, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("")}, // step 10
+ {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 11
+ {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 12
+ {op: 0, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("000000000000000d")}, // step 13
+ {op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 14
+ {op: 1, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("")}, // step 15
+ {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 16
+ {op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000011")}, // step 17
+ {op: 5, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 18
+ {op: 3, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 19
+ {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000014")}, // step 20
+ {op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000015")}, // step 21
+ {op: 0, key: common.Hex2Bytes("c2a38512b83107d665c65235b0250002882ac2022eb00711552354832c5f1d030d0e408e"), value: common.Hex2Bytes("0000000000000016")}, // step 22
+ {op: 5, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")}, // step 23
+ {op: 1, key: common.Hex2Bytes("980c393656413a15c8da01978ed9f89feb80b502f58f2d640e3a2f5f7a99a7018f1b573befd92053ac6f78fca4a87268"), value: common.Hex2Bytes("")}, // step 24
+ {op: 1, key: common.Hex2Bytes("fd"), value: common.Hex2Bytes("")}, // step 25
+ }
+ runRandTest(rt)
+
+}
+
// randTest performs random trie operations.
// Instances of this test are created by Generate.
type randTest []randTestStep
@@ -375,6 +409,8 @@ func runRandTest(rt randTest) bool {
values := make(map[string]string) // tracks content of the trie
for i, step := range rt {
+ fmt.Printf("{op: %d, key: common.Hex2Bytes(\"%x\"), value: common.Hex2Bytes(\"%x\")}, // step %d\n",
+ step.op, step.key, step.value, i)
switch step.op {
case opUpdate:
tr.Update(step.key, step.value)
@@ -470,6 +506,7 @@ func benchGet(b *testing.B, commit bool) {
func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
trie := newEmpty()
k := make([]byte, 32)
+ b.ReportAllocs()
for i := 0; i < b.N; i++ {
e.PutUint64(k, uint64(i))
trie.Update(k, k)
@@ -481,18 +518,135 @@ func benchUpdate(b *testing.B, e binary.ByteOrder) *Trie {
// we cannot use b.N as the number of hashing rouns, since all rounds apart from
// the first one will be NOOP. As such, we'll use b.N as the number of account to
// insert into the trie before measuring the hashing.
+// BenchmarkHash-6 288680 4561 ns/op 682 B/op 9 allocs/op
+// BenchmarkHash-6 275095 4800 ns/op 685 B/op 9 allocs/op
+// pure hasher:
+// BenchmarkHash-6 319362 4230 ns/op 675 B/op 9 allocs/op
+// BenchmarkHash-6 257460 4674 ns/op 689 B/op 9 allocs/op
+// With hashing in-between and pure hasher:
+// BenchmarkHash-6 225417 7150 ns/op 982 B/op 12 allocs/op
+// BenchmarkHash-6 220378 6197 ns/op 983 B/op 12 allocs/op
+// same with old hasher
+// BenchmarkHash-6 229758 6437 ns/op 981 B/op 12 allocs/op
+// BenchmarkHash-6 212610 7137 ns/op 986 B/op 12 allocs/op
func BenchmarkHash(b *testing.B) {
+ // Create a realistic account trie to hash. We're first adding and hashing N
+ // entries, then adding N more.
+ addresses, accounts := makeAccounts(2 * b.N)
+ // Insert the accounts into the trie and hash it
+ trie := newEmpty()
+ i := 0
+ for ; i < len(addresses)/2; i++ {
+ trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ }
+ trie.Hash()
+ for ; i < len(addresses); i++ {
+ trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ }
+ b.ResetTimer()
+ b.ReportAllocs()
+ //trie.hashRoot(nil, nil)
+ trie.Hash()
+}
+
+type account struct {
+ Nonce uint64
+ Balance *big.Int
+ Root common.Hash
+ Code []byte
+}
+
+// Benchmarks the trie Commit following a Hash. Since the trie caches the result of any operation,
+// we cannot use b.N as the number of hashing rounds, since all rounds apart from
+// the first one will be NOOP. As such, we'll use b.N as the number of account to
+// insert into the trie before measuring the hashing.
+func BenchmarkCommitAfterHash(b *testing.B) {
+ b.Run("no-onleaf", func(b *testing.B) {
+ benchmarkCommitAfterHash(b, nil)
+ })
+ var a account
+ onleaf := func(leaf []byte, parent common.Hash) error {
+ rlp.DecodeBytes(leaf, &a)
+ return nil
+ }
+ b.Run("with-onleaf", func(b *testing.B) {
+ benchmarkCommitAfterHash(b, onleaf)
+ })
+}
+
+func benchmarkCommitAfterHash(b *testing.B, onleaf LeafCallback) {
// Make the random benchmark deterministic
- random := rand.New(rand.NewSource(0))
+ addresses, accounts := makeAccounts(b.N)
+ trie := newEmpty()
+ for i := 0; i < len(addresses); i++ {
+ trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ }
+ // Insert the accounts into the trie and hash it
+ trie.Hash()
+ b.ResetTimer()
+ b.ReportAllocs()
+ trie.Commit(onleaf)
+}
+
+func TestTinyTrie(t *testing.T) {
+ // Create a realistic account trie to hash
+ _, accounts := makeAccounts(10000)
+ trie := newEmpty()
+ trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
+ if exp, root := common.HexToHash("4fa6efd292cffa2db0083b8bedd23add2798ae73802442f52486e95c3df7111c"), trie.Hash(); exp != root {
+ t.Fatalf("1: got %x, exp %x", root, exp)
+ }
+ trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4])
+ if exp, root := common.HexToHash("cb5fb1213826dad9e604f095f8ceb5258fe6b5c01805ce6ef019a50699d2d479"), trie.Hash(); exp != root {
+ t.Fatalf("2: got %x, exp %x", root, exp)
+ }
+ trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4])
+ if exp, root := common.HexToHash("ed7e06b4010057d8703e7b9a160a6d42cf4021f9020da3c8891030349a646987"), trie.Hash(); exp != root {
+ t.Fatalf("3: got %x, exp %x", root, exp)
+ }
+
+ checktr, _ := New(common.Hash{}, trie.db)
+ it := NewIterator(trie.NodeIterator(nil))
+ for it.Next() {
+ checktr.Update(it.Key, it.Value)
+ }
+ if troot, itroot := trie.Hash(), checktr.Hash(); troot != itroot {
+ t.Fatalf("hash mismatch in opItercheckhash, trie: %x, check: %x", troot, itroot)
+ }
+}
+func TestCommitAfterHash(t *testing.T) {
// Create a realistic account trie to hash
- addresses := make([][20]byte, b.N)
+ addresses, accounts := makeAccounts(1000)
+ trie := newEmpty()
+ for i := 0; i < len(addresses); i++ {
+ trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ }
+ // Insert the accounts into the trie and hash it
+ trie.Hash()
+ trie.Commit(nil)
+ root := trie.Hash()
+ exp := common.HexToHash("e5e9c29bb50446a4081e6d1d748d2892c6101c1e883a1f77cf21d4094b697822")
+ if exp != root {
+ t.Errorf("got %x, exp %x", root, exp)
+ }
+ root, _ = trie.Commit(nil)
+ if exp != root {
+ t.Errorf("got %x, exp %x", root, exp)
+ }
+}
+
+func makeAccounts(size int) (addresses [][20]byte, accounts [][]byte) {
+ // Make the random benchmark deterministic
+ random := rand.New(rand.NewSource(0))
+ // Create a realistic account trie to hash
+ addresses = make([][20]byte, size)
for i := 0; i < len(addresses); i++ {
for j := 0; j < len(addresses[i]); j++ {
addresses[i][j] = byte(random.Intn(256))
}
}
- accounts := make([][]byte, len(addresses))
+ accounts = make([][]byte, len(addresses))
for i := 0; i < len(accounts); i++ {
var (
nonce = uint64(random.Int63())
@@ -500,16 +654,168 @@ func BenchmarkHash(b *testing.B) {
root = emptyRoot
code = crypto.Keccak256(nil)
)
- accounts[i], _ = rlp.EncodeToBytes([]interface{}{nonce, balance, root, code})
+ accounts[i], _ = rlp.EncodeToBytes(&account{nonce, balance, root, code})
}
- // Insert the accounts into the trie and hash it
+ return addresses, accounts
+}
+
+// BenchmarkHashFixedSize benchmarks the Hash of a fixed number of updates to a trie.
+// This benchmark is meant to capture the difference on efficiency of small versus large changes. Typically,
+// storage tries are small (a couple of entries), whereas the full post-block account trie update is large (a couple
+// of thousand entries)
+func BenchmarkHashFixedSize(b *testing.B) {
+ b.Run("10", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(20)
+ for i := 0; i < b.N; i++ {
+ benchmarkHashFixedSize(b, acc, add)
+ }
+ })
+ b.Run("100", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(100)
+ for i := 0; i < b.N; i++ {
+ benchmarkHashFixedSize(b, acc, add)
+ }
+ })
+
+ b.Run("1K", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(1000)
+ for i := 0; i < b.N; i++ {
+ benchmarkHashFixedSize(b, acc, add)
+ }
+ })
+ b.Run("10K", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(10000)
+ for i := 0; i < b.N; i++ {
+ benchmarkHashFixedSize(b, acc, add)
+ }
+ })
+ b.Run("100K", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(100000)
+ for i := 0; i < b.N; i++ {
+ benchmarkHashFixedSize(b, acc, add)
+ }
+ })
+}
+
+func benchmarkHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
+ b.ReportAllocs()
trie := newEmpty()
for i := 0; i < len(addresses); i++ {
trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
}
- b.ResetTimer()
+ // Insert the accounts into the trie and hash it
+ b.StartTimer()
+ trie.Hash()
+ b.StopTimer()
+}
+
+func BenchmarkCommitAfterHashFixedSize(b *testing.B) {
+ b.Run("10", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(20)
+ for i := 0; i < b.N; i++ {
+ benchmarkCommitAfterHashFixedSize(b, acc, add)
+ }
+ })
+ b.Run("100", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(100)
+ for i := 0; i < b.N; i++ {
+ benchmarkCommitAfterHashFixedSize(b, acc, add)
+ }
+ })
+
+ b.Run("1K", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(1000)
+ for i := 0; i < b.N; i++ {
+ benchmarkCommitAfterHashFixedSize(b, acc, add)
+ }
+ })
+ b.Run("10K", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(10000)
+ for i := 0; i < b.N; i++ {
+ benchmarkCommitAfterHashFixedSize(b, acc, add)
+ }
+ })
+ b.Run("100K", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(100000)
+ for i := 0; i < b.N; i++ {
+ benchmarkCommitAfterHashFixedSize(b, acc, add)
+ }
+ })
+}
+
+func benchmarkCommitAfterHashFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
b.ReportAllocs()
+ trie := newEmpty()
+ for i := 0; i < len(addresses); i++ {
+ trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ }
+ // Insert the accounts into the trie and hash it
trie.Hash()
+ b.StartTimer()
+ trie.Commit(nil)
+ b.StopTimer()
+}
+
+func BenchmarkDerefRootFixedSize(b *testing.B) {
+ b.Run("10", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(20)
+ for i := 0; i < b.N; i++ {
+ benchmarkDerefRootFixedSize(b, acc, add)
+ }
+ })
+ b.Run("100", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(100)
+ for i := 0; i < b.N; i++ {
+ benchmarkDerefRootFixedSize(b, acc, add)
+ }
+ })
+
+ b.Run("1K", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(1000)
+ for i := 0; i < b.N; i++ {
+ benchmarkDerefRootFixedSize(b, acc, add)
+ }
+ })
+ b.Run("10K", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(10000)
+ for i := 0; i < b.N; i++ {
+ benchmarkDerefRootFixedSize(b, acc, add)
+ }
+ })
+ b.Run("100K", func(b *testing.B) {
+ b.StopTimer()
+ acc, add := makeAccounts(100000)
+ for i := 0; i < b.N; i++ {
+ benchmarkDerefRootFixedSize(b, acc, add)
+ }
+ })
+}
+
+func benchmarkDerefRootFixedSize(b *testing.B, addresses [][20]byte, accounts [][]byte) {
+ b.ReportAllocs()
+ trie := newEmpty()
+ for i := 0; i < len(addresses); i++ {
+ trie.Update(crypto.Keccak256(addresses[i][:]), accounts[i])
+ }
+ h := trie.Hash()
+ trie.Commit(nil)
+ b.StartTimer()
+ trie.db.Dereference(h)
+ b.StopTimer()
}
func tempDB() (string, *Database) {
diff --git a/whisper/whisperv6/peer.go b/whisper/whisperv6/peer.go
index 4451f14958..29d8bdf17e 100644
--- a/whisper/whisperv6/peer.go
+++ b/whisper/whisperv6/peer.go
@@ -146,7 +146,9 @@ func (peer *Peer) handshake() error {
func (peer *Peer) update() {
// Start the tickers for the updates
expire := time.NewTicker(expirationCycle)
+ defer expire.Stop()
transmit := time.NewTicker(transmissionCycle)
+ defer transmit.Stop()
// Loop and transmit until termination is requested
for {