Add eth_getProof support for historical blocks #7115

Merged (6 commits) on Mar 23, 2023
Changes from 5 commits
125 changes: 92 additions & 33 deletions cmd/rpcdaemon/commands/eth_call.go
@@ -5,24 +5,29 @@ import (
"errors"
"fmt"
"math/big"
"os"

"github.com/holiman/uint256"
libcommon "github.com/ledgerwatch/erigon-lib/common"
"github.com/ledgerwatch/erigon-lib/common/datadir"
"github.com/ledgerwatch/erigon-lib/gointerfaces"
txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool"
"github.com/ledgerwatch/erigon-lib/kv"
"github.com/ledgerwatch/erigon-lib/kv/memdb"
types2 "github.com/ledgerwatch/erigon-lib/types"
"github.com/ledgerwatch/log/v3"
"google.golang.org/grpc"

"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/common/hexutil"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/rawdb"
"github.com/ledgerwatch/erigon/core/state"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/types/accounts"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/crypto"
"github.com/ledgerwatch/erigon/eth/stagedsync"
"github.com/ledgerwatch/erigon/eth/tracers/logger"
"github.com/ledgerwatch/erigon/params"
"github.com/ledgerwatch/erigon/rpc"
@@ -303,7 +308,19 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs
return hexutil.Uint64(hi), nil
}

// GetProof is partially implemented; no Storage proofs; only for the latest block
// maxGetProofRewindBlockCount limits the number of blocks into the past that
// GetProof will allow computing proofs. Because we must rewind the hash state
// and re-compute the state trie, the further back in time the request, the more
// computationally intensive the operation becomes. The staged sync code
// assumes that if more than 100_000 blocks are skipped, the entire trie
// should be re-computed. Re-computing the entire trie will currently take ~15
// minutes on mainnet. The current limit has been chosen arbitrarily as
// 'useful' without likely being overly computationally intense. This parameter
// could possibly be made configurable in the future if needed.
var maxGetProofRewindBlockCount uint64 = 1_000

// GetProof is partially implemented; no Storage proofs, and proofs must be for
// blocks within maxGetProofRewindBlockCount blocks of the head.
func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, storageKeys []libcommon.Hash, blockNrOrHash rpc.BlockNumberOrHash) (*accounts.AccProofResult, error) {

tx, err := api.db.BeginRo(ctx)
@@ -312,59 +329,101 @@ func (api *APIImpl) GetProof(ctx context.Context, address libcommon.Address, sto
}
defer tx.Rollback()

blockNr, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters)
blockNr, blockHash, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters)
if err != nil {
return nil, err
}

header := rawdb.ReadHeader(tx, blockHash, blockNr)

Collaborator: use api._blockReader.HeaderByNumber

Contributor Author: Done

if header == nil {
return nil, err
}

latestBlock, err := rpchelper.GetLatestBlockNumber(tx)
if err != nil {
return nil, err
} else if blockNr != latestBlock {
return nil, fmt.Errorf(NotImplemented, "eth_getProof for block != latest")
} else if len(storageKeys) != 0 {
return nil, fmt.Errorf(NotImplemented, "eth_getProof with storageKeys")
} else {
addrHash, err := common.HashData(address[:])
if err != nil {
return nil, err
}
}

if latestBlock < blockNr {
// shouldn't happen, but check anyway
return nil, fmt.Errorf("block number is in the future")

Collaborator: let's add more info to the error: values of latestBlock, blockNr

Contributor Author: Done. I don't think it's actually reachable, but it can't hurt to have more context in the error.

}

rl := trie.NewRetainList(0)
rl.AddKey(addrHash[:])
if len(storageKeys) != 0 {
return nil, fmt.Errorf(NotImplemented, "eth_getProof with storageKeys")
}

loader := trie.NewFlatDBTrieLoader("getProof")
if err := loader.Reset(rl, nil, nil, false); err != nil {
return nil, err
}
addrHash, err := common.HashData(address[:])
if err != nil {
return nil, err
}

var accProof accounts.AccProofResult
accProof.Address = address
rl := trie.NewRetainList(0)
rl.AddKey(addrHash[:])

// Fill in the Account fields here to reduce the code changes
// needed in turbo/trie/hashbuilder.go
reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "")
if err != nil {
return nil, err
var loader *trie.FlatDBTrieLoader
if blockNr < latestBlock {
if latestBlock-blockNr > maxGetProofRewindBlockCount {
return nil, fmt.Errorf("requested block is too old, block must be within %d blocks of the head block number (currently %d)", maxGetProofRewindBlockCount, latestBlock)
}
a, err := reader.ReadAccountData(address)
tmpDir, err := os.MkdirTemp("", "eth_getHash")

Collaborator: I advise using the existing tmpdir: there is no reason to manually create/remove folders/files; memdb/etl will automatically delete what is needed if you defer rollback/close.

Contributor Author: I didn't like this part of the PR, but I wasn't really sure of the best way to do it and didn't see any of the other RPC daemon commands referencing a tempdir.

My best guess is that you're referring to the datadir temp dir? I see there is a datadir on the CLI flags, so I've pushed a commit on top that wires the datadir.Dirs through to the call. Unfortunately this creates a bit of churn in the other tests, but it's not too bad.

I'm not sure if that's what you were hoping for, so please let me know if you had something different in mind; happy to take a different approach.

if err != nil {
return nil, err
}
if a != nil {
accProof.Balance = (*hexutil.Big)(a.Balance.ToBig())
accProof.CodeHash = a.CodeHash
accProof.Nonce = hexutil.Uint64(a.Nonce)
accProof.StorageHash = a.Root
defer os.RemoveAll(tmpDir)
dirs := datadir.New(tmpDir)

batch := memdb.NewMemoryBatch(tx, dirs.Tmp)
defer batch.Rollback()

unwindState := &stagedsync.UnwindState{UnwindPoint: blockNr}
stageState := &stagedsync.StageState{BlockNumber: latestBlock}

hashStageCfg := stagedsync.StageHashStateCfg(nil, dirs, api.historyV3(batch), api._agg)
if err := stagedsync.UnwindHashStateStage(unwindState, stageState, batch, hashStageCfg, ctx); err != nil {
return nil, err
}

loader.SetProofReturn(&accProof)
_, err = loader.CalcTrieRoot(tx, nil, nil)
interHashStageCfg := stagedsync.StageTrieCfg(nil, false, false, false, dirs.Tmp, api._blockReader, nil, api.historyV3(batch), api._agg)
loader, err = stagedsync.UnwindIntermediateHashesForTrieLoader("eth_getProof", rl, unwindState, stageState, batch, interHashStageCfg, nil, nil, ctx.Done())
if err != nil {
return nil, err
}
return &accProof, nil
tx = batch
} else {
loader = trie.NewFlatDBTrieLoader("eth_getProof", rl, nil, nil, false)
}

var accProof accounts.AccProofResult
accProof.Address = address

// Fill in the Account fields here to reduce the code changes
// needed in turbo/trie/hashbuilder.go
reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, 0, api.filters, api.stateCache, api.historyV3(tx), "")
if err != nil {
return nil, err
}
a, err := reader.ReadAccountData(address)
if err != nil {
return nil, err
}
if a != nil {
accProof.Balance = (*hexutil.Big)(a.Balance.ToBig())
accProof.CodeHash = a.CodeHash
accProof.Nonce = hexutil.Uint64(a.Nonce)
accProof.StorageHash = a.Root
}

loader.SetProofReturn(&accProof)
root, err := loader.CalcTrieRoot(tx, nil)
if err != nil {
return nil, err
}

if root != header.Root {
return nil, fmt.Errorf("mismatch in expected state root computed %v vs %v indicates bug in proof implementation", root, header.Root)
}
return &accProof, nil
}

func (api *APIImpl) tryBlockFromLru(hash libcommon.Hash) *types.Block {
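
With this change, eth_getProof accepts any block within maxGetProofRewindBlockCount (currently 1,000) blocks of the node's head rather than only the latest block, while storage proofs remain unimplemented. The following client-side sketch is not part of this PR; the endpoint URL, account address, and block number are placeholders, and the storage-key list must stay empty.

// A minimal sketch of requesting an account proof for a historical block over
// plain JSON-RPC. The endpoint, account address, and block number below are
// placeholders; storage keys must be empty because storage proofs are still
// unimplemented.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	req, err := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "eth_getProof",
		"params": []interface{}{
			"0x71562b71999873db5b286df957af199ec94617f7", // account to prove (placeholder)
			[]string{}, // storage keys: must be empty for now
			"0xf4240",  // block number; must be within 1,000 blocks of the head
		},
	})
	if err != nil {
		panic(err)
	}
	resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(req))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out map[string]interface{}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["result"]) // accountProof, balance, codeHash, nonce, storageHash
}

A request further back than the limit fails with the new "requested block is too old" error, and a mismatch between the computed root and the header's state root is reported as a bug in the proof implementation.
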
26 changes: 13 additions & 13 deletions cmd/rpcdaemon/commands/eth_call_test.go
@@ -250,13 +250,11 @@ func verifyStorageProof(t *testing.T, storageRoot libcommon.Hash, proof accounts
}

func TestGetProof(t *testing.T) {
pruneTo := uint64(3)
maxGetProofRewindBlockCount = 1 // Note, this is unsafe for parallel tests, but, this test is the only consumer for now

m, bankAddress, _ := chainWithDeployedContract(t)
br := snapshotsync.NewBlockReaderWithSnapshots(m.BlockSnapshots, m.TransactionsV3)

doPrune(t, m.DB, pruneTo)

agg := m.HistoryV3Components()

stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
@@ -270,18 +268,22 @@ func TestGetProof(t *testing.T) {
}{
{
name: "currentBlock",
blockNum: 2,
blockNum: 3,
},
{
name: "withState",
blockNum: 2,
blockNum: 3,
storageKeys: []libcommon.Hash{{1}},
expectedErr: "the method is currently not implemented: eth_getProof with storageKeys",
},
{
name: "olderBlock",
name: "olderBlock",
blockNum: 2,
},
{
name: "tooOldBlock",
blockNum: 1,
expectedErr: "the method is currently not implemented: eth_getProof for block != latest",
expectedErr: "requested block is too old, block must be within 1 blocks of the head block number (currently 3)",
},
}

@@ -298,16 +300,15 @@ func TestGetProof(t *testing.T) {
require.Nil(t, proof)
return
}
require.NoError(t, err)
require.NotNil(t, proof)

tx, err := m.DB.BeginRo(context.Background())
assert.NoError(t, err)
defer tx.Rollback()
header, err := api.headerByRPCNumber(rpc.BlockNumber(tt.blockNum), tx)
require.NoError(t, err)

require.NoError(t, err)
require.NotNil(t, proof)

require.Equal(t, bankAddress, proof.Address)
verifyAccountProof(t, header.Root, proof)

@@ -489,7 +490,6 @@ func TestGetBlockByTimeMiddle(t *testing.T) {
if err != nil {
t.Errorf("couldn't retrieve block %v", err)
}

if block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] {
t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"])
}
@@ -558,15 +558,15 @@ func chainWithDeployedContract(t *testing.T) (*stages.MockSentry, libcommon.Addr

var contractAddr libcommon.Address

chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 2, func(i int, block *core.BlockGen) {
chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 3, func(i int, block *core.BlockGen) {
nonce := block.TxNonce(bankAddress)
switch i {
case 0:
tx, err := types.SignTx(types.NewContractCreation(nonce, new(uint256.Int), 1e6, new(uint256.Int), contract), *signer, bankKey)
assert.NoError(t, err)
block.AddTx(tx)
contractAddr = crypto.CreateAddress(bankAddress, nonce)
case 1:
case 1, 2:
txn, err := types.SignTx(types.NewTransaction(nonce, contractAddr, new(uint256.Int), 90000, new(uint256.Int), nil), *signer, bankKey)
assert.NoError(t, err)
block.AddTx(txn)
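
The test now overrides the package-level maxGetProofRewindBlockCount, and its inline note flags that this is unsafe for parallel tests. A small helper along the following lines is one way to at least restore the default afterwards; it is a sketch rather than part of this PR, it assumes it lives in the same commands test package as the variable, and it is still not safe under t.Parallel().

// A sketch: override the rewind limit for one test and restore the previous
// value when the test finishes. Assumes it sits in the same package as
// maxGetProofRewindBlockCount.
package commands

import "testing"

func withRewindLimit(t *testing.T, limit uint64) {
	t.Helper()
	prev := maxGetProofRewindBlockCount
	maxGetProofRewindBlockCount = limit
	t.Cleanup(func() { maxGetProofRewindBlockCount = prev })
}

TestGetProof would then start with withRewindLimit(t, 1) instead of assigning to the variable directly.
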
38 changes: 19 additions & 19 deletions eth/stagedsync/stage_interhashes.go
@@ -163,11 +163,8 @@ func RegenerateIntermediateHashes(logPrefix string, db kv.RwTx, cfg TrieCfg, exp
defer stTrieCollector.Close()
stTrieCollectorFunc := storageTrieCollector(stTrieCollector)

loader := trie.NewFlatDBTrieLoader(logPrefix)
if err := loader.Reset(trie.NewRetainList(0), accTrieCollectorFunc, stTrieCollectorFunc, false); err != nil {
return trie.EmptyRoot, err
}
hash, err := loader.CalcTrieRoot(db, []byte{}, ctx.Done())
loader := trie.NewFlatDBTrieLoader(logPrefix, trie.NewRetainList(0), accTrieCollectorFunc, stTrieCollectorFunc, false)
hash, err := loader.CalcTrieRoot(db, ctx.Done())
if err != nil {
return trie.EmptyRoot, err
}
@@ -581,11 +578,8 @@ func incrementIntermediateHashes(logPrefix string, s *StageState, db kv.RwTx, to
defer stTrieCollector.Close()
stTrieCollectorFunc := storageTrieCollector(stTrieCollector)

loader := trie.NewFlatDBTrieLoader(logPrefix)
if err := loader.Reset(rl, accTrieCollectorFunc, stTrieCollectorFunc, false); err != nil {
return trie.EmptyRoot, err
}
hash, err := loader.CalcTrieRoot(db, []byte{}, quit)
loader := trie.NewFlatDBTrieLoader(logPrefix, rl, accTrieCollectorFunc, stTrieCollectorFunc, false)
hash, err := loader.CalcTrieRoot(db, quit)
if err != nil {
return trie.EmptyRoot, err
}
@@ -638,33 +632,36 @@ func UnwindIntermediateHashesStage(u *UnwindState, s *StageState, tx kv.RwTx, cf
return nil
}

func unwindIntermediateHashesStageImpl(logPrefix string, u *UnwindState, s *StageState, db kv.RwTx, cfg TrieCfg, expectedRootHash libcommon.Hash, quit <-chan struct{}) error {
func UnwindIntermediateHashesForTrieLoader(logPrefix string, rl *trie.RetainList, u *UnwindState, s *StageState, db kv.RwTx, cfg TrieCfg, accTrieCollectorFunc trie.HashCollector2, stTrieCollectorFunc trie.StorageHashCollector2, quit <-chan struct{}) (*trie.FlatDBTrieLoader, error) {
p := NewHashPromoter(db, cfg.tmpDir, quit, logPrefix)
rl := trie.NewRetainList(0)
if cfg.historyV3 {
cfg.agg.SetTx(db)
collect := func(k, v []byte) {
rl.AddKeyWithMarker(k, len(v) == 0)
}
if err := p.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, false, collect); err != nil {
return err
return nil, err
}
if err := p.UnwindOnHistoryV3(logPrefix, cfg.agg, s.BlockNumber, u.UnwindPoint, true, collect); err != nil {
return err
return nil, err
}
} else {
collect := func(k, v []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error {
rl.AddKeyWithMarker(k, len(v) == 0)
return nil
}
if err := p.Unwind(logPrefix, s, u, false /* storage */, collect); err != nil {
return err
return nil, err
}
if err := p.Unwind(logPrefix, s, u, true /* storage */, collect); err != nil {
return err
return nil, err
}
}

return trie.NewFlatDBTrieLoader(logPrefix, rl, accTrieCollectorFunc, stTrieCollectorFunc, false), nil
}

func unwindIntermediateHashesStageImpl(logPrefix string, u *UnwindState, s *StageState, db kv.RwTx, cfg TrieCfg, expectedRootHash libcommon.Hash, quit <-chan struct{}) error {
accTrieCollector := etl.NewCollector(logPrefix, cfg.tmpDir, etl.NewSortableBuffer(etl.BufferOptimalSize))
defer accTrieCollector.Close()
accTrieCollectorFunc := accountTrieCollector(accTrieCollector)
@@ -673,11 +670,14 @@ func unwindIntermediateHashesStageImpl(logPrefix string, u *UnwindState, s *Stag
defer stTrieCollector.Close()
stTrieCollectorFunc := storageTrieCollector(stTrieCollector)

loader := trie.NewFlatDBTrieLoader(logPrefix)
if err := loader.Reset(rl, accTrieCollectorFunc, stTrieCollectorFunc, false); err != nil {
rl := trie.NewRetainList(0)

loader, err := UnwindIntermediateHashesForTrieLoader(logPrefix, rl, u, s, db, cfg, accTrieCollectorFunc, stTrieCollectorFunc, quit)
if err != nil {
return err
}
hash, err := loader.CalcTrieRoot(db, []byte{}, quit)

hash, err := loader.CalcTrieRoot(db, quit)
if err != nil {
return err
}
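
The loader construction pattern changes throughout this file: trie.NewFlatDBTrieLoader now takes the retain list, both collector callbacks, and the trace flag directly, the separate Reset step is gone, and CalcTrieRoot drops its prefix argument. A minimal sketch of the new call shape follows; it is not code from this PR, the import paths mirror those visible elsewhere in this diff, and the nil arguments stand in for collectors, which are only needed when the intermediate-hash tables should be updated (the eth_getProof path passes nil as well).

// A sketch of the new construction pattern.
package example

import (
	libcommon "github.com/ledgerwatch/erigon-lib/common"
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon/turbo/trie"
)

// calcRootForKey computes the state root while retaining the nodes on the
// path to addrHash, mirroring how eth_getProof drives the loader.
func calcRootForKey(tx kv.Tx, addrHash []byte, quit <-chan struct{}) (libcommon.Hash, error) {
	rl := trie.NewRetainList(0)
	rl.AddKey(addrHash)

	// Old pattern removed by this PR:
	//   loader := trie.NewFlatDBTrieLoader("example")
	//   if err := loader.Reset(rl, nil, nil, false); err != nil { ... }
	//   root, err := loader.CalcTrieRoot(tx, []byte{}, quit)
	loader := trie.NewFlatDBTrieLoader("example", rl, nil, nil, false)
	return loader.CalcTrieRoot(tx, quit)
}

Folding the retain list and collectors into the constructor removes the error-returning Reset step, so a loader can no longer be used before it has been configured.
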
4 changes: 0 additions & 4 deletions turbo/adapter/ethapi/get_proof.go
@@ -230,7 +230,3 @@ func (r *Receiver) Receive(
// We ran out of modifications, simply pass through
return r.defaultReceiver.Receive(itemType, accountKey, storageKey, accountValue, storageValue, hash, hasTree, cutoff)
}

func (r *Receiver) Result() trie.SubTries {
return r.defaultReceiver.Result()
}