Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix consensus bug from deleting account in same block that creates it #1913

Merged
merged 5 commits
Feb 10, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion eth/chains/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -471,7 +471,11 @@ def import_block(self,

# Validate the imported block.
if perform_validation:
validate_imported_block_unchanged(imported_block, block)
try:
validate_imported_block_unchanged(imported_block, block)
except ValidationError:
self.logger.warning("Proposed %s doesn't follow EVM rules, rejecting...", block)
raise
self.validate_block(imported_block)

(
Expand Down
10 changes: 8 additions & 2 deletions eth/db/account.py
Original file line number Diff line number Diff line change
Expand Up @@ -384,7 +384,8 @@ def make_state_root(self) -> Hash32:
address.hex(),
storage_root.hex(),
)
self._set_storage_root(address, storage_root)
if self.account_exists(address) or storage_root != BLANK_ROOT_HASH:
self._set_storage_root(address, storage_root)

self._journaldb.persist()

Expand All @@ -409,7 +410,12 @@ def persist(self) -> None:
store.persist(write_batch)

for address, new_root in self._get_changed_roots():
if new_root not in self._raw_store_db and new_root != BLANK_ROOT_HASH:
if new_root is None:
raise ValidationError(
f"Cannot validate new root of account 0x{address.hex()} "
f"which has a new root hash of None"
)
elif new_root not in self._raw_store_db and new_root != BLANK_ROOT_HASH:
raise ValidationError(
"After persisting storage trie, a root node was not found. "
f"State root for account 0x{address.hex()} "
Expand Down
172 changes: 159 additions & 13 deletions eth/db/storage.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,20 @@
from typing import (
List,
NamedTuple,
)

from eth_hash.auto import keccak
from eth_typing import (
Address,
Hash32
Hash32,
)
from eth_utils import (
ValidationError,
encode_hex,
get_extended_debug_logger,
int_to_big_endian,
to_bytes,
to_int,
)
import rlp
from trie import (
Expand All @@ -22,9 +30,15 @@
AtomicDatabaseAPI,
DatabaseAPI,
)
from eth.constants import (
BLANK_ROOT_HASH,
)
from eth.db.backends.base import (
BaseDB,
)
from eth.db.backends.memory import (
MemoryDB,
)
from eth.db.batch import (
BatchDB,
)
Expand All @@ -42,6 +56,17 @@
)


class PendingWrites(NamedTuple):
    """
    A set of variables captured just before account storage deletion.
    The variables are used to revive storage if the EVM reverts to a point
    prior to deletion.
    """
    # The in-place write trie at the time of deletion
    write_trie: HexaryTrie
    # A batch of all trie nodes written to the trie, not yet committed to disk
    trie_nodes_batch: BatchDB
    # The root hash the trie started from, used for on-disk read lookups
    starting_root_hash: Hash32


class StorageLookup(BaseDB):
"""
This lookup converts lookups of storage slot integers into the appropriate trie lookup.
Expand All @@ -51,12 +76,23 @@ class StorageLookup(BaseDB):
"""
logger = get_extended_debug_logger("eth.db.storage.StorageLookup")

# The trie that is modified in-place, used to calculate storage root on-demand
_write_trie: HexaryTrie

# These are the new trie nodes, waiting to be committed to disk
_trie_nodes_batch: BatchDB

# When deleting an account, push the pending write info onto this stack.
# This stack can get as big as the number of transactions per block: one for each delete.
_historical_write_tries: List[PendingWrites]

def __init__(self, db: DatabaseAPI, storage_root: Hash32, address: Address) -> None:
    """
    :param db: the backing key-value store for trie nodes
    :param storage_root: root hash of the account's storage trie on disk
    :param address: the account address, used in error messages
    """
    self._db = db

    # Set the starting root hash, to be used for on-disk storage read lookups.
    # The helper also clears the write trie, the pending trie-node batch, and
    # the historical-write-trie stack, so no separate assignments are needed
    # here. (The scraped diff retained the pre-PR assignments of
    # _starting_root_hash / _write_trie / _trie_nodes_batch; they are
    # redundant duplicates of what _initialize_to_root_hash does.)
    self._initialize_to_root_hash(storage_root)

    self._address = address

def _get_write_trie(self) -> HexaryTrie:
if self._trie_nodes_batch is None:
Expand Down Expand Up @@ -120,18 +156,21 @@ def __delitem__(self, key: bytes) -> None:

@property
def has_changed_root(self) -> bool:
    # A write trie exists iff at least one write has occurred since the last
    # commit/initialization, so its presence alone marks the root as dirty.
    # (The scraped diff retained the pre-PR comparison against
    # _starting_root_hash on the previous line; only this check is current.)
    return self._write_trie is not None

def get_changed_root(self) -> Hash32:
    """
    Return the storage root hash implied by the pending writes.

    :raises ValidationError: if no writes have been made, so no root changed
    """
    if self._write_trie is not None:
        return self._write_trie.root_hash
    else:
        raise ValidationError("Asked for changed root when no writes have been made")

def _initialize_to_root_hash(self, root_hash: Hash32) -> None:
    """
    Point this lookup at the given on-disk root hash, discarding any pending
    writes. (The scraped diff fused the removed ``_clear_changed_root`` header
    onto this method; this def replaces that old one.)
    """
    self._starting_root_hash = root_hash
    self._write_trie = None
    self._trie_nodes_batch = None

    # Reset the historical writes, which can't be reverted after committing
    self._historical_write_tries = []

def commit_to(self, db: DatabaseAPI) -> None:
"""
Expand All @@ -142,10 +181,67 @@ def commit_to(self, db: DatabaseAPI) -> None:
if self._trie_nodes_batch is None:
raise ValidationError(
"It is invalid to commit an account's storage if it has no pending changes. "
"Always check storage_lookup.has_changed_root before attempting to commit."
"Always check storage_lookup.has_changed_root before attempting to commit. "
f"Write tries on stack = {len(self._historical_write_tries)}; Root hash = "
f"{encode_hex(self._starting_root_hash)}"
)
self._trie_nodes_batch.commit_to(db, apply_deletes=False)
self._clear_changed_root()

# Mark the trie as having been all written out to the database.
# It removes the 'dirty' flag and clears out any pending writes.
self._initialize_to_root_hash(self._write_trie.root_hash)

def new_trie(self) -> int:
    """
    Switch to a fresh, empty trie, preserving the current trie and its
    pending writes so they can be revived if the EVM later reverts.

    :return: index usable with :meth:`rollback_trie` to revive the
        trie that was active *before* this call
    """
    # Capture the current write state before abandoning it
    current_trie = self._get_write_trie()
    snapshot = PendingWrites(
        current_trie,
        self._trie_nodes_batch,
        self._starting_root_hash,
    )
    self._historical_write_tries.append(snapshot)
    revival_index = len(self._historical_write_tries)

    # Start over with a blank trie and no pending writes
    self._write_trie = None
    self._trie_nodes_batch = None
    self._starting_root_hash = BLANK_ROOT_HASH

    return revival_index

def rollback_trie(self, trie_index: int) -> None:
    """
    Restore the trie state saved by a :meth:`~new_trie` call. The supplied
    index revives the trie that was in place *before* that call.

    :param trie_index: index for reviving the previous trie
    :raises ValidationError: if no saved trie exists at that index
    """
    available = len(self._historical_write_tries)
    if trie_index >= available:
        raise ValidationError(
            f"Trying to roll back a delete to index {trie_index}, but there are only"
            f" {available} indices available."
        )

    pending = self._historical_write_tries[trie_index]
    self._write_trie = pending.write_trie
    self._trie_nodes_batch = pending.trie_nodes_batch
    self._starting_root_hash = pending.starting_root_hash

    # Cannot roll forward after a rollback, so remove created/ignored tries.
    # This also deletes the trie that you just reverted to. It will be re-added
    # to the stack when the next new_trie() is called.
    del self._historical_write_tries[trie_index:]


CLEAR_COUNT_KEY_NAME = b'clear-count'


class AccountStorageDB(AccountStorageDatabaseAPI):
Expand Down Expand Up @@ -187,9 +283,15 @@ def __init__(self, db: AtomicDatabaseAPI, storage_root: Hash32, address: Address
self._address = address
self._storage_lookup = StorageLookup(db, storage_root, address)
self._storage_cache = CacheDB(self._storage_lookup)
self._locked_changes = BatchDB(self._storage_cache)
self._locked_changes = JournalDB(self._storage_cache)
self._journal_storage = JournalDB(self._locked_changes)

# Track how many times we have cleared the storage. This is journaled
# in lockstep with other storage changes. That way, we can detect if a revert
# causes us to revert past the previous storage deletion. The clear count is used
# as an index to find the base trie from before the revert.
self._clear_count = JournalDB(MemoryDB({CLEAR_COUNT_KEY_NAME: to_bytes(0)}))
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Using the JournalDB here feels like it may be the wrong choice. Can this just be modeled using a list (stack) or somethng?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I updated the comment a bit to try to explain: it has to move in lockstep with the main storage journal, so we can detect when a revert crosses back over an account delete.


def get(self, slot: int, from_journal: bool=True) -> int:
key = int_to_big_endian(slot)
lookup_db = self._journal_storage if from_journal else self._locked_changes
Expand Down Expand Up @@ -222,40 +324,84 @@ def set(self, slot: int, value: int) -> None:

def delete(self) -> None:
    """
    Delete all storage in this account, while bumping the clear count so a
    later revert can detect that it crossed back over this deletion.
    (The scraped diff retained the removed log format string "..., hashed 0x%s"
    and its keccak argument next to the new one-placeholder format, which
    would mismatch arguments; only the new call is kept here.)
    """
    self.logger.debug2(
        "Deleting all storage in account 0x%s",
        self._address.hex(),
    )
    self._journal_storage.clear()
    self._storage_cache.reset_cache()

    # Empty out the storage lookup trie (keeping history, in case of a revert)
    new_clear_count = self._storage_lookup.new_trie()

    # Look up the previous count of how many times the account has been deleted.
    # This can happen multiple times in one block, via CREATE2.
    old_clear_count = to_int(self._clear_count[CLEAR_COUNT_KEY_NAME])

    # Gut check that we have incremented correctly
    if new_clear_count != old_clear_count + 1:
        raise ValidationError(
            f"Must increase clear count by one on each delete. Instead, went from"
            f" {old_clear_count} -> {new_clear_count} in account 0x{self._address.hex()}"
        )

    # Save the new count, ie~ the index used for a future revert.
    self._clear_count[CLEAR_COUNT_KEY_NAME] = to_bytes(new_clear_count)

def record(self, checkpoint: JournalDBCheckpoint) -> None:
    """
    Record the checkpoint in both journals. The clear-count journal must
    move in lockstep with the storage journal, so that a later discard can
    detect whether it crossed back over an account deletion.
    """
    for journal in (self._journal_storage, self._clear_count):
        journal.record(checkpoint)

def discard(self, checkpoint: JournalDBCheckpoint) -> None:
    """
    Revert all changes made since the given checkpoint, reviving the prior
    storage trie if the revert crosses back over an account deletion.
    """
    self.logger.debug2('discard checkpoint %r', checkpoint)
    # Capture the clear count *before* discarding, so we can tell afterwards
    # whether this revert rewound past a delete() call.
    latest_clear_count = to_int(self._clear_count[CLEAR_COUNT_KEY_NAME])

    if self._journal_storage.has_checkpoint(checkpoint):
        self._journal_storage.discard(checkpoint)
        self._clear_count.discard(checkpoint)
    else:
        # if the checkpoint comes before this account started tracking,
        # then simply reset to the beginning
        self._journal_storage.reset()
        self._clear_count.reset()
        self._storage_cache.reset_cache()

    # The clear count after the discard; both journals move in lockstep, so a
    # decrease here means the revert crossed back over a storage deletion.
    reverted_clear_count = to_int(self._clear_count[CLEAR_COUNT_KEY_NAME])

    if reverted_clear_count == latest_clear_count - 1:
        # This revert rewinds past a trie deletion, so roll back to the trie at
        # that point. We use the clear count as an index to get back to the
        # old base trie.
        self._storage_lookup.rollback_trie(reverted_clear_count)
    elif reverted_clear_count == latest_clear_count:
        # No change in the base trie, take no action
        pass
    else:
        # Although CREATE2 permits multiple creates and deletes in a single block,
        # you can still only revert across a single delete. That's because delete
        # is only triggered at the end of the transaction.
        raise ValidationError(
            f"This revert has changed the clear count in an invalid way, from"
            f" {latest_clear_count} to {reverted_clear_count}, in 0x{self._address.hex()}"
        )

def commit(self, checkpoint: JournalDBCheckpoint) -> None:
    """
    Collapse changes into the given checkpoint, in both journals, keeping
    the clear-count journal in lockstep with the storage journal.
    """
    if not self._journal_storage.has_checkpoint(checkpoint):
        # if the checkpoint comes before this account started tracking,
        # then flatten all changes, without persisting
        self._journal_storage.flatten()
        self._clear_count.flatten()
    else:
        self._journal_storage.commit(checkpoint)
        self._clear_count.commit(checkpoint)

def lock_changes(self) -> None:
    """
    Push the journaled changes down into the locked-changes layer, so they
    can no longer be discarded via checkpoints.
    """
    # A journaled clear must wipe the locked layer first, so that stale
    # pre-clear values do not survive underneath the persisted writes.
    if self._journal_storage.has_clear():
        self._locked_changes.clear()
    self._journal_storage.persist()

def make_storage_root(self) -> None:
    """
    Lock in the current changes and push them down through the
    locked-changes layer, so a storage root can be calculated.
    """
    self.lock_changes()
    # NOTE(review): this file switches _locked_changes from BatchDB to
    # JournalDB; assumes that layer still accepts commit(apply_deletes=...) —
    # confirm against its actual type.
    self._locked_changes.commit(apply_deletes=True)
    self._locked_changes.persist()

def _validate_flushed(self) -> None:
"""
Expand Down
2 changes: 2 additions & 0 deletions newsfragments/1912.bugfix.rst
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
Fixed a consensus-critical bug for contracts that are created and destroyed in the same block,
especially pre-Byzantium.
67 changes: 67 additions & 0 deletions tests/core/vm/test_vm_state.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,73 @@ def test_revert_selfdestruct(state, read_storage_before_snapshot):
assert state.get_storage(ADDRESS, 1) == 2


@pytest.mark.parametrize('make_state_root_after_create', [True, False])
def test_delete_after_create_in_same_block(state, make_state_root_after_create):
    """
    An account created and then deleted within the same block must not exist
    afterwards, whether or not a state root was made between the two steps.
    """
    # create account with storage in one "transaction"
    state.set_storage(ADDRESS, 0, 1)
    state.lock_changes()

    if make_state_root_after_create:
        state.make_state_root()

    # delete account in next "transaction"
    state.delete_account(ADDRESS)
    state.lock_changes()

    # deleted account should not exist
    assert not state.account_exists(ADDRESS)

    state.persist()

    # deleted account should not exist, even after persisting
    assert not state.account_exists(ADDRESS)


@pytest.mark.parametrize('make_state_root_after_lock', [True, False])
@pytest.mark.parametrize('persist_after_first_create', [True, False])
def test_delete_and_revive_in_same_block(
        state,
        make_state_root_after_lock,
        persist_after_first_create):
    """
    Create, delete, then revive an account within one block: old storage must
    stay deleted while post-revival storage is kept, across all combinations
    of persisting / making a state root between the steps.
    """
    # create account with storage in one "transaction"
    state.set_storage(ADDRESS, 0, 1)
    state.lock_changes()

    if persist_after_first_create:
        state.persist()
    elif make_state_root_after_lock:
        state.make_state_root()

    # delete account in next "transaction"
    state.delete_account(ADDRESS)
    assert state.get_storage(ADDRESS, 0) == 0
    state.lock_changes()

    assert state.get_storage(ADDRESS, 0) == 0

    if make_state_root_after_lock:
        state.make_state_root()

    assert state.get_storage(ADDRESS, 0) == 0

    # revive account in next "transaction"
    state.set_storage(ADDRESS, 2, 3)
    state.lock_changes()

    # make sure original value stays deleted
    assert state.get_storage(ADDRESS, 0) == 0
    # but new value is saved
    assert state.get_storage(ADDRESS, 2) == 3

    state.persist()

    # make sure original value stays deleted
    assert state.get_storage(ADDRESS, 0) == 0
    # but new value is saved
    assert state.get_storage(ADDRESS, 2) == 3


def test_lock_state(state):
assert state.get_storage(ADDRESS, 1, from_journal=False) == 0

Expand Down
Loading