diff --git a/.circleci/config.yml b/.circleci/config.yml index 1de55179d4..38bd6f422d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -35,13 +35,13 @@ commands: description: "Restore the cache with pyspec keys" steps: - restore_cached_venv: - venv_name: v30-pyspec + venv_name: v32-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} save_pyspec_cached_venv: description: Save a venv into a cache with pyspec keys" steps: - save_cached_venv: - venv_name: v30-pyspec + venv_name: v32-pyspec reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "requirements_preinstallation.txt" }} venv_path: ./venv jobs: diff --git a/Makefile b/Makefile index a3a3e24288..09e914c3ca 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,6 @@ ALL_EXECUTABLE_SPEC_NAMES = \ # A list of fake targets. .PHONY: \ - check_toc \ clean \ coverage \ detect_errors \ @@ -39,7 +38,6 @@ NORM = $(shell tput sgr0) # Print target descriptions. help: - @echo "make $(BOLD)check_toc$(NORM) -- check table of contents" @echo "make $(BOLD)clean$(NORM) -- delete all untracked files" @echo "make $(BOLD)coverage$(NORM) -- run pyspec tests with coverage" @echo "make $(BOLD)detect_errors$(NORM) -- detect generator errors" @@ -85,7 +83,7 @@ $(ETH2SPEC): setup.py | $(VENV) # Force rebuild/install the eth2spec package. eth2spec: - $(MAKE) --always-make $(ETH2SPEC) + @$(MAKE) --always-make $(ETH2SPEC) # Create the pyspec for all phases. pyspec: $(VENV) setup.py @@ -99,6 +97,8 @@ pyspec: $(VENV) setup.py TEST_REPORT_DIR = $(PYSPEC_DIR)/test-reports # Run pyspec tests. +# Note: for debugging output to show, print to stderr. +# # To run a specific test, append k=, eg: # make test k=test_verify_kzg_proof # To run tests for a specific fork, append fork=, eg: @@ -117,6 +117,7 @@ test: $(ETH2SPEC) pyspec @mkdir -p $(TEST_REPORT_DIR) @$(PYTHON_VENV) -m pytest \ -n auto \ + --capture=no \ $(MAYBE_TEST) \ $(MAYBE_FORK) \ $(PRESET) \ @@ -193,10 +194,6 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \ $(wildcard $(SPEC_DIR)/_features/*/*/*.md) \ $(wildcard $(SSZ_DIR)/*.md) -# Check all files and error if any ToC were modified. -check_toc: $(MARKDOWN_FILES:=.toc) - @[ "$$(find . -name '*.md.tmp' -print -quit)" ] && exit 1 || exit 0 - # Generate ToC sections & save copy of original if modified. %.toc: @cp $* $*.tmp; \ @@ -209,8 +206,12 @@ check_toc: $(MARKDOWN_FILES:=.toc) echo "\033[1;34m See $*.tmp\033[0m"; \ fi +# Check all files and error if any ToC were modified. +_check_toc: $(MARKDOWN_FILES:=.toc) + @[ "$$(find . -name '*.md.tmp' -print -quit)" ] && exit 1 || exit 0 + # Check for mistakes. -lint: $(ETH2SPEC) pyspec check_toc +lint: $(ETH2SPEC) pyspec _check_toc @$(CODESPELL_VENV) . --skip "./.git,$(VENV),$(PYSPEC_DIR)/.mypy_cache" -I .codespell-whitelist @$(PYTHON_VENV) -m flake8 --config $(FLAKE8_CONFIG) $(PYSPEC_DIR)/eth2spec @$(PYTHON_VENV) -m flake8 --config $(FLAKE8_CONFIG) $(TEST_GENERATORS_DIR) @@ -235,17 +236,19 @@ gen_list: done # Run one generator. +# This will forcibly rebuild eth2spec just in case. # To check modules for a generator, append modcheck=true, eg: # make gen_genesis modcheck=true gen_%: MAYBE_MODCHECK := $(if $(filter true,$(modcheck)),--modcheck) -gen_%: $(ETH2SPEC) pyspec +gen_%: eth2spec @mkdir -p $(TEST_VECTOR_DIR) @$(PYTHON_VENV) $(GENERATOR_DIR)/$*/main.py \ --output $(TEST_VECTOR_DIR) \ $(MAYBE_MODCHECK) # Run all generators then check for errors. 
-gen_all: $(GENERATOR_TARGETS) detect_errors +gen_all: $(GENERATOR_TARGETS) + @$(MAKE) detect_errors # Detect errors in generators. detect_errors: $(TEST_VECTOR_DIR) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index deb3dcf5fe..e54db49661 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -115,15 +115,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 460474ebf7..a15314bb1f 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -116,15 +116,13 @@ DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890 # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) -GOSSIP_MAX_SIZE: 10485760 +MAX_PAYLOAD_SIZE: 10485760 # `2**10` (= 1024) MAX_REQUEST_BLOCKS: 1024 # `2**8` (= 256) EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 # [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272) MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272 -# `10 * 2**20` (=10485760, 10 MiB) -MAX_CHUNK_SIZE: 10485760 # 5s TTFB_TIMEOUT: 5 # 10s diff --git a/docker/README.md b/docker/README.md index 4824fc283a..34bdd94c51 100644 --- a/docker/README.md +++ b/docker/README.md @@ -10,7 +10,7 @@ Handy commands: Ideally manual running of docker containers is for advanced users, we recommend the script based approach described below for most users. -The `scripts/build_run_docker_tests.sh` script will cover most usecases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies. +The `scripts/build_run_docker_tests.sh` script will cover most use cases. The script allows the user to configure the fork(altair/bellatrix/capella..), `$IMAGE_NAME` (specifies the container to use), preset type (mainnet/minimal), and test all forks flags. Ideally, this is the main way that users interact with the spec tests instead of running it locally with varying versions of dependencies. 
E.g: - `./build_run_docker_tests.sh --p mainnet` will run the mainnet preset tests diff --git a/setup.py b/setup.py index 0bc90ae787..55f1d0e344 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ import copy from collections import OrderedDict import json -from functools import reduce +from functools import lru_cache from pysetup.constants import ( # code names @@ -70,6 +70,7 @@ def installPackage(package: str): from marko.ext.gfm.elements import Table +@lru_cache(maxsize=None) def _get_name_from_heading(heading: Heading) -> Optional[str]: last_child = heading.children[-1] if isinstance(last_child, CodeSpan): @@ -77,15 +78,18 @@ def _get_name_from_heading(heading: Heading) -> Optional[str]: return None +@lru_cache(maxsize=None) def _get_source_from_code_block(block: FencedCode) -> str: return block.children[0].children.strip() +@lru_cache(maxsize=None) def _get_function_name_from_source(source: str) -> str: fn = ast.parse(source).body[0] return fn.name +@lru_cache(maxsize=None) def _get_self_type_from_source(source: str) -> Optional[str]: fn = ast.parse(source).body[0] args = fn.args.args @@ -98,6 +102,7 @@ def _get_self_type_from_source(source: str) -> Optional[str]: return args[0].annotation.id +@lru_cache(maxsize=None) def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]: class_def = ast.parse(source).body[0] base = class_def.bases[0] @@ -113,12 +118,14 @@ def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]: return class_def.name, parent_class +@lru_cache(maxsize=None) def _is_constant_id(name: str) -> bool: if name[0] not in string.ascii_uppercase + '_': return False return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:])) +@lru_cache(maxsize=None) def _load_kzg_trusted_setups(preset_name): trusted_setups_file_path = str(Path(__file__).parent) + '/presets/' + preset_name + '/trusted_setups/trusted_setup_4096.json' @@ -130,6 +137,7 @@ def _load_kzg_trusted_setups(preset_name): return trusted_setup_G1_monomial, trusted_setup_G1_lagrange, trusted_setup_G2_monomial +@lru_cache(maxsize=None) def _load_curdleproofs_crs(preset_name): """ NOTE: File generated from https://github.com/asn-d6/curdleproofs/blob/8e8bf6d4191fb6a844002f75666fb7009716319b/tests/crs.rs#L53-L67 @@ -153,6 +161,7 @@ def _load_curdleproofs_crs(preset_name): } +@lru_cache(maxsize=None) def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]: _, _, title = child._parse_info if not (title[0] == "(" and title[len(title)-1] == ")"): @@ -163,6 +172,7 @@ def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]: return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip() +@lru_cache(maxsize=None) def _parse_value(name: str, typed_value: str, type_hint: Optional[str] = None) -> VariableDefinition: comment = None if name in ("ROOT_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_REDUCED"): @@ -185,6 +195,11 @@ def _update_constant_vars_with_kzg_setups(constant_vars, preset_name): constant_vars['KZG_SETUP_G2_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G2_MONOMIAL'].value, str(kzg_setups[2]), comment, None) +@lru_cache(maxsize=None) +def parse_markdown(content: str): + return gfm.parse(content) + + def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], preset_name=str) -> SpecObject: functions: Dict[str, str] = {} protocols: Dict[str, ProtocolDefinition] = {} @@ -198,7 +213,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr custom_types: Dict[str, str] = {} with 
open(file_name) as source_file: - document = gfm.parse(source_file.read()) + document = parse_markdown(source_file.read()) current_name = None should_skip = False @@ -326,6 +341,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr ) +@lru_cache(maxsize=None) def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]: """ Loads the a directory of preset files, merges the result into one preset. @@ -344,6 +360,7 @@ def load_preset(preset_files: Sequence[Path]) -> Dict[str, str]: return parse_config_vars(preset) +@lru_cache(maxsize=None) def load_config(config_path: Path) -> Dict[str, str]: """ Loads the given configuration file. @@ -358,7 +375,7 @@ def build_spec(fork: str, source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str: - preset = load_preset(preset_files) + preset = load_preset(tuple(preset_files)) config = load_config(config_file) all_specs = [get_spec(spec, preset, config, preset_name) for spec in source_files] diff --git a/specs/_features/custody_game/beacon-chain.md b/specs/_features/custody_game/beacon-chain.md index 092846a484..66aea773a7 100644 --- a/specs/_features/custody_game/beacon-chain.md +++ b/specs/_features/custody_game/beacon-chain.md @@ -619,7 +619,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed for attester_index in attesters: if attester_index != custody_slashing.malefactor_index: increase_balance(state, attester_index, whistleblower_reward) - # No special whisteblower reward: it is expected to be an attester. Others are free to slash too however. + # No special whistleblower reward: it is expected to be an attester. Others are free to slash too however. else: # The claim was false, the custody bit was correct. Slash the whistleblower that induced this work. slash_validator(state, custody_slashing.whistleblower_index) diff --git a/specs/_features/eip7732/p2p-interface.md b/specs/_features/eip7732/p2p-interface.md index df02cc2382..a2716933cd 100644 --- a/specs/_features/eip7732/p2p-interface.md +++ b/specs/_features/eip7732/p2p-interface.md @@ -130,7 +130,7 @@ The *type* of the payload of this topic changes to the (modified) `SignedBeaconB There are no new validations for this topic. However, all validations with regards to the `ExecutionPayload` are removed: -- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK +- _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` - _[REJECT]_ The block's execution payload timestamp is correct with respect to the slot -- i.e. `execution_payload.timestamp == compute_timestamp_at_slot(state, block.slot)`. - If `execution_payload` verification of block's parent by an execution node is *not* complete: @@ -151,7 +151,7 @@ This topic is used to propagate execution payload messages as `SignedExecutionPa The following validations MUST pass before forwarding the `signed_execution_payload_envelope` on the network, assuming the alias `envelope = signed_execution_payload_envelope.message`, `payload = payload_envelope.payload`: -- _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue payload for processing once the block is retrieved). 
+- _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue payload for processing once the block is retrieved). - _[IGNORE]_ The node has not seen another valid `SignedExecutionPayloadEnvelope` for this block root from this builder. Let `block` be the block with `envelope.beacon_block_root`. @@ -171,7 +171,7 @@ The following validations MUST pass before forwarding the `payload_attestation_m - _[IGNORE]_ The message's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `data.slot == current_slot`. - _[REJECT]_ The message's payload status is a valid status, i.e. `data.payload_status < PAYLOAD_INVALID_STATUS`. - _[IGNORE]_ The `payload_attestation_message` is the first valid message received from the validator with index `payload_attestation_message.validate_index`. -- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after). +- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via gossip or non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after). - _[REJECT]_ The message's block `data.beacon_block_root` passes validation. - _[REJECT]_ The message's validator index is within the payload committee in `get_ptc(state, data.slot)`. The `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice. - _[REJECT]_ The message's signature of `payload_attestation_message.signature` is valid with respect to the validator index. diff --git a/specs/_features/whisk/beacon-chain.md b/specs/_features/whisk/beacon-chain.md index de8051ffeb..3b527900e7 100644 --- a/specs/_features/whisk/beacon-chain.md +++ b/specs/_features/whisk/beacon-chain.md @@ -54,7 +54,7 @@ This document details the beacon chain additions and changes of to support the W | `WHISK_PROPOSER_TRACKERS_COUNT` | `uint64(2**13)` (= 8,192) | number of proposer trackers | | `WHISK_VALIDATORS_PER_SHUFFLE` | `uint64(2**7 - 4)` (= 124) | number of validators shuffled per shuffle step | | `WHISK_MAX_SHUFFLE_PROOF_SIZE` | `uint64(2**15)` | max size of a shuffle proof | -| `WHISK_MAX_OPENING_PROOF_SIZE` | `uint64(2**10)` | max size of a opening proof | +| `WHISK_MAX_OPENING_PROOF_SIZE` | `uint64(2**10)` | max size of an opening proof | ## Configuration diff --git a/specs/altair/light-client/full-node.md b/specs/altair/light-client/full-node.md index d887691c67..b3b65b83fa 100644 --- a/specs/altair/light-client/full-node.md +++ b/specs/altair/light-client/full-node.md @@ -146,7 +146,7 @@ Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to ` - `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.beacon.slot` - `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(update.signature_slot)` -- Only `LightClientUpdate` with `next_sync_committee` as selected by fork choice are provided, regardless of ranking by `is_better_update`. To uniquely identify a non-finalized sync committee fork, all of `period`, `current_sync_committee` and `next_sync_committee` need to be incorporated, as sync committees may reappear over time. 
+- Only `LightClientUpdate` with `sync_aggregate` from blocks on the canonical chain as selected by fork choice are considered, regardless of ranking by `is_better_update`. `LightClientUpdate` referring to orphaned blocks SHOULD NOT be provided. ### `create_light_client_finality_update` diff --git a/specs/altair/validator.md b/specs/altair/validator.md index 3602377acd..00dca30308 100644 --- a/specs/altair/validator.md +++ b/specs/altair/validator.md @@ -295,7 +295,7 @@ The `subnet_id` is derived from the position in the sync committee such that the *Note*: This function returns multiple deduplicated subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees. ```python -def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[uint64]: +def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[SubnetID]: next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1)) if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch): sync_committee = state.current_sync_committee @@ -305,7 +305,7 @@ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: Vali target_pubkey = state.validators[validator_index].pubkey sync_committee_indices = [index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey] return set([ - uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)) + SubnetID(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)) for index in sync_committee_indices ]) ``` diff --git a/specs/bellatrix/p2p-interface.md b/specs/bellatrix/p2p-interface.md index 1f4c815660..b2d28cf1f4 100644 --- a/specs/bellatrix/p2p-interface.md +++ b/specs/bellatrix/p2p-interface.md @@ -148,8 +148,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: #### Why was the max gossip message size increased at Bellatrix? With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic -field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in -place at Phase 0, so GOSSIP_MAX_SIZE has increased to 10 Mib on the network. +field -- `transactions` -- which can validly exceed the `MAX_PAYLOAD_SIZE` limit (1 MiB) put in +place at Phase 0, so MAX_PAYLOAD_SIZE has increased to 10 MiB on the network. At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction filled entirely with data at a cost of 16 gas per byte can create a valid `ExecutionPayload` of ~2 MiB. 
Thus we need a size limit to at least account for diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index 43360f8b3e..966a7007d9 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -20,6 +20,7 @@ - [`BeaconBlockBody`](#beaconblockbody) - [`ExecutionPayload`](#executionpayload) - [`ExecutionPayloadHeader`](#executionpayloadheader) + - [`BeaconState`](#beaconstate) - [Helper functions](#helper-functions) - [Misc](#misc) - [`kzg_commitment_to_versioned_hash`](#kzg_commitment_to_versioned_hash) @@ -171,6 +172,53 @@ class ExecutionPayloadHeader(Container): excess_blob_gas: uint64 # [New in Deneb:EIP4844] ``` +#### `BeaconState` + +```python +class BeaconState(Container): + # Versioning + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + # History + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + # Eth1 + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + # Registry + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + # Randomness + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + # Slashings + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances + # Participation + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + # Finality + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + # Inactivity + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + # Sync + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # Execution + latest_execution_payload_header: ExecutionPayloadHeader # [Modified in Deneb:EIP4844] + # Withdrawals + next_withdrawal_index: WithdrawalIndex + next_withdrawal_validator_index: ValidatorIndex + # Deep history valid from Capella onwards + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] +``` + ## Helper functions ### Misc diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 5f71bc854a..e38a50ba2e 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -147,7 +147,7 @@ The *type* of the payload of this topic changes to the (modified) `SignedBeaconB New validation: - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` + i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK` ###### `beacon_aggregate_and_proof` @@ -181,7 +181,7 @@ The following validations MUST pass before forwarding the `blob_sidecar` on the - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. 
validate that `block_header.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)` - _[REJECT]_ The proposer signature of `blob_sidecar.signed_block_header`, is valid with respect to the `block_header.proposer_index` pubkey. -- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). +- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via gossip or non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation. - _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `block_header.parent_root`). - _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. diff --git a/specs/electra/p2p-interface.md b/specs/electra/p2p-interface.md index 0016976e93..5064676f18 100644 --- a/specs/electra/p2p-interface.md +++ b/specs/electra/p2p-interface.md @@ -14,12 +14,13 @@ - [Global topics](#global-topics) - [`beacon_block`](#beacon_block) - [`beacon_aggregate_and_proof`](#beacon_aggregate_and_proof) + - [`blob_sidecar_{subnet_id}`](#blob_sidecar_subnet_id) - [Attestation subnets](#attestation-subnets) - [`beacon_attestation_{subnet_id}`](#beacon_attestation_subnet_id) - [The Req/Resp domain](#the-reqresp-domain) - [Messages](#messages) - - [BlobSidecarsByRoot v2](#blobsidecarsbyroot-v2) - - [BlobSidecarsByRange v2](#blobsidecarsbyrange-v2) + - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) + - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) @@ -66,7 +67,7 @@ The derivation of the `message-id` remains stable. *Updated validation* - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA` + i.e. validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_ELECTRA` ###### `beacon_aggregate_and_proof` @@ -77,6 +78,14 @@ The following validations are added: * [REJECT] `len(committee_indices) == 1`, where `committee_indices = get_committee_indices(aggregate)`. * [REJECT] `aggregate.data.index == 0` +###### `blob_sidecar_{subnet_id}` + +*[Modified in Electra:EIP7691]* + +The existing validations all apply as given from previous forks, with the following exceptions: + +* Uses of `MAX_BLOBS_PER_BLOCK` in existing validations are replaced with `MAX_BLOBS_PER_BLOCK_ELECTRA`. 
+ ##### Attestation subnets ###### `beacon_attestation_{subnet_id}` @@ -101,21 +110,12 @@ The following validations are removed: #### Messages -##### BlobSidecarsByRoot v2 +##### BlobSidecarsByRoot v1 -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/2/` +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` *[Modified in Electra:EIP7691]* -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|------------------------|-----------------------| -| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` | -| `ELECTRA_FORK_VERSION` | `electra.BlobSidecar` | - Request Content: ``` @@ -136,21 +136,12 @@ Response Content: No more than `MAX_REQUEST_BLOB_SIDECARS_ELECTRA` may be requested at a time. -##### BlobSidecarsByRange v2 +##### BlobSidecarsByRange v1 -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/2/` +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` *[Modified in Electra:EIP7691]* -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|------------------------|-----------------------| -| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` | -| `ELECTRA_FORK_VERSION` | `electra.BlobSidecar` | - Request Content: ``` diff --git a/specs/electra/validator.md b/specs/electra/validator.md index 2e980d5345..3620c30790 100644 --- a/specs/electra/validator.md +++ b/specs/electra/validator.md @@ -24,6 +24,8 @@ - [Deposits](#deposits) - [Execution payload](#execution-payload) - [Execution Requests](#execution-requests) + - [Constructing the `BlobSidecar`s](#constructing-the-blobsidecars) + - [Sidecar](#sidecar) - [Attesting](#attesting) - [Construct attestation](#construct-attestation) - [Attestation aggregation](#attestation-aggregation) @@ -240,6 +242,17 @@ def get_execution_requests(execution_requests_list: Sequence[bytes]) -> Executio ) ``` +### Constructing the `BlobSidecar`s + +#### Sidecar + +*[Modified in Electra:EIP7691]* + +```python +def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID: + return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT_ELECTRA) +``` + ## Attesting ### Construct attestation diff --git a/specs/fulu/das-core.md b/specs/fulu/das-core.md index 25576bc1f4..846f6b206e 100644 --- a/specs/fulu/das-core.md +++ b/specs/fulu/das-core.md @@ -105,19 +105,20 @@ class MatrixEntry(Container): def get_custody_groups(node_id: NodeID, custody_group_count: uint64) -> Sequence[CustodyIndex]: assert custody_group_count <= NUMBER_OF_CUSTODY_GROUPS - custody_groups: List[uint64] = [] current_id = uint256(node_id) + custody_groups: List[CustodyIndex] = [] while len(custody_groups) < custody_group_count: custody_group = CustodyIndex( - bytes_to_uint64(hash(uint_to_bytes(uint256(current_id)))[0:8]) + bytes_to_uint64(hash(uint_to_bytes(current_id))[0:8]) % NUMBER_OF_CUSTODY_GROUPS ) if custody_group not in custody_groups: custody_groups.append(custody_group) if current_id == UINT256_MAX: # Overflow prevention - current_id = NodeID(0) - current_id += 1 + current_id = uint256(0) + else: + current_id += 1 assert len(custody_groups) == len(set(custody_groups)) return sorted(custody_groups) @@ -237,7 +238,7 @@ The particular columns/groups that a node custodies are selected pseudo-randomly ## Custody sampling -At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = 
max(SAMPLES_PER_SLOT, custody_group_count)` total custody groups. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. +At each slot, a node advertising `custody_group_count` downloads a minimum of `sampling_size = max(SAMPLES_PER_SLOT, custody_group_count * columns_per_group)` total columns, where `columns_per_group = NUMBER_OF_COLUMNS // NUMBER_OF_CUSTODY_GROUPS`. The corresponding set of columns is selected by `groups = get_custody_groups(node_id, sampling_size)` and `compute_columns_for_custody_group(group) for group in groups`, so that in particular the subset of columns to custody is consistent with the output of `get_custody_groups(node_id, custody_group_count)`. Sampling is considered successful if the node manages to retrieve all selected columns. ## Extended data diff --git a/specs/fulu/p2p-interface.md b/specs/fulu/p2p-interface.md index abebbffecc..73d96192ff 100644 --- a/specs/fulu/p2p-interface.md +++ b/specs/fulu/p2p-interface.md @@ -29,8 +29,6 @@ - [`data_column_sidecar_{subnet_id}`](#data_column_sidecar_subnet_id) - [The Req/Resp domain](#the-reqresp-domain) - [Messages](#messages) - - [BlobSidecarsByRoot v3](#blobsidecarsbyroot-v3) - - [BlobSidecarsByRange v3](#blobsidecarsbyrange-v3) - [DataColumnSidecarsByRoot v1](#datacolumnsidecarsbyroot-v1) - [DataColumnSidecarsByRange v1](#datacolumnsidecarsbyrange-v1) - [GetMetaData v3](#getmetadata-v3) @@ -64,7 +62,6 @@ The specification of these changes continues in the same format as the network s | `DATA_COLUMN_SIDECAR_SUBNET_COUNT` | `128` | The number of data column sidecar subnets used in the gossipsub protocol | | `MAX_REQUEST_DATA_COLUMN_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS` | Maximum number of data column sidecars in a single request | | `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve data column sidecars | -| `MAX_REQUEST_BLOB_SIDECARS_FULU` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_FULU` | Maximum number of blob sidecars in a single request | ### Containers @@ -152,14 +149,14 @@ The `MetaData` stored locally by clients is updated with an additional field to seq_number: uint64 attnets: Bitvector[ATTESTATION_SUBNET_COUNT] syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT] - custody_subnet_count: uint64 # csc + custody_group_count: uint64 # cgc ) ``` Where - `seq_number`, `attnets`, and `syncnets` have the same meaning defined in the Altair document. -- `custody_subnet_count` represents the node's custody subnet count. Clients MAY reject peers with a value less than `CUSTODY_REQUIREMENT`. +- `custody_group_count` represents the node's custody group count. Clients MAY reject peers with a value less than `CUSTODY_REQUIREMENT`. ### The gossip domain: gossipsub @@ -174,7 +171,7 @@ Some gossip meshes are upgraded in the Fulu fork to support upgraded types. *Updated validation* - _[REJECT]_ The length of KZG commitments is less than or equal to the limitation defined in Consensus Layer -- - i.e. validate that `len(body.signed_beacon_block.message.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU` + i.e. 
validate that `len(signed_beacon_block.message.body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK_FULU` ##### Blob subnets @@ -195,7 +192,7 @@ The following validations MUST pass before forwarding the `sidecar: DataColumnSi - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[REJECT]_ The proposer signature of `sidecar.signed_block_header`, is valid with respect to the `block_header.proposer_index` pubkey. -- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). +- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via gossip or non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation. - _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `block_header.parent_root`). - _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`. @@ -211,75 +208,6 @@ The following validations MUST pass before forwarding the `sidecar: DataColumnSi #### Messages -##### BlobSidecarsByRoot v3 - -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/3/` - -*[Modified in Fulu:EIP7594]* - -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|---------------------|--------------------| -| `FULU_FORK_VERSION` | `fulu.BlobSidecar` | - -Request Content: - -``` -( - List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS_FULU] -) -``` - -Response Content: - -``` -( - List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS_FULU] -) -``` - -*Updated validation* - -No more than `MAX_REQUEST_BLOB_SIDECARS_FULU` may be requested at a time. - -##### BlobSidecarsByRange v3 - -**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/3/` - -*[Modified in Fulu:EIP7594]* - -The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: - -[1]: # (eth2spec: skip) - -| `fork_version` | Chunk SSZ type | -|---------------------|--------------------| -| `FULU_FORK_VERSION` | `fulu.BlobSidecar` | - -Request Content: - -``` -( - start_slot: Slot - count: uint64 -) -``` - -Response Content: - -``` -( - List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS_FULU] -) -``` - -*Updated validation* - -Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS_FULU` sidecars. 
- ##### DataColumnSidecarsByRoot v1 **Protocol ID:** `/eth2/beacon_chain/req/data_column_sidecars_by_root/1/` diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 396e4671b8..240b3ad2cf 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -16,6 +16,9 @@ - [Constants](#constants) - [Configuration](#configuration) - [MetaData](#metadata) + - [Maximum message sizes](#maximum-message-sizes) + - [`max_compressed_len`](#max_compressed_len) + - [`max_message_size`](#max_message_size) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) - [Global topics](#global-topics) @@ -28,6 +31,7 @@ - [`beacon_attestation_{subnet_id}`](#beacon_attestation_subnet_id) - [Attestations and Aggregation](#attestations-and-aggregation) - [Encodings](#encodings) + - [Gossipsub size limits](#gossipsub-size-limits) - [The Req/Resp domain](#the-reqresp-domain) - [Protocol identification](#protocol-identification) - [Req/Resp interaction](#reqresp-interaction) @@ -102,6 +106,8 @@ - [Why are we using Snappy for compression?](#why-are-we-using-snappy-for-compression) - [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes) - [What are SSZ type size bounds?](#what-are-ssz-type-size-bounds) + - [Why is the message size defined in terms of application payload?](#why-is-the-message-size-defined-in-terms-of-application-payload) + - [Why is there a limit on message sizes at all?](#why-is-there-a-limit-on-message-sizes-at-all) - [libp2p implementations matrix](#libp2p-implementations-matrix) @@ -127,8 +133,8 @@ This section outlines the specification for the networking stack in Ethereum con Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability. -All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). -The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously). +All implementations MUST support the TCP libp2p transport, MAY support the QUIC (UDP) libp2p transport, and MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). +The libp2p TCP and QUIC (UDP) transports support listening on IPv4 and IPv6 addresses (and on multiple simultaneously). Clients must support listening on at least one of IPv4 or IPv6. Clients that do _not_ have support for listening on IPv4 SHOULD be cognizant of the potential disadvantages in terms of @@ -193,11 +199,10 @@ This section outlines configurations that are used in this spec. | Name | Value | Description | |---|---|---| -| `GOSSIP_MAX_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed gossip messages. 
| +| `MAX_PAYLOAD_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed payload in gossipsub messages and RPC chunks | | `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request | | `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) | | `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks | -| `MAX_CHUNK_SIZE` | `10 * 2**20` (=10485760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. | | `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. | | `MAXIMUM_GOSSIP_CLOCK_DISPARITY` | `500` | The maximum **milliseconds** of clock disparity assumed between honest nodes. | | `MESSAGE_DOMAIN_INVALID_SNAPPY` | `DomainType('0x00000000')` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages | @@ -229,6 +234,27 @@ Where is entirely independent of the ENR sequence number, and will in most cases be out of sync with the ENR sequence number. +### Maximum message sizes + +Maximum message sizes are derived from the maximum payload size that the network can carry according to the following functions: + +#### `max_compressed_len` + +```python +def max_compressed_len(n: uint64) -> uint64: + # Worst-case compressed length for a given payload of size n when using snappy: + # https://github.com/google/snappy/blob/32ded457c0b1fe78ceb8397632c416568d6714a0/snappy.cc#L218C1-L218C47 + return uint64(32 + n + n / 6) +``` + +#### `max_message_size` + +```python +def max_message_size() -> uint64: + # Allow 1024 bytes for framing and encoding overhead but at least 1MiB in case MAX_PAYLOAD_SIZE is small. + return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024 * 1024) +``` + ### The gossip domain: gossipsub Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) libp2p Protocol @@ -268,13 +294,11 @@ This defines both the type of data being sent on the topic and how the data fiel - `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encodings) section for further details. +Clients MUST reject messages with an unknown topic. + *Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known. -Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. -Clients MUST reject (fail validation) messages that are over this size limit. -Likewise, clients MUST NOT emit or propagate messages larger than this limit. - The optional `from` (1), `seqno` (3), `signature` (5) and `key` (6) protobuf fields are omitted from the message, since messages are identified by content, anonymous, and signed where necessary in the application layer. Starting from Gossipsub v1.1, clients MUST enforce this by applying the `StrictNoSign` @@ -288,6 +312,8 @@ The `message-id` of a gossipsub message MUST be the following 20 byte value comp the concatenation of `MESSAGE_DOMAIN_INVALID_SNAPPY` with the raw message data, i.e. `SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data)[:20]`. 
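As an illustration of the derivation just described, here is a minimal sketch of the fallback branch of the `message-id` computation, using only what is visible in this excerpt: the 4-byte `MESSAGE_DOMAIN_INVALID_SNAPPY` value from the configuration table and the `SHA256(...)[:20]` formula. The full spec also defines a valid-snappy branch over the decompressed payload, which is outside this hunk; the helper name below is hypothetical.

```python
import hashlib

# Illustrative sketch only: the fallback message-id derivation for gossip
# messages whose `data` does not decompress as valid snappy. The 4-byte
# domain value matches the configuration table above.
MESSAGE_DOMAIN_INVALID_SNAPPY = bytes.fromhex("00000000")


def message_id_for_invalid_snappy(message_data: bytes) -> bytes:
    # SHA256(MESSAGE_DOMAIN_INVALID_SNAPPY + message.data)[:20]
    return hashlib.sha256(MESSAGE_DOMAIN_INVALID_SNAPPY + message_data).digest()[:20]
```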
+Where relevant, clients MUST reject messages with `message-id` sizes other than 20 bytes. + *Note*: The above logic handles two exceptional cases: (1) multiple snappy `data` can decompress to the same value, and (2) some message `data` can fail to snappy decompress altogether. @@ -338,7 +364,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block` - _[IGNORE]_ The block is the first block with valid signature received for the proposer for the slot, `signed_beacon_block.message.slot`. - _[REJECT]_ The proposer signature, `signed_beacon_block.signature`, is valid with respect to the `proposer_index` pubkey. - _[IGNORE]_ The block's parent (defined by `block.parent_root`) has been seen - (via both gossip and non-gossip sources) + (via gossip or non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). - _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation. - _[REJECT]_ The block is from a higher slot than its parent. @@ -387,7 +413,7 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_ - _[REJECT]_ The aggregator signature, `signed_aggregate_and_proof.signature`, is valid. - _[REJECT]_ The signature of `aggregate` is valid. - _[IGNORE]_ The block being voted for (`aggregate.data.beacon_block_root`) has been seen - (via both gossip and non-gossip sources) + (via gossip or non-gossip sources) (a client MAY queue aggregates for processing once block is retrieved). - _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation. - _[REJECT]_ The aggregate attestation's target block is an ancestor of the block named in the LMD vote -- i.e. @@ -462,7 +488,7 @@ The following validations MUST pass before forwarding the `attestation` on the s that has an identical `attestation.data.target.epoch` and participating validator index. - _[REJECT]_ The signature of `attestation` is valid. - _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen - (via both gossip and non-gossip sources) + (via gossip or non-gossip sources) (a client MAY queue attestations for processing once block is retrieved). - _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation. - _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e. @@ -502,6 +528,16 @@ so [basic snappy block compression](https://github.com/google/snappy/blob/master Implementations MUST use a single encoding for gossip. Changing an encoding will require coordination between participating implementations. +#### Gossipsub size limits + +Size limits are placed both on the [`RPCMsg`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-rpc) frame as well as the encoded payload in each [`Message`](https://github.com/libp2p/specs/blob/b5f7fce29b32d4c7d0efe37b019936a11e5db872/pubsub/README.md#the-message). + +Clients MUST reject and MUST NOT emit or propagate messages whose size exceed the following limits: + +* The size of the encoded `RPCMsg` (including control messages, framing, topics, etc) must not exceed `max_message_size()`. +* The size of the compressed payload in the `Message.data` field must not exceed `max_compressed_len(MAX_PAYLOAD_SIZE)`. +* The size of the uncompressed payload must not exceed `MAX_PAYLOAD_SIZE` or the [type-specific SSZ bound](#what-are-ssz-type-size-bounds), whichever is lower. 
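Tying the list above to the helper functions introduced earlier in this patch (`MAX_PAYLOAD_SIZE`, `max_compressed_len`, `max_message_size`), the following is a hedged sketch of how a client might apply the three limits; the function and parameter names are hypothetical, and `ssz_bound` stands in for the topic's type-specific SSZ bound.

```python
# Illustrative sketch, not part of the spec: apply the three gossipsub size
# limits listed above. Relies on MAX_PAYLOAD_SIZE, max_compressed_len() and
# max_message_size() as defined earlier in this patch.
def within_gossip_size_limits(rpc_msg_len: int,
                              compressed_payload_len: int,
                              uncompressed_payload_len: int,
                              ssz_bound: int) -> bool:
    if rpc_msg_len > max_message_size():                               # encoded RPCMsg frame
        return False
    if compressed_payload_len > max_compressed_len(MAX_PAYLOAD_SIZE):  # Message.data field
        return False
    if uncompressed_payload_len > min(MAX_PAYLOAD_SIZE, ssz_bound):    # decoded payload
        return False
    return True
```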
+ ### The Req/Resp domain #### Protocol identification @@ -551,7 +587,7 @@ All other response types (non-Lists) send a single `response_chunk`. For both `request`s and `response`s, the `encoding-dependent-header` MUST be valid, and the `encoded-payload` must be valid within the constraints of the `encoding-dependent-header`. This includes type-specific bounds on payload size for some encoding strategies. -Regardless of these type specific bounds, a global maximum uncompressed byte size of `MAX_CHUNK_SIZE` MUST be applied to all method response chunks. +Regardless of these type specific bounds, a global maximum uncompressed byte size of `MAX_PAYLOAD_SIZE` MUST be applied to all method response chunks. Clients MUST ensure that lengths are within these bounds; if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. @@ -665,15 +701,13 @@ When snappy is applied, it can be passed through a buffered Snappy reader to dec Before reading the payload, the header MUST be validated: - The unsigned protobuf varint used for the length-prefix MUST not be longer than 10 bytes, which is sufficient for any `uint64`. -- The length-prefix is within the expected [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds). +- The length-prefix is within the expected [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds) or `MAX_PAYLOAD_SIZE`, whichever is smaller. After reading a valid header, the payload MAY be read, while maintaining the size constraints from the header. -A reader SHOULD NOT read more than `max_encoded_len(n)` bytes after reading the SSZ length-prefix `n` from the header. -- For `ssz_snappy` this is: `32 + n + n // 6`. - This is considered the [worst-case compression result](https://github.com/google/snappy/blob/537f4ad6240e586970fe554614542e9717df7902/snappy.cc#L98) by Snappy. +A reader MUST NOT read more than `max_compressed_len(n)` bytes after reading the SSZ length-prefix `n` from the header. -A reader SHOULD consider the following cases as invalid input: +A reader MUST consider the following cases as invalid input: - Any remaining bytes, after having read the `n` SSZ bytes. An EOF is expected if more bytes are read than required. - An early EOF, before fully reading the declared length-prefix worth of SSZ bytes. @@ -963,9 +997,9 @@ The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the The ENR MAY contain the following entries: - An IPv4 address (`ip` field) and/or IPv6 address (`ip6` field). -- A TCP port (`tcp` field) representing the local libp2p TCP listening port. -- A QUIC port (`quic` field) representing the local libp2p QUIC (UDP) listening port. -- A UDP port (`udp` field) representing the local discv5 listening port. +- An IPv4 TCP port (`tcp` field) representing the local libp2p TCP listening port and/or the corresponding IPv6 port (`tcp6` field). +- An IPv4 QUIC port (`quic` field) representing the local libp2p QUIC (UDP) listening port and/or the corresponding IPv6 port (`quic6` field). +- An IPv4 UDP port (`udp` field) representing the local discv5 listening port and/or the corresponding IPv6 port (`udp6` field). Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778). 
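As a small illustration of the optional ENR entries just listed, the sketch below collects the IPv4/IPv6 transport ports from a decoded record; it assumes the ENR is exposed as a plain mapping of key to decoded value, which is an assumption of this sketch rather than anything mandated by the spec.

```python
from typing import Dict, Optional

# Illustrative only: gather the optional libp2p/discv5 ports from a decoded
# ENR, keyed exactly as in the list above. Missing entries simply stay None.
def collect_enr_ports(enr: Dict[str, int]) -> Dict[str, Optional[int]]:
    return {
        "tcp": enr.get("tcp"),      # IPv4 libp2p TCP listening port
        "tcp6": enr.get("tcp6"),    # IPv6 libp2p TCP listening port
        "quic": enr.get("quic"),    # IPv4 libp2p QUIC (UDP) listening port
        "quic6": enr.get("quic6"),  # IPv6 libp2p QUIC (UDP) listening port
        "udp": enr.get("udp"),      # IPv4 discv5 listening port
        "udp6": enr.get("udp6"),    # IPv6 discv5 listening port
    }
```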
@@ -1430,7 +1464,7 @@ Nevertheless, in the case of `ssz_snappy`, messages are still length-prefixed wi * Alignment with protocols like gRPC over HTTP/2 that prefix with length * Sanity checking of message length, and enabling much stricter message length limiting based on SSZ type information, to provide even more DOS protection than the global message length already does. - E.g. a small `Status` message does not nearly require `MAX_CHUNK_SIZE` bytes. + E.g. a small `Status` message does not nearly require `MAX_PAYLOAD_SIZE` bytes. [Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length (unsigned here) ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte. @@ -1679,6 +1713,22 @@ Other types are static, they have a fixed size: no dynamic-length content is inv For reference, the type bounds can be computed ahead of time, [as per this example](https://gist.github.com/protolambda/db75c7faa1e94f2464787a480e5d613e). It is advisable to derive these lengths from the SSZ type definitions in use, to ensure that version changes do not cause out-of-sync type bounds. +#### Why is the message size defined in terms of application payload? + +When transmitting messages over gossipsub and/or the req/resp domain, we want to ensure that the same payload sizes are supported regardless of the underlying transport, decoupling the consensus layer from libp2p-induced overhead and the particular transmission strategy. + +To derive "encoded size limits" from desired application sizes, we take into account snappy compression and framing overhead. + +In the case of gossipsub, the protocol supports sending multiple application payloads as well as mixing application data with control messages in each gossipsub frame. The limit is set such that at least one max-sized application-level message together with a small amount (1 KiB) of gossipsub overhead is allowed. Implementations are free to pack multiple smaller application messages into a single gossipsub frame, and/or combine it with control messages as they see fit. + +The limit is set on the uncompressed payload size in particular to protect against decompression bombs. + +#### Why is there a limit on message sizes at all? + +The message size limit protects against several forms of DoS and network-based amplification attacks and provides upper bounds for resource (network, memory) usage in the client based on protocol requirements to decode, buffer, cache, store and re-transmit messages which in turn translate into performance and protection tradeoffs, ensuring capacity to handle worst cases during recovery from network instability. + +In particular, blocks—-currently the only message type without a practical SSZ-derived upper bound on size—-cannot be fully verified synchronously as part of gossipsub validity checks. This means that there exist cases where invalid messages signed by a validator may be amplified by the network. 
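To make the size derivation discussed above concrete, the short check below plugs the mainnet `MAX_PAYLOAD_SIZE` (10 MiB) into the two helper functions defined earlier in this patch; reading `n / 6` as integer division on `uint64` is an assumption of this sketch.

```python
# Illustrative check of the derived limits for MAX_PAYLOAD_SIZE = 10 MiB,
# mirroring max_compressed_len() and max_message_size() from this patch.
MAX_PAYLOAD_SIZE = 10 * 2**20  # 10485760 bytes


def max_compressed_len(n: int) -> int:
    # Worst-case snappy output for an n-byte payload (see the spec function).
    return 32 + n + n // 6


def max_message_size() -> int:
    # 1024 bytes of framing/encoding headroom, with a 1 MiB floor.
    return max(max_compressed_len(MAX_PAYLOAD_SIZE) + 1024, 1024 * 1024)


print(max_compressed_len(MAX_PAYLOAD_SIZE))  # 12233418, roughly 11.67 MiB
print(max_message_size())                    # 12234442, roughly 11.67 MiB
```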
+ ## libp2p implementations matrix This section will soon contain a matrix showing the maturity/state of the libp2p features required diff --git a/tests/README.md b/tests/README.md index 798627577d..dc2e02439d 100644 --- a/tests/README.md +++ b/tests/README.md @@ -54,15 +54,14 @@ To learn how consensus spec tests are written, let's go over the code: This [decorator](https://book.pythontips.com/en/latest/decorators.html) specifies that this test is applicable to all the phases of consensus layer development. These phases are similar to forks (Istanbul, -Berlin, London, etc.) in the execution blockchain. If you are interested, [you can see the definition of -this decorator here](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py#L331-L335). +Berlin, London, etc.) in the execution blockchain. ```python @spec_state_test ``` -[This decorator](https://github.com/qbzzt/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py#L232-L234) specifies -that this test is a state transition test, and that it does not include a transition between different forks. +This decorator specifies that this test is a state transition test, and that it does not include a transition +between different forks. ```python def test_empty_block_transition(spec, state): @@ -162,8 +161,7 @@ find . -name '*.py' -exec grep 'def state_transition_and_sign_block' {} \; -prin ``` And you'll find that the function is defined in -[`eth2spec/test/helpers/state.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/state.py). Looking -in that file, we see that the second function is: +`eth2spec/test/helpers/state.py`. Looking in that file, we see that the second function is: ```python def next_slot(spec, state): @@ -199,8 +197,7 @@ verify this). It is important to make sure that the system rejects invalid input, so our next step is to deal with cases where the protocol is supposed to reject something. To see such a test, look at `test_prev_slot_block_transition` (in the same -file we used previously, -[`~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py)). +file we used previously, `~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`). ```python @with_all_phases @@ -230,8 +227,7 @@ Transition to the new slot, which naturally has a different proposer. ``` Specify that the function `transition_unsigned_block` will cause an assertion error. 
-You can see this function in -[`~/consensus-specs/tests/core/pyspec/eth2spec/test/helpers/block.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/block.py), +You can see this function in `~/consensus-specs/tests/core/pyspec/eth2spec/test/helpers/block.py`, and one of the tests is that the block must be for this slot: > ```python > assert state.slot == block.slot diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index e7fd637b5a..ba25d3754e 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.5.0-alpha.10 +1.5.0-beta.0 diff --git a/tests/core/pyspec/eth2spec/gen_helpers/README.md b/tests/core/pyspec/eth2spec/gen_helpers/README.md index 8fda6b585e..595b411f70 100644 --- a/tests/core/pyspec/eth2spec/gen_helpers/README.md +++ b/tests/core/pyspec/eth2spec/gen_helpers/README.md @@ -26,7 +26,7 @@ Options: ## `gen_from_tests` -This is an util to derive tests from a tests source file. +This is a util to derive tests from a tests source file. This requires the tests to yield test-case-part outputs. These outputs are then written to the test case directory. Yielding data is illegal in normal pytests, so it is only done when in "generator mode". diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py new file mode 100644 index 0000000000..af73b26345 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_data_collection.py @@ -0,0 +1,193 @@ +from eth2spec.test.context import ( + spec_state_test_with_matching_config, + with_presets, + with_light_client, +) +from eth2spec.test.helpers.constants import ( + MINIMAL, +) +from eth2spec.test.helpers.light_client_data_collection import ( + add_new_block, + finish_lc_data_collection_test, + get_lc_bootstrap_block_id, + get_lc_update_attested_block_id, + get_light_client_bootstrap, + get_light_client_finality_update, + get_light_client_optimistic_update, + get_light_client_update_for_period, + select_new_head, + setup_lc_data_collection_test, + BlockID, +) + + +@with_light_client +@spec_state_test_with_matching_config +@with_presets([MINIMAL], reason="too slow") +def test_light_client_data_collection(spec, state): + # Start test + test = yield from setup_lc_data_collection_test(spec, state) + + # Genesis block is post Altair and is finalized, so can be used as bootstrap + genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid + + # No blocks have been imported, so no other light client data is available + period = spec.compute_sync_committee_period_at_slot(state.slot) + assert get_light_client_update_for_period(test, period).spec is None + assert get_light_client_finality_update(test).spec is None + assert get_light_client_optimistic_update(test).spec is None + + # Start branch A with a block that has an empty sync aggregate + spec_a, state_a, bid_1 = yield from add_new_block(test, spec, state, slot=1) + yield from select_new_head(test, spec_a, bid_1) + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_light_client_update_for_period(test, period).spec is None + assert get_light_client_finality_update(test).spec is None + assert get_light_client_optimistic_update(test).spec is None + + # 
Start branch B with a block that has 1 participant + spec_b, state_b, bid_2 = yield from add_new_block(test, spec, state, slot=2, num_sync_participants=1) + yield from select_new_head(test, spec_b, bid_2) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == genesis_bid + + # Build on branch A, once more with an empty sync aggregate + spec_a, state_a, bid_3 = yield from add_new_block(test, spec_a, state_a, slot=3) + yield from select_new_head(test, spec_a, bid_3) + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_light_client_update_for_period(test, period).spec is None + assert get_light_client_finality_update(test).spec is None + assert get_light_client_optimistic_update(test).spec is None + + # Build on branch B, this time with an empty sync aggregate + spec_b, state_b, bid_4 = yield from add_new_block(test, spec_b, state_b, slot=4) + yield from select_new_head(test, spec_b, bid_4) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == genesis_bid + + # Build on branch B, once more with 1 participant + spec_b, state_b, bid_5 = yield from add_new_block(test, spec_b, state_b, slot=5, num_sync_participants=1) + yield from select_new_head(test, spec_b, bid_5) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == genesis_bid + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_4 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_4 + + # Build on branch B, this time with 3 participants + spec_b, state_b, bid_6 = yield from add_new_block(test, spec_b, state_b, slot=6, num_sync_participants=3) + yield from select_new_head(test, spec_b, bid_6) + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_5 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_5 + + # Build on branch A, with 2 participants + spec_a, state_a, bid_7 = yield from add_new_block(test, spec_a, state_a, slot=7, num_sync_participants=2) + yield from select_new_head(test, spec_a, bid_7) + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_3 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_3 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_3 + + # Branch A: epoch 1, slot 5 + slot = spec_a.compute_start_slot_at_epoch(1) + 5 + spec_a, state_a, bid_1_5 = yield from add_new_block(test, spec_a, state_a, slot=slot, 
num_sync_participants=4) + yield from select_new_head(test, spec_a, bid_1_5) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_7 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_7 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_7 + + # Branch B: epoch 2, slot 4 + slot = spec_b.compute_start_slot_at_epoch(2) + 4 + spec_b, state_b, bid_2_4 = yield from add_new_block(test, spec_b, state_b, slot=slot, num_sync_participants=5) + yield from select_new_head(test, spec_b, bid_2_4) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_6 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_6 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_6 + + # Branch A: epoch 3, slot 0 + slot = spec_a.compute_start_slot_at_epoch(3) + 0 + spec_a, state_a, bid_3_0 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) + yield from select_new_head(test, spec_a, bid_3_0) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_1_5 + + # Branch A: fill epoch + for i in range(1, spec_a.SLOTS_PER_EPOCH): + spec_a, state_a, bid_a = yield from add_new_block(test, spec_a, state_a) + yield from select_new_head(test, spec_a, bid_a) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_1_5 + assert state_a.slot == spec_a.compute_start_slot_at_epoch(4) - 1 + bid_3_n = bid_a + + # Branch A: epoch 4, slot 0 + slot = spec_a.compute_start_slot_at_epoch(4) + 0 + spec_a, state_a, bid_4_0 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) + yield from select_new_head(test, spec_a, bid_4_0) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert 
get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + assert get_light_client_bootstrap(test, bid_4_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_3_n + + # Branch A: fill epoch + for i in range(1, spec_a.SLOTS_PER_EPOCH): + spec_a, state_a, bid_a = yield from add_new_block(test, spec_a, state_a) + yield from select_new_head(test, spec_a, bid_a) + assert get_light_client_bootstrap(test, bid_7.root).spec is None + assert get_light_client_bootstrap(test, bid_1_5.root).spec is None + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_light_client_bootstrap(test, bid_3_0.root).spec is None + assert get_light_client_bootstrap(test, bid_4_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_3_n + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_3_n + assert state_a.slot == spec_a.compute_start_slot_at_epoch(5) - 1 + bid_4_n = bid_a + + # Branch A: epoch 6, slot 2 + slot = spec_a.compute_start_slot_at_epoch(6) + 2 + spec_a, state_a, bid_6_2 = yield from add_new_block(test, spec_a, state_a, slot=slot, num_sync_participants=6) + yield from select_new_head(test, spec_a, bid_6_2) + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, bid_7.root).data) == bid_7 + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, bid_1_5.root).data) == bid_1_5 + assert get_light_client_bootstrap(test, bid_2_4.root).spec is None + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, bid_3_0.root).data) == bid_3_0 + assert get_light_client_bootstrap(test, bid_4_0.root).spec is None + period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, period).data) == bid_1_5 + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bid_4_n + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bid_4_n + + # Finish test + yield from finish_lc_data_collection_test(test) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 45c7d77887..15437f0959 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -1,38 +1,29 @@ -from typing import (Any, Dict, List) - -from eth_utils import encode_hex from eth2spec.test.context import ( spec_state_test_with_matching_config, spec_test, - with_config_overrides, + with_all_phases_from_to, + with_light_client, with_matching_spec_config, - with_phases, with_presets, with_state, - with_light_client, ) from eth2spec.test.helpers.attestations import ( next_slots_with_attestations, state_transition_with_full_block, ) from 
eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA, + ALTAIR, CAPELLA, DENEB, ELECTRA, MINIMAL, ) -from eth2spec.test.helpers.fork_transition import ( - do_fork, - transition_across_forks, -) -from eth2spec.test.helpers.forks import ( - get_spec_for_fork_version, - is_post_capella, is_post_deneb, is_post_electra, -) from eth2spec.test.helpers.light_client import ( - compute_start_slot_at_next_sync_committee_period, get_sync_aggregate, - upgrade_lc_bootstrap_to_new_spec, - upgrade_lc_update_to_new_spec, - upgrade_lc_store_to_new_spec, + compute_start_slot_at_next_sync_committee_period, +) +from eth2spec.test.helpers.light_client_sync import ( + emit_force_update, + emit_update, + finish_lc_sync_test, + setup_lc_sync_test, ) from eth2spec.test.helpers.state import ( next_slots, @@ -40,162 +31,12 @@ ) -class LightClientSyncTest(object): - steps: List[Dict[str, Any]] - genesis_validators_root: Any - s_spec: Any - store: Any - - -def get_store_fork_version(s_spec): - if is_post_electra(s_spec): - return s_spec.config.ELECTRA_FORK_VERSION - if is_post_deneb(s_spec): - return s_spec.config.DENEB_FORK_VERSION - if is_post_capella(s_spec): - return s_spec.config.CAPELLA_FORK_VERSION - return s_spec.config.ALTAIR_FORK_VERSION - - -def setup_test(spec, state, s_spec=None, phases=None): - test = LightClientSyncTest() - test.steps = [] - - if s_spec is None: - s_spec = spec - if phases is None: - phases = { - spec.fork: spec, - s_spec.fork: s_spec, - } - test.s_spec = s_spec - - yield "genesis_validators_root", "meta", "0x" + state.genesis_validators_root.hex() - test.genesis_validators_root = state.genesis_validators_root - - next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1) - trusted_block = state_transition_with_full_block(spec, state, True, True) - trusted_block_root = trusted_block.message.hash_tree_root() - yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex() - - data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(trusted_block.message.slot)) - data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) - d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) - data = d_spec.create_light_client_bootstrap(state, trusted_block) - yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest) - yield "bootstrap", data - - upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data, phases) - test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded) - store_fork_version = get_store_fork_version(test.s_spec) - store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) - yield "store_fork_digest", "meta", encode_hex(store_fork_digest) - - return test - - -def finish_test(test): - yield "steps", test.steps - - -def get_update_file_name(d_spec, update): - if d_spec.is_sync_committee_update(update): - suffix1 = "s" - else: - suffix1 = "x" - if d_spec.is_finality_update(update): - suffix2 = "f" - else: - suffix2 = "x" - return f"update_{encode_hex(update.attested_header.beacon.hash_tree_root())}_{suffix1}{suffix2}" - - -def get_checks(s_spec, store): - if is_post_capella(s_spec): - return { - "finalized_header": { - 'slot': int(store.finalized_header.beacon.slot), - 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), - 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.finalized_header)), - }, - "optimistic_header": { - 'slot': int(store.optimistic_header.beacon.slot), 
- 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), - 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.optimistic_header)), - }, - } - - return { - "finalized_header": { - 'slot': int(store.finalized_header.beacon.slot), - 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), - }, - "optimistic_header": { - 'slot': int(store.optimistic_header.beacon.slot), - 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), - }, - } - - -def emit_force_update(test, spec, state): - current_slot = state.slot - test.s_spec.process_light_client_store_force_update(test.store, current_slot) - - yield from [] # Consistently enable `yield from` syntax in calling tests - test.steps.append({ - "force_update": { - "current_slot": int(current_slot), - "checks": get_checks(test.s_spec, test.store), - } - }) - - -def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True, phases=None): - data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(attested_block.message.slot)) - data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) - d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) - data = d_spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block) - if not with_next: - data.next_sync_committee = spec.SyncCommittee() - data.next_sync_committee_branch = spec.NextSyncCommitteeBranch() - current_slot = state.slot - - upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data, phases) - test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root) - - yield get_update_file_name(d_spec, data), data - test.steps.append({ - "process_update": { - "update_fork_digest": encode_hex(data_fork_digest), - "update": get_update_file_name(d_spec, data), - "current_slot": int(current_slot), - "checks": get_checks(test.s_spec, test.store), - } - }) - return upgraded - - -def emit_upgrade_store(test, new_s_spec, phases=None): - test.store = upgrade_lc_store_to_new_spec(test.s_spec, new_s_spec, test.store, phases) - test.s_spec = new_s_spec - store_fork_version = get_store_fork_version(test.s_spec) - store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) - - yield from [] # Consistently enable `yield from` syntax in calling tests - test.steps.append({ - "upgrade_store": { - "store_fork_digest": encode_hex(store_fork_digest), - "checks": get_checks(test.s_spec, test.store), - } - }) - - @with_light_client @spec_state_test_with_matching_config @with_presets([MINIMAL], reason="too slow") def test_light_client_sync(spec, state): # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_sync_test(spec, state) # Initial `LightClientUpdate`, populating `store.next_sync_committee` # ``` @@ -409,7 +250,7 @@ def test_light_client_sync(spec, state): assert test.store.optimistic_header.beacon.slot == attested_state.slot # Finish test - yield from finish_test(test) + yield from finish_lc_sync_test(test) @with_light_client @@ -428,7 +269,7 @@ def test_supply_sync_committee_from_past_update(spec, state): past_state = state.copy() # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_sync_test(spec, state) assert not spec.is_next_sync_committee_known(test.store) # Apply `LightClientUpdate` from the past, populating `store.next_sync_committee` @@ -439,7 +280,7 @@ 
def test_supply_sync_committee_from_past_update(spec, state): assert test.store.optimistic_header.beacon.slot == state.slot # Finish test - yield from finish_test(test) + yield from finish_lc_sync_test(test) @with_light_client @@ -447,7 +288,7 @@ def test_supply_sync_committee_from_past_update(spec, state): @with_presets([MINIMAL], reason="too slow") def test_advance_finality_without_sync_committee(spec, state): # Start test - test = yield from setup_test(spec, state) + test = yield from setup_lc_sync_test(spec, state) # Initial `LightClientUpdate`, populating `store.next_sync_committee` next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1) @@ -515,233 +356,12 @@ def test_advance_finality_without_sync_committee(spec, state): assert test.store.optimistic_header.beacon.slot == attested_state.slot # Finish test - yield from finish_test(test) - - -def run_test_single_fork(spec, phases, state, fork): - # Start test - test = yield from setup_test(spec, state, phases=phases) - - # Initial `LightClientUpdate` - finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Jump to two slots before fork - fork_epoch = getattr(phases[fork].config, fork.upper() + '_FORK_EPOCH') - transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch) - 4) - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - update = yield from emit_update( - test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Perform `LightClientStore` upgrade - yield from emit_upgrade_store(test, phases[fork], phases=phases) - update = test.store.best_valid_update - - # Final slot before fork, check that importing the pre-fork format still works - attested_block = block.copy() - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Upgrade to 
post-fork spec, attested block is still before the fork - attested_block = block.copy() - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate) - spec = phases[fork] - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Another block after the fork, this time attested block is after the fork - attested_block = block.copy() - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Jump to next epoch - transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch + 1) - 2) - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update == update - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finalize the fork - finalized_block = block.copy() - finalized_state = state.copy() - _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True) - attested_block = state_transition_with_full_block(spec, state, True, True) - attested_state = state.copy() - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_test(test) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=CAPELLA) -@with_presets([MINIMAL], reason="too slow") -def test_capella_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, CAPELLA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB]) -@spec_test 
-@with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_deneb_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, DENEB) - - -@with_phases(phases=[DENEB], other_phases=[ELECTRA]) -@spec_test -@with_config_overrides({ - 'ELECTRA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_electra_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, ELECTRA) - - -def run_test_multi_fork(spec, phases, state, fork_1, fork_2): - # Start test - test = yield from setup_test(spec, state, phases[fork_2], phases) - - # Set up so that finalized is from `spec`, ... - finalized_block = spec.SignedBeaconBlock() - finalized_block.message.state_root = state.hash_tree_root() - finalized_state = state.copy() - - # ..., attested is from `fork_1`, ... - fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') - spec, state, attested_block = transition_across_forks( - spec, - state, - spec.compute_start_slot_at_epoch(fork_1_epoch), - phases, - with_block=True, - ) - attested_state = state.copy() - - # ..., and signature is from `fork_2` - fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') - spec, state, _ = transition_across_forks( - spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1, phases) - sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) - spec, state, block = transition_across_forks( - spec, - state, - spec.compute_start_slot_at_epoch(fork_2_epoch), - phases, - with_block=True, - sync_aggregate=sync_aggregate, - ) - - # Check that update applies - yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) - assert test.store.finalized_header.beacon.slot == finalized_state.slot - assert test.store.next_sync_committee == finalized_state.next_sync_committee - assert test.store.best_valid_update is None - assert test.store.optimistic_header.beacon.slot == attested_state.slot - - # Finish test - yield from finish_test(test) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'DENEB_FORK_EPOCH': 4, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=DENEB) -@with_presets([MINIMAL], reason="too slow") -def test_capella_deneb_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) - - -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'DENEB_FORK_EPOCH': 4, - 'ELECTRA_FORK_EPOCH': 5, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def test_capella_electra_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) - - -@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) -@spec_test -@with_config_overrides({ - 'DENEB_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'ELECTRA_FORK_EPOCH': 4, -}, emit=False) -@with_state -@with_matching_spec_config(emitted_fork=ELECTRA) -@with_presets([MINIMAL], reason="too slow") -def 
test_deneb_electra_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + yield from finish_lc_sync_test(test) -def run_test_upgraded_store_with_legacy_data(spec, phases, state, fork): +def run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, fork): # Start test (Legacy bootstrap with an upgraded store) - test = yield from setup_test(spec, state, phases[fork], phases) + test = yield from setup_lc_sync_test(spec, state, phases[fork], phases) # Initial `LightClientUpdate` (check that the upgraded store can process it) finalized_block = spec.SignedBeaconBlock() @@ -758,31 +378,31 @@ def run_test_upgraded_store_with_legacy_data(spec, phases, state, fork): assert test.store.optimistic_header.beacon.slot == attested_state.slot # Finish test - yield from finish_test(test) + yield from finish_lc_sync_test(test) -@with_phases(phases=[ALTAIR, BELLATRIX], other_phases=[CAPELLA]) +@with_all_phases_from_to(ALTAIR, CAPELLA, other_phases=[CAPELLA]) @spec_test @with_state @with_matching_spec_config(emitted_fork=CAPELLA) @with_presets([MINIMAL], reason="too slow") def test_capella_store_with_legacy_data(spec, phases, state): - yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[CAPELLA, DENEB]) +@with_all_phases_from_to(ALTAIR, DENEB, other_phases=[CAPELLA, DENEB]) @spec_test @with_state @with_matching_spec_config(emitted_fork=DENEB) @with_presets([MINIMAL], reason="too slow") def test_deneb_store_with_legacy_data(spec, phases, state): - yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA, DENEB], other_phases=[CAPELLA, DENEB, ELECTRA]) +@with_all_phases_from_to(ALTAIR, ELECTRA, other_phases=[CAPELLA, DENEB, ELECTRA]) @spec_test @with_state @with_matching_spec_config(emitted_fork=ELECTRA) @with_presets([MINIMAL], reason="too slow") def test_electra_store_with_legacy_data(spec, phases, state): - yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) + yield from run_lc_sync_test_upgraded_store_with_legacy_data(spec, phases, state, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py new file mode 100644 index 0000000000..5e894a5d13 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_data_collection.py @@ -0,0 +1,41 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + BELLATRIX, CAPELLA, DENEB, + MINIMAL, +) +from eth2spec.test.helpers.light_client_data_collection import ( + run_lc_data_collection_test_multi_fork, +) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'DENEB_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state 
+@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_aligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'DENEB_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_reorg_unaligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, CAPELLA, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_sync.py new file mode 100644 index 0000000000..81b44d8749 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/bellatrix/light_client/test_sync.py @@ -0,0 +1,55 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + BELLATRIX, CAPELLA, DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_multi_fork, + run_lc_sync_test_single_fork, +) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=CAPELLA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, CAPELLA) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 4, +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_capella_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, DENEB) + + +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'CAPELLA_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'DENEB_FORK_EPOCH': 4, + 'ELECTRA_FORK_EPOCH': 5, +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_capella_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, CAPELLA, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py new file mode 100644 index 0000000000..d85b0dfda1 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_data_collection.py @@ -0,0 +1,41 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + CAPELLA, DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_data_collection import ( + run_lc_data_collection_test_multi_fork, +) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test 
+@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8, # SyncCommitteePeriod 1 + 'ELECTRA_FORK_EPOCH': 2 * 8, # SyncCommitteePeriod 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_reorg_aligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, DENEB, ELECTRA) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 1 * 8 + 4, # SyncCommitteePeriod 1 (+ 4 epochs) + 'ELECTRA_FORK_EPOCH': 3 * 8 + 4, # SyncCommitteePeriod 3 (+ 4 epochs) +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_reorg_unaligned(spec, phases, state): + yield from run_lc_data_collection_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py new file mode 100644 index 0000000000..faa727d6d2 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/capella/light_client/test_sync.py @@ -0,0 +1,41 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + CAPELLA, DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_multi_fork, + run_lc_sync_test_single_fork, +) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=DENEB) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, DENEB) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB, ELECTRA]) +@spec_test +@with_config_overrides({ + 'DENEB_FORK_EPOCH': 3, # Test setup advances to epoch 2 + 'ELECTRA_FORK_EPOCH': 4, +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_deneb_electra_fork(spec, phases, state): + yield from run_lc_sync_test_multi_fork(spec, phases, state, DENEB, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 8c960cfc75..33209f17e2 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -436,6 +436,22 @@ def with_all_phases_from_except(earliest_phase, except_phases=None): return with_all_phases_from(earliest_phase, [phase for phase in ALL_PHASES if phase not in except_phases]) +def with_all_phases_from_to(from_phase, to_phase, other_phases=None, all_phases=ALL_PHASES): + """ + A decorator factory for running tests with every phase + from a given start phase up to and excluding a given end phase + """ + def decorator(fn): + return with_phases( + [phase for phase in all_phases if ( + phase != to_phase and is_post_fork(to_phase, phase) + and is_post_fork(phase, from_phase) + )], + other_phases=other_phases, + )(fn) + return decorator + + def with_all_phases_except(exclusion_phases): """ A decorator factory for running a tests with every phase except the ones listed diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/__init__.py 
b/tests/core/pyspec/eth2spec/test/deneb/light_client/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py new file mode 100644 index 0000000000..2a2b4db118 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/light_client/test_sync.py @@ -0,0 +1,27 @@ +from eth2spec.test.context import ( + spec_test, + with_config_overrides, + with_matching_spec_config, + with_phases, + with_presets, + with_state, +) +from eth2spec.test.helpers.constants import ( + DENEB, ELECTRA, + MINIMAL, +) +from eth2spec.test.helpers.light_client_sync import ( + run_lc_sync_test_single_fork, +) + + +@with_phases(phases=[DENEB], other_phases=[ELECTRA]) +@spec_test +@with_config_overrides({ + 'ELECTRA_FORK_EPOCH': 3, # Test setup advances to epoch 2 +}, emit=False) +@with_state +@with_matching_spec_config(emitted_fork=ELECTRA) +@with_presets([MINIMAL], reason="too slow") +def test_electra_fork(spec, phases, state): + yield from run_lc_sync_test_single_fork(spec, phases, state, ELECTRA) diff --git a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py index aade4a1605..4416063b39 100644 --- a/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/electra/fork/test_electra_fork_basic.py @@ -151,6 +151,40 @@ def test_fork_has_compounding_withdrawal_credential(spec, phases, state): )] +@with_phases(phases=[DENEB], other_phases=[ELECTRA]) +@spec_test +@with_state +@with_meta_tags(ELECTRA_FORK_TEST_META_TAGS) +def test_fork_inactive_compounding_validator_with_excess_balance(spec, phases, state): + index = 0 + post_spec = phases[ELECTRA] + validator = state.validators[index] + + # set validator balance greater than min_activation_balance + state.balances[index] = post_spec.MIN_ACTIVATION_BALANCE + 1 + # set validator as not active yet + validator.activation_epoch = spec.FAR_FUTURE_EPOCH + # set validator activation eligibility epoch to the latest finalized epoch + validator.activation_eligibility_epoch = state.finalized_checkpoint.epoch + # give the validator compounding withdrawal credentials + validator.withdrawal_credentials = post_spec.COMPOUNDING_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:] + + post_state = yield from run_fork_test(post_spec, state) + + # the validator cannot be activated again + assert post_state.validators[index].activation_eligibility_epoch == spec.FAR_FUTURE_EPOCH + # the validator should now have a zero balance + assert post_state.balances[index] == 0 + # there should be a single pending deposit for this validator + assert post_state.pending_deposits == [post_spec.PendingDeposit( + pubkey=validator.pubkey, + withdrawal_credentials=validator.withdrawal_credentials, + amount=state.balances[index], + signature=spec.bls.G2_POINT_AT_INFINITY, + slot=spec.GENESIS_SLOT, + )] + + @with_phases(phases=[DENEB], other_phases=[ELECTRA]) @spec_test @with_state diff --git a/tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py b/tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py new file mode 100644 index 0000000000..61752e919a --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/fulu/networking/test_compute_columns_for_custody_group.py @@ -0,0 +1,62 @@ +import random + +from eth2spec.test.context import ( + single_phase, + 
spec_test, + with_fulu_and_later, +) + + +def _run_compute_columns_for_custody_group(spec, rng, custody_group=None): + if custody_group is None: + custody_group = rng.randint(0, spec.config.NUMBER_OF_CUSTODY_GROUPS - 1) + + result = spec.compute_columns_for_custody_group(custody_group) + yield 'custody_group', 'meta', custody_group + + assert len(result) == len(set(result)) + assert len(result) == spec.config.NUMBER_OF_COLUMNS // spec.config.NUMBER_OF_CUSTODY_GROUPS + assert all(i < spec.config.NUMBER_OF_COLUMNS for i in result) + python_list_result = [int(i) for i in result] + + yield 'result', 'meta', python_list_result + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__min_custody_group(spec): + rng = random.Random(1111) + yield from _run_compute_columns_for_custody_group(spec, rng, custody_group=0) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__max_custody_group(spec): + rng = random.Random(1111) + yield from _run_compute_columns_for_custody_group(spec, rng, custody_group=spec.config.NUMBER_OF_CUSTODY_GROUPS - 1) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__1(spec): + rng = random.Random(1111) + yield from _run_compute_columns_for_custody_group(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__2(spec): + rng = random.Random(2222) + yield from _run_compute_columns_for_custody_group(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_compute_columns_for_custody_group__3(spec): + rng = random.Random(3333) + yield from _run_compute_columns_for_custody_group(spec, rng) diff --git a/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py b/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py deleted file mode 100644 index d3be42ce16..0000000000 --- a/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_columns.py +++ /dev/null @@ -1,113 +0,0 @@ -import random - -from eth2spec.test.context import ( - single_phase, - spec_test, - with_fulu_and_later, -) - - -def _run_get_custody_columns(spec, rng, node_id=None, custody_group_count=None): - if node_id is None: - node_id = rng.randint(0, 2**256 - 1) - - if custody_group_count is None: - custody_group_count = rng.randint(0, spec.config.NUMBER_OF_CUSTODY_GROUPS) - - columns_per_group = spec.config.NUMBER_OF_COLUMNS // spec.config.NUMBER_OF_CUSTODY_GROUPS - groups = spec.get_custody_groups(node_id, custody_group_count) - yield 'node_id', 'meta', node_id - yield 'custody_group_count', 'meta', int(custody_group_count) - - result = [] - for group in groups: - group_columns = spec.compute_columns_for_custody_group(group) - assert len(group_columns) == columns_per_group - result.extend(group_columns) - - assert len(result) == len(set(result)) - assert len(result) == custody_group_count * columns_per_group - assert all(i < spec.config.NUMBER_OF_COLUMNS for i in result) - python_list_result = [int(i) for i in result] - - yield 'result', 'meta', python_list_result - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__min_node_id_min_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng, node_id=0, custody_group_count=0) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__min_node_id_max_custody_group_count(spec): - rng = random.Random(1111) - yield from 
_run_get_custody_columns( - spec, rng, node_id=0, - custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__max_node_id_min_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng, node_id=2**256 - 1, custody_group_count=0) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__max_node_id_max_custody_group_count(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns( - spec, rng, node_id=2**256 - 1, - custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, - ) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__max_node_id_max_custody_group_count_minus_1(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns( - spec, rng, node_id=2**256 - 2, - custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, - ) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__short_node_id(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng, node_id=1048576, custody_group_count=1) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__1(spec): - rng = random.Random(1111) - yield from _run_get_custody_columns(spec, rng) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__2(spec): - rng = random.Random(2222) - yield from _run_get_custody_columns(spec, rng) - - -@with_fulu_and_later -@spec_test -@single_phase -def test_get_custody_columns__3(spec): - rng = random.Random(3333) - yield from _run_get_custody_columns(spec, rng) diff --git a/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py b/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py new file mode 100644 index 0000000000..8d33a2b920 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/fulu/networking/test_get_custody_groups.py @@ -0,0 +1,106 @@ +import random + +from eth2spec.test.context import ( + single_phase, + spec_test, + with_fulu_and_later, +) + + +def _run_get_custody_groups(spec, rng, node_id=None, custody_group_count=None): + if node_id is None: + node_id = rng.randint(0, 2**256 - 1) + + if custody_group_count is None: + custody_group_count = rng.randint(0, spec.config.NUMBER_OF_CUSTODY_GROUPS) + + result = spec.get_custody_groups(node_id, custody_group_count) + yield 'node_id', 'meta', node_id + yield 'custody_group_count', 'meta', int(custody_group_count) + + assert len(result) == len(set(result)) + assert len(result) == custody_group_count + assert all(i < spec.config.NUMBER_OF_CUSTODY_GROUPS for i in result) + python_list_result = [int(i) for i in result] + + yield 'result', 'meta', python_list_result + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__min_node_id_min_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng, node_id=0, custody_group_count=0) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__min_node_id_max_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups( + spec, rng, node_id=0, + custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__max_node_id_min_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng, node_id=2**256 - 1, custody_group_count=0) + + 
+@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__max_node_id_max_custody_group_count(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups( + spec, rng, node_id=2**256 - 1, + custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, + ) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__max_node_id_max_custody_group_count_minus_1(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups( + spec, rng, node_id=2**256 - 2, + custody_group_count=spec.config.NUMBER_OF_CUSTODY_GROUPS, + ) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__short_node_id(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng, node_id=1048576, custody_group_count=1) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__1(spec): + rng = random.Random(1111) + yield from _run_get_custody_groups(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__2(spec): + rng = random.Random(2222) + yield from _run_get_custody_groups(spec, rng) + + +@with_fulu_and_later +@spec_test +@single_phase +def test_get_custody_groups__3(spec): + rng = random.Random(3333) + yield from _run_get_custody_groups(spec, rng) diff --git a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py index fcf98c7e75..a0b8d30ac3 100644 --- a/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/fulu/unittests/test_config_invariants.py @@ -32,7 +32,3 @@ def test_polynomical_commitments_sampling(spec): @single_phase def test_networking(spec): assert spec.config.MAX_BLOBS_PER_BLOCK_FULU <= spec.MAX_BLOB_COMMITMENTS_PER_BLOCK - assert ( - spec.config.MAX_REQUEST_BLOB_SIDECARS_FULU == - spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.config.MAX_BLOBS_PER_BLOCK_FULU - ) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client.py b/tests/core/pyspec/eth2spec/test/helpers/light_client.py index 4638c988b5..c9334788ae 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client.py @@ -32,6 +32,12 @@ def latest_next_sync_committee_gindex(spec): return spec.NEXT_SYNC_COMMITTEE_GINDEX +def latest_normalize_merkle_branch(spec, branch, gindex): + if hasattr(spec, 'normalize_merkle_branch'): + return spec.normalize_merkle_branch(branch, gindex) + return branch + + def compute_start_slot_at_sync_committee_period(spec, sync_committee_period): return spec.compute_start_slot_at_epoch(sync_committee_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py new file mode 100644 index 0000000000..5de9b37c61 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_data_collection.py @@ -0,0 +1,909 @@ +from typing import (Any, Dict, List, Set) +from dataclasses import dataclass + +from eth_utils import encode_hex +from eth2spec.test.helpers.constants import ( + ALTAIR, +) +from eth2spec.test.helpers.fork_transition import ( + transition_across_forks, +) +from eth2spec.test.helpers.forks import ( + is_post_altair, +) +from eth2spec.test.helpers.light_client import ( + compute_start_slot_at_sync_committee_period, + get_sync_aggregate, + latest_current_sync_committee_gindex, + 
latest_finalized_root_gindex, + latest_next_sync_committee_gindex, + latest_normalize_merkle_branch, + upgrade_lc_header_to_new_spec, + upgrade_lc_update_to_new_spec, +) + + +def _next_epoch_boundary_slot(spec, slot): + # Compute the first possible epoch boundary state slot of a `Checkpoint` + # referring to a block at given slot. + epoch = spec.compute_epoch_at_slot(slot + spec.SLOTS_PER_EPOCH - 1) + return spec.compute_start_slot_at_epoch(epoch) + + +@dataclass(frozen=True) +class BlockID(object): + slot: Any + root: Any + + +def _block_to_block_id(block): + return BlockID( + slot=block.message.slot, + root=block.message.hash_tree_root(), + ) + + +def _state_to_block_id(state): + parent_header = state.latest_block_header.copy() + parent_header.state_root = state.hash_tree_root() + return BlockID(slot=parent_header.slot, root=parent_header.hash_tree_root()) + + +def get_lc_bootstrap_block_id(bootstrap): + return BlockID( + slot=bootstrap.header.beacon.slot, + root=bootstrap.header.beacon.hash_tree_root(), + ) + + +def get_lc_update_attested_block_id(update): + return BlockID( + slot=update.attested_header.beacon.slot, + root=update.attested_header.beacon.hash_tree_root(), + ) + + +@dataclass +class ForkedBeaconState(object): + spec: Any + data: Any + + +@dataclass +class ForkedSignedBeaconBlock(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientHeader(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientBootstrap(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientFinalityUpdate(object): + spec: Any + data: Any + + +@dataclass +class ForkedLightClientOptimisticUpdate(object): + spec: Any + data: Any + + +@dataclass +class CachedLightClientData(object): + # Sync committee branches at block's post-state + current_sync_committee_branch: Any # CurrentSyncCommitteeBranch + next_sync_committee_branch: Any # NextSyncCommitteeBranch + + # Finality information at block's post-state + finalized_slot: Any # Slot + finality_branch: Any # FinalityBranch + + # Best / latest light client data + current_period_best_update: ForkedLightClientUpdate + latest_signature_slot: Any # Slot + + +@dataclass +class LightClientDataCache(object): + # Cached data for creating future `LightClientUpdate` instances. + # Key is the block ID of which the post state was used to get the data. + # Data stored for the finalized head block and all non-finalized blocks. + data: Dict[BlockID, CachedLightClientData] + + # Light client data for the latest slot that was signed by at least + # `MIN_SYNC_COMMITTEE_PARTICIPANTS`. 
May be older than head + latest: ForkedLightClientFinalityUpdate + + # The earliest slot for which light client data is imported + tail_slot: Any # Slot + + +@dataclass +class LightClientDataDB(object): + headers: Dict[Any, ForkedLightClientHeader] # Root -> ForkedLightClientHeader + current_branches: Dict[Any, Any] # Slot -> CurrentSyncCommitteeBranch + sync_committees: Dict[Any, Any] # SyncCommitteePeriod -> SyncCommittee + best_updates: Dict[Any, ForkedLightClientUpdate] # SyncCommitteePeriod -> ForkedLightClientUpdate + + +@dataclass +class LightClientDataStore(object): + spec: Any + + # Cached data to accelerate creating light client data + cache: LightClientDataCache + + # Persistent light client data + db: LightClientDataDB + + +@dataclass +class LightClientDataCollectionTest(object): + steps: List[Dict[str, Any]] + files: Set[str] + + # Fork schedule + phases: Any + + # History access + blocks: Dict[Any, ForkedSignedBeaconBlock] # Block root -> ForkedSignedBeaconBlock + finalized_block_roots: Dict[Any, Any] # Slot -> Root + states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + finalized_checkpoint_states: Dict[Any, ForkedBeaconState] # State root -> ForkedBeaconState + latest_finalized_epoch: Any # Epoch + latest_finalized_bid: BlockID + historical_tail_slot: Any # Slot + + # Light client data + lc_data_store: LightClientDataStore + + +def get_ancestor_of_block_id(test, bid, slot): # -> Optional[BlockID] + try: + block = test.blocks[bid.root] + while True: + if block.data.message.slot <= slot: + return _block_to_block_id(block.data) + + block = test.blocks[block.data.message.parent_root] + except KeyError: + return None + + +def _block_id_at_finalized_slot(test, slot): # -> Optional[BlockID] + while slot >= test.historical_tail_slot: + try: + return BlockID(slot=slot, root=test.finalized_block_roots[slot]) + except KeyError: + slot = slot - 1 + return None + + +def _get_current_sync_committee_for_finalized_period(test, period): # -> Optional[SyncCommittee] + low_slot = max( + test.historical_tail_slot, + test.lc_data_store.spec.compute_start_slot_at_epoch( + test.lc_data_store.spec.config.ALTAIR_FORK_EPOCH) + ) + if period < test.lc_data_store.spec.compute_sync_committee_period_at_slot(low_slot): + return None + period_start_slot = compute_start_slot_at_sync_committee_period(test.lc_data_store.spec, period) + sync_committee_slot = max(period_start_slot, low_slot) + bid = _block_id_at_finalized_slot(test, sync_committee_slot) + if bid is None: + return None + block = test.blocks[bid.root] + state = test.finalized_checkpoint_states[block.data.message.state_root] + if sync_committee_slot > state.data.slot: + state.spec, state.data, _ = transition_across_forks( + state.spec, state.data, sync_committee_slot, phases=test.phases) + assert is_post_altair(state.spec) + return state.data.current_sync_committee + + +def _light_client_header_for_block(test, block): # -> ForkedLightClientHeader + if not is_post_altair(block.spec): + spec = test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def _light_client_header_for_block_id(test, bid): # -> ForkedLightClientHeader + block = test.blocks[bid.root] + if not is_post_altair(block.spec): + spec = test.phases[ALTAIR] + else: + spec = block.spec + return ForkedLightClientHeader(spec=spec, data=spec.block_to_light_client_header(block.data)) + + +def _sync_aggregate_for_block_id(test, bid): # -> Optional[SyncAggregate] + block = 
test.blocks[bid.root] + if not is_post_altair(block.spec): + return None + return block.data.message.body.sync_aggregate + + +def _get_light_client_data(lc_data_store, bid): # -> CachedLightClientData + # Fetch cached light client data about a given block. + # Data must be cached (`_cache_lc_data`) before calling this function. + try: + return lc_data_store.cache.data[bid] + except KeyError: + raise ValueError("Trying to get light client data that was not cached") + + +def _cache_lc_data(lc_data_store, spec, state, bid, current_period_best_update, latest_signature_slot): + # Cache data for a given block and its post-state to speed up creating future + # `LightClientUpdate` and `LightClientBootstrap` instances that refer to this + # block and state. + cached_data = CachedLightClientData( + current_sync_committee_branch=latest_normalize_merkle_branch( + lc_data_store.spec, + spec.compute_merkle_proof(state, spec.current_sync_committee_gindex_at_slot(state.slot)), + latest_current_sync_committee_gindex(lc_data_store.spec)), + next_sync_committee_branch=latest_normalize_merkle_branch( + lc_data_store.spec, + spec.compute_merkle_proof(state, spec.next_sync_committee_gindex_at_slot(state.slot)), + latest_next_sync_committee_gindex(lc_data_store.spec)), + finalized_slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + finality_branch=latest_normalize_merkle_branch( + lc_data_store.spec, + spec.compute_merkle_proof(state, spec.finalized_root_gindex_at_slot(state.slot)), + latest_finalized_root_gindex(lc_data_store.spec)), + current_period_best_update=current_period_best_update, + latest_signature_slot=latest_signature_slot, + ) + if bid in lc_data_store.cache.data: + raise ValueError("Redundant `_cache_lc_data` call") + lc_data_store.cache.data[bid] = cached_data + + +def _delete_light_client_data(lc_data_store, bid): + # Delete cached light client data for a given block. This needs to be called + # when a block becomes unreachable due to finalization of a different fork. 
+ del lc_data_store.cache.data[bid] + + +def _create_lc_finality_update_from_lc_data(test, + attested_bid, + signature_slot, + sync_aggregate): # -> ForkedLightClientFinalityUpdate + attested_header = _light_client_header_for_block_id(test, attested_bid) + attested_data = _get_light_client_data(test.lc_data_store, attested_bid) + finalized_bid = _block_id_at_finalized_slot(test, attested_data.finalized_slot) + if finalized_bid is not None: + if finalized_bid.slot != attested_data.finalized_slot: + # Empty slots at end of epoch, update cache for latest block slot + attested_data.finalized_slot = finalized_bid.slot + if finalized_bid.slot == attested_header.spec.GENESIS_SLOT: + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=attested_header.spec.LightClientHeader(), + ) + else: + finalized_header = _light_client_header_for_block_id(test, finalized_bid) + finalized_header = ForkedLightClientHeader( + spec=attested_header.spec, + data=upgrade_lc_header_to_new_spec( + finalized_header.spec, + attested_header.spec, + finalized_header.data, + ) + ) + finality_branch = attested_data.finality_branch + return ForkedLightClientFinalityUpdate( + spec=attested_header.spec, + data=attested_header.spec.LightClientFinalityUpdate( + attested_header=attested_header.data, + finalized_header=finalized_header.data, + finality_branch=finality_branch, + sync_aggregate=sync_aggregate, + signature_slot=signature_slot, + ), + ) + + +def _create_lc_update_from_lc_data(test, + attested_bid, + signature_slot, + sync_aggregate, + next_sync_committee): # -> ForkedLightClientUpdate + finality_update = _create_lc_finality_update_from_lc_data( + test, attested_bid, signature_slot, sync_aggregate) + attested_data = _get_light_client_data(test.lc_data_store, attested_bid) + return ForkedLightClientUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientUpdate( + attested_header=finality_update.data.attested_header, + next_sync_committee=next_sync_committee, + next_sync_committee_branch=attested_data.next_sync_committee_branch, + finalized_header=finality_update.data.finalized_header, + finality_branch=finality_update.data.finality_branch, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ) + ) + + +def _create_lc_update(test, spec, state, block, parent_bid): + # Create `LightClientUpdate` instances for a given block and its post-state, + # and keep track of best / latest ones. Data about the parent block's + # post-state must be cached (`_cache_lc_data`) before calling this. 
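+    # Outline of the body below: if the attested (parent) block is older than the
+    # light client data tail, only a placeholder cache entry is stored. Otherwise,
+    # the period's best update is reset whenever the sync committee period changes
+    # between `attested_slot` and `signature_slot`, and a new candidate update is
+    # only considered when the sync aggregate reaches `MIN_SYNC_COMMITTEE_PARTICIPANTS`
+    # and both slots share a period; the candidate then replaces the cached best
+    # update if `is_better_update` prefers it.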
+ + # Verify attested block (parent) is recent enough and that state is available + attested_bid = parent_bid + attested_slot = attested_bid.slot + if attested_slot < test.lc_data_store.cache.tail_slot: + _cache_lc_data( + test.lc_data_store, + spec, + state, + _block_to_block_id(block), + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + return + + # If sync committee period changed, reset `best` + attested_period = spec.compute_sync_committee_period_at_slot(attested_slot) + signature_slot = block.message.slot + signature_period = spec.compute_sync_committee_period_at_slot(signature_slot) + attested_data = _get_light_client_data(test.lc_data_store, attested_bid) + if attested_period != signature_period: + best = ForkedLightClientUpdate(spec=None, data=None) + else: + best = attested_data.current_period_best_update + + # If sync committee does not have sufficient participants, do not bump latest + sync_aggregate = block.message.body.sync_aggregate + num_active_participants = sum(sync_aggregate.sync_committee_bits) + if num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS: + latest_signature_slot = attested_data.latest_signature_slot + else: + latest_signature_slot = signature_slot + + # To update `best`, sync committee must have sufficient participants, and + # `signature_slot` must be in `attested_slot`'s sync committee period + if ( + num_active_participants < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + or attested_period != signature_period + ): + _cache_lc_data( + test.lc_data_store, + spec, + state, + _block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + return + + # Check if light client data improved + update = _create_lc_update_from_lc_data( + test, attested_bid, signature_slot, sync_aggregate, state.next_sync_committee) + is_better = ( + best.spec is None + or spec.is_better_update(update.data, upgrade_lc_update_to_new_spec( + best.spec, update.spec, best.data, test.phases)) + ) + + # Update best light client data for current sync committee period + if is_better: + best = update + _cache_lc_data( + test.lc_data_store, + spec, + state, + _block_to_block_id(block), + current_period_best_update=best, + latest_signature_slot=latest_signature_slot, + ) + + +def _create_lc_bootstrap(test, spec, bid): + block = test.blocks[bid.root] + period = spec.compute_sync_committee_period_at_slot(bid.slot) + if period not in test.lc_data_store.db.sync_committees: + test.lc_data_store.db.sync_committees[period] = \ + _get_current_sync_committee_for_finalized_period(test, period) + test.lc_data_store.db.headers[bid.root] = ForkedLightClientHeader( + spec=block.spec, data=block.spec.block_to_light_client_header(block.data)) + test.lc_data_store.db.current_branches[bid.slot] = \ + _get_light_client_data(test.lc_data_store, bid).current_sync_committee_branch + + +def _process_new_block_for_light_client(test, spec, state, block, parent_bid): + # Update light client data with information from a new block. + if block.message.slot < test.lc_data_store.cache.tail_slot: + return + + if is_post_altair(spec): + _create_lc_update(test, spec, state, block, parent_bid) + else: + raise ValueError("`tail_slot` cannot be before Altair") + + +def _process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid): + # Update light client data to account for a new head block. + # Note that `old_finalized_bid` is not yet updated when this is called. 
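+    # Outline of the body below: walk ancestors of `head_bid` once per sync committee
+    # period down to the old finalized slot, committing each period's best update to
+    # `db.best_updates` (or removing it when participation is below
+    # `MIN_SYNC_COMMITTEE_PARTICIPANTS`), then recompute `cache.latest` from the
+    # head's latest signature slot, clearing it when the signature or attested block
+    # is unavailable on the new head's branch or not newer than the finalized/tail slot.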
+ if head_bid.slot < test.lc_data_store.cache.tail_slot: + return + + # Commit best light client data for non-finalized periods + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + low_slot = max(test.lc_data_store.cache.tail_slot, old_finalized_bid.slot) + low_period = spec.compute_sync_committee_period_at_slot(low_slot) + bid = head_bid + for period in reversed(range(low_period, head_period + 1)): + period_end_slot = compute_start_slot_at_sync_committee_period(spec, period + 1) - 1 + bid = get_ancestor_of_block_id(test, bid, period_end_slot) + if bid is None or bid.slot < low_slot: + break + best = _get_light_client_data(test.lc_data_store, bid).current_period_best_update + if ( + best.spec is None + or sum(best.data.sync_aggregate.sync_committee_bits) < spec.MIN_SYNC_COMMITTEE_PARTICIPANTS + ): + test.lc_data_store.db.best_updates.pop(period, None) + else: + test.lc_data_store.db.best_updates[period] = best + + # Update latest light client data + head_data = _get_light_client_data(test.lc_data_store, head_bid) + signature_slot = head_data.latest_signature_slot + if signature_slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + signature_bid = get_ancestor_of_block_id(test, head_bid, signature_slot) + if signature_bid is None or signature_bid.slot <= low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + attested_bid = get_ancestor_of_block_id(test, signature_bid, signature_bid.slot - 1) + if attested_bid is None or attested_bid.slot < low_slot: + test.lc_data_store.cache.latest = ForkedLightClientFinalityUpdate(spec=None, data=None) + return + sync_aggregate = _sync_aggregate_for_block_id(test, signature_bid) + assert sync_aggregate is not None + test.lc_data_store.cache.latest = _create_lc_finality_update_from_lc_data( + test, attested_bid, signature_slot, sync_aggregate) + + +def _process_finalization_for_light_client(test, spec, finalized_bid, old_finalized_bid): + # Prune cached data that is no longer useful for creating future + # `LightClientUpdate` and `LightClientBootstrap` instances. + # This needs to be called whenever `finalized_checkpoint` changes. 
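+    # Outline of the body below: cache a `LightClientBootstrap` for every newly
+    # finalized epoch boundary block, walking backwards one epoch at a time from the
+    # new finalized slot, then drop per-block cache entries for blocks older than the
+    # new finalized block.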
+ finalized_slot = finalized_bid.slot + if finalized_slot < test.lc_data_store.cache.tail_slot: + return + + # Cache `LightClientBootstrap` for newly finalized epoch boundary blocks + first_new_slot = old_finalized_bid.slot + 1 + low_slot = max(first_new_slot, test.lc_data_store.cache.tail_slot) + boundary_slot = finalized_slot + while boundary_slot >= low_slot: + bid = _block_id_at_finalized_slot(test, boundary_slot) + if bid is None: + break + if bid.slot >= low_slot: + _create_lc_bootstrap(test, spec, bid) + boundary_slot = _next_epoch_boundary_slot(spec, bid.slot) + if boundary_slot < spec.SLOTS_PER_EPOCH: + break + boundary_slot = boundary_slot - spec.SLOTS_PER_EPOCH + + # Prune light client data that is no longer referrable by future updates + bids_to_delete = [] + for bid in test.lc_data_store.cache.data: + if bid.slot >= finalized_bid.slot: + continue + bids_to_delete.append(bid) + for bid in bids_to_delete: + _delete_light_client_data(test.lc_data_store, bid) + + +def get_light_client_bootstrap(test, block_root): # -> ForkedLightClientBootstrap + try: + header = test.lc_data_store.db.headers[block_root] + except KeyError: + return ForkedLightClientBootstrap(spec=None, data=None) + + slot = header.data.beacon.slot + period = header.spec.compute_sync_committee_period_at_slot(slot) + return ForkedLightClientBootstrap( + spec=header.spec, + data=header.spec.LightClientBootstrap( + header=header.data, + current_sync_committee=test.lc_data_store.db.sync_committees[period], + current_sync_committee_branch=test.lc_data_store.db.current_branches[slot], + ) + ) + + +def get_light_client_update_for_period(test, period): # -> ForkedLightClientUpdate + try: + return test.lc_data_store.db.best_updates[period] + except KeyError: + return ForkedLightClientUpdate(spec=None, data=None) + + +def get_light_client_finality_update(test): # -> ForkedLightClientFinalityUpdate + return test.lc_data_store.cache.latest + + +def get_light_client_optimistic_update(test): # -> ForkedLightClientOptimisticUpdate + finality_update = get_light_client_finality_update(test) + if finality_update.spec is None: + return ForkedLightClientOptimisticUpdate(spec=None, data=None) + return ForkedLightClientOptimisticUpdate( + spec=finality_update.spec, + data=finality_update.spec.LightClientOptimisticUpdate( + attested_header=finality_update.data.attested_header, + sync_aggregate=finality_update.data.sync_aggregate, + signature_slot=finality_update.data.signature_slot, + ), + ) + + +def setup_lc_data_collection_test(spec, state, phases=None): + assert spec.compute_slots_since_epoch_start(state.slot) == 0 + + test = LightClientDataCollectionTest( + steps=[], + files=set(), + phases=phases, + blocks={}, + finalized_block_roots={}, + states={}, + finalized_checkpoint_states={}, + latest_finalized_epoch=state.finalized_checkpoint.epoch, + latest_finalized_bid=BlockID( + slot=spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch), + root=state.finalized_checkpoint.root, + ), + historical_tail_slot=state.slot, + lc_data_store=LightClientDataStore( + spec=spec, + cache=LightClientDataCache( + data={}, + latest=ForkedLightClientFinalityUpdate(spec=None, data=None), + tail_slot=max(state.slot, spec.compute_start_slot_at_epoch(spec.config.ALTAIR_FORK_EPOCH)), + ), + db=LightClientDataDB( + headers={}, + current_branches={}, + sync_committees={}, + best_updates={}, + ), + ), + ) + bid = _state_to_block_id(state) + yield "initial_state", state + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, 
data=spec.SignedBeaconBlock( + message=spec.BeaconBlock(state_root=state.hash_tree_root()), + )) + test.finalized_block_roots[bid.slot] = bid.root + test.states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + test.finalized_checkpoint_states[state.hash_tree_root()] = ForkedBeaconState(spec=spec, data=state) + _cache_lc_data( + test.lc_data_store, spec, state, bid, + current_period_best_update=ForkedLightClientUpdate(spec=None, data=None), + latest_signature_slot=spec.GENESIS_SLOT, + ) + _create_lc_bootstrap(test, spec, bid) + + return test + + +def finish_lc_data_collection_test(test): + yield "steps", test.steps + + +def _encode_lc_object(test, prefix, obj, slot, genesis_validators_root): + yield from [] # Consistently enable `yield from` syntax in calling tests + + file_name = f"{prefix}_{slot}_{encode_hex(obj.data.hash_tree_root())}" + if file_name not in test.files: + test.files.add(file_name) + yield file_name, obj.data + return { + "fork_digest": encode_hex(obj.spec.compute_fork_digest( + obj.spec.compute_fork_version(obj.spec.compute_epoch_at_slot(slot)), + genesis_validators_root, + )), + "data": file_name, + } + + +def add_new_block(test, spec, state, slot=None, num_sync_participants=0): + if slot is None: + slot = state.slot + 1 + assert slot > state.slot + parent_bid = _state_to_block_id(state) + + # Advance to target slot - 1 to ensure sync aggregate can be efficiently computed + if state.slot < slot - 1: + spec, state, _ = transition_across_forks(spec, state, slot - 1, phases=test.phases) + + # Compute sync aggregate, using: + # - sync committee based on target slot + # - fork digest based on target slot - 1 + # - signed data based on parent_bid.slot + # All three slots may be from different forks + sync_aggregate, signature_slot = get_sync_aggregate( + spec, state, num_participants=num_sync_participants, phases=test.phases) + assert signature_slot == slot + + # Apply final block with computed sync aggregate + spec, state, block = transition_across_forks( + spec, state, slot, phases=test.phases, with_block=True, sync_aggregate=sync_aggregate) + bid = _block_to_block_id(block) + test.blocks[bid.root] = ForkedSignedBeaconBlock(spec=spec, data=block) + test.states[block.message.state_root] = ForkedBeaconState(spec=spec, data=state) + _process_new_block_for_light_client(test, spec, state, block, parent_bid) + block_obj = yield from _encode_lc_object( + test, "block", ForkedSignedBeaconBlock(spec=spec, data=block), block.message.slot, + state.genesis_validators_root, + ) + test.steps.append({ + "new_block": block_obj + }) + return spec, state, bid + + +def select_new_head(test, spec, head_bid): + old_finalized_bid = test.latest_finalized_bid + _process_head_change_for_light_client(test, spec, head_bid, old_finalized_bid) + + # Process finalization + block = test.blocks[head_bid.root] + state = test.states[block.data.message.state_root] + if state.data.finalized_checkpoint.epoch != spec.GENESIS_EPOCH: + block = test.blocks[state.data.finalized_checkpoint.root] + bid = _block_to_block_id(block.data) + new_finalized_bid = bid + if new_finalized_bid.slot > old_finalized_bid.slot: + old_finalized_epoch = None + new_finalized_epoch = state.data.finalized_checkpoint.epoch + while bid.slot > test.latest_finalized_bid.slot: + test.finalized_block_roots[bid.slot] = bid.root + finalized_epoch = spec.compute_epoch_at_slot(bid.slot + spec.SLOTS_PER_EPOCH - 1) + if finalized_epoch != old_finalized_epoch: + state = test.states[block.data.message.state_root] + 
test.finalized_checkpoint_states[block.data.message.state_root] = state + old_finalized_epoch = finalized_epoch + block = test.blocks[block.data.message.parent_root] + bid = _block_to_block_id(block.data) + test.latest_finalized_epoch = new_finalized_epoch + test.latest_finalized_bid = new_finalized_bid + _process_finalization_for_light_client(test, spec, new_finalized_bid, old_finalized_bid) + + blocks_to_delete = [] + for block_root, block in test.blocks.items(): + if block.data.message.slot < new_finalized_bid.slot: + blocks_to_delete.append(block_root) + for block_root in blocks_to_delete: + del test.blocks[block_root] + states_to_delete = [] + for state_root, state in test.states.items(): + if state.data.slot < new_finalized_bid.slot: + states_to_delete.append(state_root) + for state_root in states_to_delete: + del test.states[state_root] + + yield from [] # Consistently enable `yield from` syntax in calling tests + + bootstraps = [] + for state in test.finalized_checkpoint_states.values(): + bid = _state_to_block_id(state.data) + entry = { + "block_root": encode_hex(bid.root), + } + bootstrap = get_light_client_bootstrap(test, bid.root) + if bootstrap.spec is not None: + bootstrap_obj = yield from _encode_lc_object( + test, "bootstrap", bootstrap, bootstrap.data.header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["bootstrap"] = bootstrap_obj + bootstraps.append(entry) + + best_updates = [] + low_period = spec.compute_sync_committee_period_at_slot(test.lc_data_store.cache.tail_slot) + head_period = spec.compute_sync_committee_period_at_slot(head_bid.slot) + for period in range(low_period, head_period + 1): + entry = { + "period": int(period), + } + update = get_light_client_update_for_period(test, period) + if update.spec is not None: + update_obj = yield from _encode_lc_object( + test, "update", update, update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + entry["update"] = update_obj + best_updates.append(entry) + + checks = { + "latest_finalized_checkpoint": { + "epoch": int(test.latest_finalized_epoch), + "root": encode_hex(test.latest_finalized_bid.root), + }, + "bootstraps": bootstraps, + "best_updates": best_updates, + } + finality_update = get_light_client_finality_update(test) + if finality_update.spec is not None: + finality_update_obj = yield from _encode_lc_object( + test, "finality_update", finality_update, finality_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_finality_update"] = finality_update_obj + optimistic_update = get_light_client_optimistic_update(test) + if optimistic_update.spec is not None: + optimistic_update_obj = yield from _encode_lc_object( + test, "optimistic_update", optimistic_update, optimistic_update.data.attested_header.beacon.slot, + state.data.genesis_validators_root, + ) + checks["latest_optimistic_update"] = optimistic_update_obj + + test.steps.append({ + "new_head": { + "head_block_root": encode_hex(head_bid.root), + "checks": checks, + } + }) + + +def run_lc_data_collection_test_multi_fork(spec, phases, state, fork_1, fork_2): + # Start test + test = yield from setup_lc_data_collection_test(spec, state, phases=phases) + + # Genesis block is post Altair and is finalized, so can be used as bootstrap + genesis_bid = BlockID(slot=state.slot, root=spec.BeaconBlock(state_root=state.hash_tree_root()).hash_tree_root()) + assert get_lc_bootstrap_block_id(get_light_client_bootstrap(test, genesis_bid.root).data) == genesis_bid + + # Shared history up to 
final epoch of period before `fork_1` + fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') + fork_1_period = spec.compute_sync_committee_period(fork_1_epoch) + slot = compute_start_slot_at_sync_committee_period(spec, fork_1_period) - spec.SLOTS_PER_EPOCH + spec, state, bid = yield from add_new_block(test, spec, state, slot=slot, num_sync_participants=1) + yield from select_new_head(test, spec, bid) + assert get_light_client_bootstrap(test, bid.root).spec is None + slot_period = spec.compute_sync_committee_period_at_slot(slot) + if slot_period == 0: + assert get_lc_update_attested_block_id(get_light_client_update_for_period(test, 0).data) == genesis_bid + else: + for period in range(0, slot_period): + assert get_light_client_update_for_period(test, period).spec is None # attested period != signature period + state_period = spec.compute_sync_committee_period_at_slot(state.slot) + + # Branch A: Advance past `fork_2`, having blocks at slots 0 and 4 of each epoch + spec_a = spec + state_a = state + slot_a = state_a.slot + bids_a = [bid] + num_sync_participants_a = 1 + fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') + while spec_a.get_current_epoch(state_a) <= fork_2_epoch: + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a += 4 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Branch B: Advance past `fork_2`, having blocks at slots 1 and 5 of each epoch but no sync participation + spec_b = spec + state_b = state + slot_b = state_b.slot + bids_b = [bid] + while spec_b.get_current_epoch(state_b) <= fork_2_epoch: + slot_b += 4 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b) + # Simulate that this does not become head yet, e.g., this branch was withheld + for bid in bids_b: + assert get_light_client_bootstrap(test, bid.root).spec is None + bids_b.append(bid_b) + + # Branch B: Another block that becomes head + attested_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + slot_b += 1 + signature_period = spec_b.compute_sync_committee_period_at_slot(slot_b) + num_sync_participants_b = 1 + spec_b, state_b, bid_b = yield from add_new_block( + test, spec_b, state_b, slot=slot_b, num_sync_participants=num_sync_participants_b) + yield from select_new_head(test, spec_b, bid_b) + for bid in bids_b: + assert get_light_client_bootstrap(test, 
bid.root).spec is None + if attested_period == signature_period: + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_b[-1] + else: + assert signature_period == attested_period + 1 + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_b[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_b[-1] + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_b[-1] + bids_b.append(bid_b) + + # All data for periods between the common ancestor of the two branches should have reorged. + # As there was no sync participation on branch B, that means it is deleted. + state_b_period = spec_b.compute_sync_committee_period_at_slot(state_b.slot) + for period in range(state_period + 1, state_b_period): + assert get_light_client_update_for_period(test, period).spec is None + + # Branch A: Another block, reorging branch B once more + attested_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + slot_a = slot_b + 1 + signature_period = spec_a.compute_sync_committee_period_at_slot(slot_a) + if signature_period != attested_period: + num_sync_participants_a = 0 + num_sync_participants_a += 1 + spec_a, state_a, bid_a = yield from add_new_block( + test, spec_a, state_a, slot=slot_a, num_sync_participants=num_sync_participants_a) + yield from select_new_head(test, spec_a, bid_a) + for bid in bids_a: + assert get_light_client_bootstrap(test, bid.root).spec is None + if attested_period == signature_period: + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-1] + else: + assert signature_period == attested_period + 1 + assert get_lc_update_attested_block_id( + get_light_client_update_for_period(test, attested_period).data, + ) == bids_a[-2] + assert get_light_client_update_for_period(test, signature_period).spec is None + assert get_lc_update_attested_block_id(get_light_client_finality_update(test).data) == bids_a[-1] + assert get_lc_update_attested_block_id(get_light_client_optimistic_update(test).data) == bids_a[-1] + bids_a.append(bid_a) + + # Data has been restored + state_a_period = spec_a.compute_sync_committee_period_at_slot(state_a.slot) + for period in range(state_period + 1, state_a_period): + assert get_light_client_update_for_period(test, period).spec is not None + + # Finish test + yield from finish_lc_data_collection_test(test) diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py new file mode 100644 index 0000000000..54a5c0f970 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client_sync.py @@ -0,0 +1,320 @@ +from typing import (Any, Dict, List) + +from eth_utils import encode_hex +from eth2spec.test.helpers.attestations import ( + next_slots_with_attestations, + state_transition_with_full_block, +) +from eth2spec.test.helpers.fork_transition import ( + do_fork, + transition_across_forks, +) +from eth2spec.test.helpers.forks import ( + get_spec_for_fork_version, + is_post_capella, is_post_deneb, is_post_electra, +) +from eth2spec.test.helpers.light_client import ( + get_sync_aggregate, + upgrade_lc_bootstrap_to_new_spec, + upgrade_lc_update_to_new_spec, + upgrade_lc_store_to_new_spec, +) +from eth2spec.test.helpers.state import ( + 
next_slots, + transition_to, +) + + +class LightClientSyncTest(object): + steps: List[Dict[str, Any]] + genesis_validators_root: Any + s_spec: Any + store: Any + + +def _get_store_fork_version(s_spec): + if is_post_electra(s_spec): + return s_spec.config.ELECTRA_FORK_VERSION + if is_post_deneb(s_spec): + return s_spec.config.DENEB_FORK_VERSION + if is_post_capella(s_spec): + return s_spec.config.CAPELLA_FORK_VERSION + return s_spec.config.ALTAIR_FORK_VERSION + + +def setup_lc_sync_test(spec, state, s_spec=None, phases=None): + test = LightClientSyncTest() + test.steps = [] + + if s_spec is None: + s_spec = spec + if phases is None: + phases = { + spec.fork: spec, + s_spec.fork: s_spec, + } + test.s_spec = s_spec + + yield "genesis_validators_root", "meta", "0x" + state.genesis_validators_root.hex() + test.genesis_validators_root = state.genesis_validators_root + + next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1) + trusted_block = state_transition_with_full_block(spec, state, True, True) + trusted_block_root = trusted_block.message.hash_tree_root() + yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex() + + data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(trusted_block.message.slot)) + data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) + d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) + data = d_spec.create_light_client_bootstrap(state, trusted_block) + yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest) + yield "bootstrap", data + + upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data, phases) + test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded) + store_fork_version = _get_store_fork_version(test.s_spec) + store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) + yield "store_fork_digest", "meta", encode_hex(store_fork_digest) + + return test + + +def finish_lc_sync_test(test): + yield "steps", test.steps + + +def _get_update_file_name(d_spec, update): + if d_spec.is_sync_committee_update(update): + suffix1 = "s" + else: + suffix1 = "x" + if d_spec.is_finality_update(update): + suffix2 = "f" + else: + suffix2 = "x" + return f"update_{encode_hex(update.attested_header.beacon.hash_tree_root())}_{suffix1}{suffix2}" + + +def _get_checks(s_spec, store): + if is_post_capella(s_spec): + return { + "finalized_header": { + 'slot': int(store.finalized_header.beacon.slot), + 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), + 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.finalized_header)), + }, + "optimistic_header": { + 'slot': int(store.optimistic_header.beacon.slot), + 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), + 'execution_root': encode_hex(s_spec.get_lc_execution_root(store.optimistic_header)), + }, + } + + return { + "finalized_header": { + 'slot': int(store.finalized_header.beacon.slot), + 'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()), + }, + "optimistic_header": { + 'slot': int(store.optimistic_header.beacon.slot), + 'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()), + }, + } + + +def emit_force_update(test, spec, state): + current_slot = state.slot + test.s_spec.process_light_client_store_force_update(test.store, current_slot) + + yield from [] # Consistently enable `yield from` syntax in calling tests + test.steps.append({ + "force_update": { + 
"current_slot": int(current_slot), + "checks": _get_checks(test.s_spec, test.store), + } + }) + + +def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True, phases=None): + data_fork_version = spec.compute_fork_version(spec.compute_epoch_at_slot(attested_block.message.slot)) + data_fork_digest = spec.compute_fork_digest(data_fork_version, test.genesis_validators_root) + d_spec = get_spec_for_fork_version(spec, data_fork_version, phases) + data = d_spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block) + if not with_next: + data.next_sync_committee = spec.SyncCommittee() + data.next_sync_committee_branch = spec.NextSyncCommitteeBranch() + current_slot = state.slot + + upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data, phases) + test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root) + + yield _get_update_file_name(d_spec, data), data + test.steps.append({ + "process_update": { + "update_fork_digest": encode_hex(data_fork_digest), + "update": _get_update_file_name(d_spec, data), + "current_slot": int(current_slot), + "checks": _get_checks(test.s_spec, test.store), + } + }) + return upgraded + + +def _emit_upgrade_store(test, new_s_spec, phases=None): + test.store = upgrade_lc_store_to_new_spec(test.s_spec, new_s_spec, test.store, phases) + test.s_spec = new_s_spec + store_fork_version = _get_store_fork_version(test.s_spec) + store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root) + + yield from [] # Consistently enable `yield from` syntax in calling tests + test.steps.append({ + "upgrade_store": { + "store_fork_digest": encode_hex(store_fork_digest), + "checks": _get_checks(test.s_spec, test.store), + } + }) + + +def run_lc_sync_test_single_fork(spec, phases, state, fork): + # Start test + test = yield from setup_lc_sync_test(spec, state, phases=phases) + + # Initial `LightClientUpdate` + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Jump to two slots before fork + fork_epoch = getattr(phases[fork].config, fork.upper() + '_FORK_EPOCH') + transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch) - 4) + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + update = yield from emit_update( + test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == 
finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Perform `LightClientStore` upgrade + yield from _emit_upgrade_store(test, phases[fork], phases=phases) + update = test.store.best_valid_update + + # Final slot before fork, check that importing the pre-fork format still works + attested_block = block.copy() + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Upgrade to post-fork spec, attested block is still before the fork + attested_block = block.copy() + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate) + spec = phases[fork] + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Another block after the fork, this time attested block is after the fork + attested_block = block.copy() + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Jump to next epoch + transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch + 1) - 2) + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update == update + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finalize the fork + finalized_block = block.copy() + finalized_state = state.copy() + _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True) + attested_block = state_transition_with_full_block(spec, state, True, True) + attested_state = state.copy() + sync_aggregate, _ = get_sync_aggregate(spec, state, 
phases=phases) + block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) + + +def run_lc_sync_test_multi_fork(spec, phases, state, fork_1, fork_2): + # Start test + test = yield from setup_lc_sync_test(spec, state, phases[fork_2], phases) + + # Set up so that finalized is from `spec`, ... + finalized_block = spec.SignedBeaconBlock() + finalized_block.message.state_root = state.hash_tree_root() + finalized_state = state.copy() + + # ..., attested is from `fork_1`, ... + fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') + spec, state, attested_block = transition_across_forks( + spec, + state, + spec.compute_start_slot_at_epoch(fork_1_epoch), + phases, + with_block=True, + ) + attested_state = state.copy() + + # ..., and signature is from `fork_2` + fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') + spec, state, _ = transition_across_forks( + spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1, phases) + sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases) + spec, state, block = transition_across_forks( + spec, + state, + spec.compute_start_slot_at_epoch(fork_2_epoch), + phases, + with_block=True, + sync_aggregate=sync_aggregate, + ) + + # Check that update applies + yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) + assert test.store.finalized_header.beacon.slot == finalized_state.slot + assert test.store.next_sync_committee == finalized_state.next_sync_committee + assert test.store.best_valid_update is None + assert test.store.optimistic_header.beacon.slot == attested_state.slot + + # Finish test + yield from finish_lc_sync_test(test) diff --git a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py index 0e4727b794..3dae15c694 100644 --- a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py +++ b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py @@ -8,7 +8,7 @@ from typing import Callable from eth2spec.test.helpers.execution_payload import ( - compute_el_block_hash, + compute_el_block_hash_for_block, build_randomized_execution_payload, ) from eth2spec.test.helpers.multi_operations import ( @@ -255,7 +255,7 @@ def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(34 opaque_tx, _, blob_kzg_commitments, _ = get_sample_blob_tx( spec, blob_count=rng.randint(0, spec.config.MAX_BLOBS_PER_BLOCK), rng=rng) block.body.execution_payload.transactions.append(opaque_tx) - block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload, state) + block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block) block.body.blob_kzg_commitments = blob_kzg_commitments return block @@ -264,6 +264,7 @@ def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(34 def random_block_electra(spec, state, signed_blocks, scenario_state, rng=Random(3456)): block = random_block_deneb(spec, state, 
signed_blocks, scenario_state, rng=rng)
     block.body.execution_requests = get_random_execution_requests(spec, state, rng=rng)
+    block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block)
 
     return block
diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md
index 58709b3fee..37d09f4787 100644
--- a/tests/formats/fork_choice/README.md
+++ b/tests/formats/fork_choice/README.md
@@ -156,10 +156,10 @@ value that Execution Layer client mock returns in responses to the following Eng
 The checks to verify the current status of `store`.
 
 ```yaml
-checks: {<store_attribute>: value} -- the assertions.
+checks: {<store_attribute>: value} -- the assertions.
 ```
 
-`<store_attribute>` is the field member or property of [`Store`](../../../specs/phase0/fork-choice.md#store) object that maintained by client implementation. The fields include:
+`<store_attribute>` is the field member or property of the [`Store`](../../../specs/phase0/fork-choice.md#store) object that is maintained by the client implementation. The fields include:
 
 ```yaml
 head: {
diff --git a/tests/formats/light_client/README.md b/tests/formats/light_client/README.md
index 505b416019..050b406f0b 100644
--- a/tests/formats/light_client/README.md
+++ b/tests/formats/light_client/README.md
@@ -3,6 +3,7 @@
 This series of tests provides reference test vectors for the light client sync protocol spec.
 
 Handlers:
+- `data_collection`: see [Light client data collection test format](./data_collection.md)
 - `single_merkle_proof`: see [Single leaf merkle proof test format](./single_merkle_proof.md)
 - `sync`: see [Light client sync test format](./sync.md)
 - `update_ranking`: see [`LightClientUpdate` ranking test format](./update_ranking.md)
diff --git a/tests/formats/light_client/data_collection.md b/tests/formats/light_client/data_collection.md
new file mode 100644
index 0000000000..b0d17a68e9
--- /dev/null
+++ b/tests/formats/light_client/data_collection.md
@@ -0,0 +1,76 @@
+# Light client data collection tests
+
+This series of tests provides reference test vectors for validating that a full node collects canonical data for serving to light clients implementing the light client sync protocol to sync to the latest block header.
+
+## Test case format
+
+### `initial_state.ssz_snappy`
+
+An SSZ-snappy encoded object of type `BeaconState` to initialize the blockchain from. The state's `slot` is epoch aligned.
+
+### `steps.yaml`
+
+The steps to execute in sequence.
+
+#### `new_block` execution step
+
+The new block described by the test step should be imported, but does not become head yet.
+
+```yaml
+{
+    fork_digest: string -- encoded `ForkDigest`-context of `block`
+    data: string -- name of the `*.ssz_snappy` file to load
+                    as a `SignedBeaconBlock` object
+}
+```
+
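For orientation, a single `new_block` step could look roughly like the following sketch. The fork digest, slot, and file name are invented placeholders rather than values from an actual test vector; per the spec test helpers, real file names follow the `<prefix>_<slot>_<hex-encoded root>` pattern.

```yaml
{
    fork_digest: "0x6a95a1a9"       # placeholder `ForkDigest`-context value
    data: "block_20_0xc0ffee..."    # placeholder (truncated) file name of the SSZ-snappy encoded `SignedBeaconBlock`
}
```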
+#### `new_head` execution step
+
+The given block (previously imported) should become head, leading to potential updates to:
+
+- The best `LightClientUpdate` for non-finalized sync committee periods.
+- The latest `LightClientFinalityUpdate` and `LightClientOptimisticUpdate`.
+- The latest finalized `Checkpoint` (across all branches).
+- The available `LightClientBootstrap` instances for newly finalized `Checkpoint`s.
+
+```yaml
+{
+    head_block_root: Bytes32 -- string, hex encoded, with 0x prefix
+    checks: {
+        latest_finalized_checkpoint: { -- tracked across all branches
+            epoch: int -- integer, decimal
+            root: Bytes32 -- string, hex encoded, with 0x prefix
+        }
+        bootstraps: [ -- one entry per `LightClientBootstrap`
+            block_root: Bytes32 -- string, hex encoded, with 0x prefix
+            bootstrap: { -- only exists if a `LightClientBootstrap` is available
+                fork_digest: string -- encoded `ForkDigest`-context of `data`
+                data: string -- name of the `*.ssz_snappy` file to load
+                                as a `LightClientBootstrap` object
+            }
+        ]
+        best_updates: [ -- one entry per sync committee period
+            period: int -- integer, decimal
+            update: { -- only exists if a best `LightClientUpdate` is available
+                fork_digest: string -- encoded `ForkDigest`-context of `data`
+                data: string -- name of the `*.ssz_snappy` file to load
+                                as a `LightClientUpdate` object
+            }
+        ]
+        latest_finality_update: { -- only exists if a `LightClientFinalityUpdate` is available
+            fork_digest: string -- encoded `ForkDigest`-context of `data`
+            data: string -- name of the `*.ssz_snappy` file to load
+                            as a `LightClientFinalityUpdate` object
+        }
+        latest_optimistic_update: { -- only exists if a `LightClientOptimisticUpdate` is available
+            fork_digest: string -- encoded `ForkDigest`-context of `data`
+            data: string -- name of the `*.ssz_snappy` file to load
+                            as a `LightClientOptimisticUpdate` object
+        }
+    }
+}
+```
+
+## Condition
+
+A test-runner should initialize a simplified blockchain from `initial_state`. An external signal is used to control fork choice. The test-runner should then proceed to execute all the test steps in sequence, collecting light client data during execution. After each `new_head` step, it should verify that the collected light client data matches the provided `checks`.
diff --git a/tests/formats/light_client/sync.md b/tests/formats/light_client/sync.md
index 1706b4c162..7a8448547a 100644
--- a/tests/formats/light_client/sync.md
+++ b/tests/formats/light_client/sync.md
@@ -9,8 +9,8 @@ This series of tests provides reference test vectors for validating that a light
 ```yaml
 genesis_validators_root: Bytes32 -- string, hex encoded, with 0x prefix
 trusted_block_root: Bytes32 -- string, hex encoded, with 0x prefix
-bootstrap_fork_digest: string -- Encoded `ForkDigest`-context of `bootstrap`
-store_fork_digest: string -- Encoded `ForkDigest`-context of `store` object being tested
+bootstrap_fork_digest: string -- encoded `ForkDigest`-context of `bootstrap`
+store_fork_digest: string -- encoded `ForkDigest`-context of `store` object being tested
 ```
 
 ### `bootstrap.ssz_snappy`
@@ -47,8 +47,8 @@ should be executed with the specified parameters:
 
 ```yaml
 {
-    current_slot: int -- integer, decimal
-    checks: {<store_attribute>: value} -- the assertions.
+    current_slot: int -- integer, decimal
+    checks: {<store_attribute>: value} -- the assertions.
 }
 ```
 
@@ -60,11 +60,11 @@ The function `process_light_client_update(store, update, current_slot, genesis_v
 
 ```yaml
 {
-    update_fork_digest: string -- Encoded `ForkDigest`-context of `update`
-    update: string -- name of the `*.ssz_snappy` file to load
-                      as a `LightClientUpdate` object
-    current_slot: int -- integer, decimal
-    checks: {<store_attribute>: value} -- the assertions.
+    update_fork_digest: string -- encoded `ForkDigest`-context of `update`
+    update: string -- name of the `*.ssz_snappy` file to load
+                      as a `LightClientUpdate` object
+    current_slot: int -- integer, decimal
+    checks: {<store_attribute>: value} -- the assertions.
 }
 ```
 
@@ -78,8 +78,8 @@ The `store` should be upgraded to reflect the new `store_fork_digest`:
 
 ```yaml
 {
-    store_fork_digest: string -- Encoded `ForkDigest`-context of `store`
-    checks: {<store_attribute>: value} -- the assertions.
+    store_fork_digest: string -- encoded `ForkDigest`-context of `store`
+    checks: {<store_attribute>: value} -- the assertions.
 }
 ```
diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py
index 23aed84775..6420382240 100644
--- a/tests/generators/light_client/main.py
+++ b/tests/generators/light_client/main.py
@@ -4,17 +4,29 @@ if __name__ == "__main__":
     altair_mods = {key: 'eth2spec.test.altair.light_client.test_' + key for key in [
+        'data_collection',
         'single_merkle_proof',
         'sync',
         'update_ranking',
     ]}
-    bellatrix_mods = altair_mods
+
+    _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.light_client.test_' + key for key in [
+        'data_collection',
+        'sync',
+    ]}
+    bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods)
 
     _new_capella_mods = {key: 'eth2spec.test.capella.light_client.test_' + key for key in [
+        'data_collection',
         'single_merkle_proof',
+        'sync',
     ]}
     capella_mods = combine_mods(_new_capella_mods, bellatrix_mods)
-    deneb_mods = capella_mods
+
+    _new_deneb_mods = {key: 'eth2spec.test.deneb.light_client.test_' + key for key in [
+        'sync',
+    ]}
+    deneb_mods = combine_mods(_new_deneb_mods, capella_mods)
 
     electra_mods = deneb_mods
 
     all_mods = {
diff --git a/tests/generators/networking/main.py b/tests/generators/networking/main.py
index 3217c2cce2..a670f7bd4d 100644
--- a/tests/generators/networking/main.py
+++ b/tests/generators/networking/main.py
@@ -5,7 +5,8 @@ if __name__ == "__main__":
     fulu_mods = {key: 'eth2spec.test.fulu.networking.test_' + key for key in [
-        'get_custody_columns',
+        'compute_columns_for_custody_group',
+        'get_custody_groups',
     ]}
 
     all_mods = {
         FULU: fulu_mods