diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 9dbd4202afd3..b2044d025c4c 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -168,6 +168,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" >> .env echo RUN_CONTRACT_VERIFICATION_TEST=true >> $GITHUB_ENV - name: Start services @@ -216,8 +217,6 @@ jobs: --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_era \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_era \ --ignore-prerequisites --verbose \ --observability=false @@ -246,8 +245,6 @@ jobs: --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_validium \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_validium \ --chain validium - name: Create and initialize chain with Custom Token @@ -269,8 +266,6 @@ jobs: --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_custom_token \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_custom_token \ --chain custom_token - name: Create and register chain with transactions signed "offline" @@ -327,10 +322,13 @@ jobs: --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_consensus \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_consensus \ --chain consensus + - name: Export chain list to environment variable + run: | + CHAINS="era,validium,custom_token,consensus" + echo "CHAINS=$CHAINS" >> $GITHUB_ENV + - name: Build test dependencies run: | ci_run zk_supervisor test build @@ -353,27 +351,11 @@ jobs: - name: Setup attester committee for the consensus chain run: | - ci_run zk_inception consensus set-attester-committee --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log + ci_run zk_inception consensus set-attester-committee --chain consensus --from-genesis &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log - name: Run integration tests run: | - PASSED_ENV_VARS="RUN_CONTRACT_VERIFICATION_TEST" \ - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log & - PID4=$! 
- - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test integration --no-deps --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Init external nodes run: | @@ -395,42 +377,11 @@ jobs: - name: Run recovery tests (from snapshot) run: | - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Run recovery tests (from genesis) run: | - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain era &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain validium &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Run external node server run: | @@ -441,66 +392,19 @@ jobs: - name: Run integration tests en run: | - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain era &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain validium &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain custom_token &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain consensus &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test integration --no-deps --ignore-prerequisites --external-node" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Fee projection tests run: | ci_run killall -INT zksync_server || true - - ci_run zk_supervisor test fees --no-deps --no-kill --chain era &> ${{ env.FEES_LOGS_DIR }}/era.log & - PID1=$! 
- - ci_run zk_supervisor test fees --no-deps --no-kill --chain validium &> ${{ env.FEES_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test fees --no-deps --no-kill --chain custom_token &> ${{ env.FEES_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test fees --no-deps --no-kill --chain consensus &> ${{ env.FEES_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} - name: Run revert tests run: | ci_run killall -INT zksync_server || true ci_run killall -INT zksync_external_node || true - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain validium &> ${{ env.REVERT_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain custom_token &> ${{ env.REVERT_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 - + ci_run ./bin/run_on_all_chains.sh "zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} # Upgrade tests should run last, because as soon as they # finish the bootloader will be different diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 5a08dff178c4..73303d15cb30 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -4,6 +4,12 @@ on: # Workflow dispatch, to allow building and pushing new environments. # It will NOT mark them as latest. 
workflow_dispatch: + inputs: + build_cuda: + description: "Build CUDA images or not" + type: boolean + required: false + default: false push: branches: @@ -202,25 +208,25 @@ jobs: echo "should_run=$changed_files_output" >> "$GITHUB_OUTPUT" - name: Checkout code - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: submodules: "recursive" - name: Log in to US GAR - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - name: Log in to Docker Hub - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io @@ -228,19 +234,19 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 - name: Build and optionally push - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 with: file: docker/zk-environment/20.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile - push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + push: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/main' ) || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-${{ matrix.cuda_version }}:latest matterlabs/zk-environment:cuda-${{ matrix.cuda_version }}-latest diff --git a/.gitignore b/.gitignore index c3de7a2df84d..bbd13e2319af 100644 
--- a/.gitignore +++ b/.gitignore @@ -114,6 +114,7 @@ prover/data/keys/setup_* # Zk Toolbox chains/era/configs/* +chains/gateway/* configs/* era-observability/ core/tests/ts-integration/deployments-zk diff --git a/Cargo.lock b/Cargo.lock index 283d11415a6b..bd9f2d5ef28d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9672,8 +9672,12 @@ dependencies = [ "secrecy", "serde", "serde_json", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -10161,8 +10165,8 @@ dependencies = [ "async-trait", "rlp", "thiserror", - "tokio", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -10485,7 +10489,6 @@ dependencies = [ "once_cell", "pretty_assertions", "thiserror", - "tokio", "tracing", "vise", "zk_evm 0.131.0-rc.2", @@ -10912,6 +10915,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -11303,8 +11307,8 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "enum_dispatch", "primitive-types", @@ -11315,8 +11319,8 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "primitive-types", ] diff --git a/Cargo.toml b/Cargo.toml index df52986281e5..d597f4af7542 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,85 +1,85 @@ [workspace] members = [ - # Binaries - "core/bin/block_reverter", - "core/bin/contract-verifier", - "core/bin/external_node", - "core/bin/merkle_tree_consistency_checker", - "core/bin/snapshots_creator", - "core/bin/selector_generator", - "core/bin/system-constants-generator", - "core/bin/verified_sources_fetcher", - "core/bin/zksync_server", - "core/bin/genesis_generator", - "core/bin/zksync_tee_prover", - # Node services - "core/node/node_framework", - "core/node/proof_data_handler", - "core/node/block_reverter", - "core/node/commitment_generator", - "core/node/house_keeper", - "core/node/genesis", - "core/node/shared_metrics", - "core/node/db_pruner", - "core/node/fee_model", - "core/node/da_dispatcher", - "core/node/eth_sender", - "core/node/vm_runner", - "core/node/test_utils", - "core/node/state_keeper", - "core/node/reorg_detector", - "core/node/consistency_checker", - "core/node/metadata_calculator", - "core/node/node_sync", - "core/node/node_storage_init", - "core/node/consensus", - "core/node/contract_verification_server", - "core/node/api_server", - "core/node/tee_verifier_input_producer", - "core/node/base_token_adjuster", - "core/node/external_proof_integration_api", - "core/node/logs_bloom_backfill", - "core/node/da_clients", - # Libraries - "core/lib/db_connection", - "core/lib/zksync_core_leftovers", - "core/lib/basic_types", - "core/lib/config", - "core/lib/constants", - "core/lib/contract_verifier", - "core/lib/contracts", - "core/lib/circuit_breaker", - "core/lib/dal", - "core/lib/env_config", - 
"core/lib/da_client", - "core/lib/eth_client", - "core/lib/eth_signer", - "core/lib/l1_contract_interface", - "core/lib/mempool", - "core/lib/merkle_tree", - "core/lib/mini_merkle_tree", - "core/lib/node_framework_derive", - "core/lib/object_store", - "core/lib/prover_interface", - "core/lib/queued_job_processor", - "core/lib/state", - "core/lib/storage", - "core/lib/tee_verifier", - "core/lib/types", - "core/lib/protobuf_config", - "core/lib/utils", - "core/lib/vlog", - "core/lib/multivm", - "core/lib/vm_interface", - "core/lib/vm_executor", - "core/lib/web3_decl", - "core/lib/snapshots_applier", - "core/lib/crypto_primitives", - "core/lib/external_price_api", - # Test infrastructure - "core/tests/test_account", - "core/tests/loadnext", - "core/tests/vm-benchmark", + # Binaries + "core/bin/block_reverter", + "core/bin/contract-verifier", + "core/bin/external_node", + "core/bin/merkle_tree_consistency_checker", + "core/bin/snapshots_creator", + "core/bin/selector_generator", + "core/bin/system-constants-generator", + "core/bin/verified_sources_fetcher", + "core/bin/zksync_server", + "core/bin/genesis_generator", + "core/bin/zksync_tee_prover", + # Node services + "core/node/node_framework", + "core/node/proof_data_handler", + "core/node/block_reverter", + "core/node/commitment_generator", + "core/node/house_keeper", + "core/node/genesis", + "core/node/shared_metrics", + "core/node/db_pruner", + "core/node/fee_model", + "core/node/da_dispatcher", + "core/node/eth_sender", + "core/node/vm_runner", + "core/node/test_utils", + "core/node/state_keeper", + "core/node/reorg_detector", + "core/node/consistency_checker", + "core/node/metadata_calculator", + "core/node/node_sync", + "core/node/node_storage_init", + "core/node/consensus", + "core/node/contract_verification_server", + "core/node/api_server", + "core/node/tee_verifier_input_producer", + "core/node/base_token_adjuster", + "core/node/external_proof_integration_api", + "core/node/logs_bloom_backfill", + "core/node/da_clients", + # Libraries + "core/lib/db_connection", + "core/lib/zksync_core_leftovers", + "core/lib/basic_types", + "core/lib/config", + "core/lib/constants", + "core/lib/contract_verifier", + "core/lib/contracts", + "core/lib/circuit_breaker", + "core/lib/dal", + "core/lib/env_config", + "core/lib/da_client", + "core/lib/eth_client", + "core/lib/eth_signer", + "core/lib/l1_contract_interface", + "core/lib/mempool", + "core/lib/merkle_tree", + "core/lib/mini_merkle_tree", + "core/lib/node_framework_derive", + "core/lib/object_store", + "core/lib/prover_interface", + "core/lib/queued_job_processor", + "core/lib/state", + "core/lib/storage", + "core/lib/tee_verifier", + "core/lib/types", + "core/lib/protobuf_config", + "core/lib/utils", + "core/lib/vlog", + "core/lib/multivm", + "core/lib/vm_interface", + "core/lib/vm_executor", + "core/lib/web3_decl", + "core/lib/snapshots_applier", + "core/lib/crypto_primitives", + "core/lib/external_price_api", + # Test infrastructure + "core/tests/test_account", + "core/tests/loadnext", + "core/tests/vm-benchmark", ] resolver = "2" @@ -172,6 +172,7 @@ sqlx = "0.8.1" static_assertions = "1.1" structopt = "0.3.20" strum = "0.26" +strum_macros = "0.26.4" tempfile = "3.0.2" test-casing = "0.1.2" test-log = "0.2.15" @@ -185,7 +186,7 @@ tower-http = "0.5.2" tracing = "0.1" tracing-subscriber = "0.3" tracing-opentelemetry = "0.25.0" -time = "0.3.36" # Has to be same as used by `tracing-subscriber` +time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" fraction 
= "0.15.3" @@ -228,7 +229,7 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "74577d9be13b1bff9d1a712389731f669b179e47" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "a233d44bbe61dc6a758a754c3b78fe4f83e56699" } # Consensus dependencies. zksync_concurrency = "=0.5.0" diff --git a/bin/run_on_all_chains.sh b/bin/run_on_all_chains.sh new file mode 100755 index 000000000000..68b6e81662fd --- /dev/null +++ b/bin/run_on_all_chains.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# Colors for the terminal output +RED='\033[0;31m' +GREEN='\033[0;32m' +NC='\033[0m' # No Color + + +command=$1 +chain_list=$2 +log_dir=$3 +IFS=',' read -r -a chains <<< "$chain_list" +pids=() +statuses=() + +# Start background processes +for i in "${!chains[@]}"; do + eval "$command --chain ${chains[$i]} &> ${log_dir}/${chains[$i]}.log" & + pids+=($!) +done + +# Wait for all processes to complete and capture their exit statuses +for i in "${!pids[@]}"; do + wait ${pids[$i]} + statuses[$i]=$? +done + +# Check exit statuses and set overall status +overall_status=0 + +for i in "${!statuses[@]}"; do + if [ ${statuses[$i]} -ne 0 ]; then + overall_status=1 + echo -e "${RED}✗ ERROR (exit code ${statuses[$i]}): ${chains[$i]}${NC}" + else + echo -e "${GREEN}✓ SUCCESS: ${chains[$i]}${NC}" + fi +done + +# Exit with overall status +exit $overall_status diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index 9b1677c47c4d..56ee3edfd253 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -445,6 +445,8 @@ pub(crate) struct OptionalENConfig { /// Gateway RPC URL, needed for operating during migration. #[allow(dead_code)] pub gateway_url: Option, + /// Interval for bridge addresses refreshing in seconds. + bridge_addresses_refresh_interval_sec: Option, } impl OptionalENConfig { @@ -675,6 +677,7 @@ impl OptionalENConfig { api_namespaces, contracts_diamond_proxy_addr: None, gateway_url: enconfig.gateway_url.clone(), + bridge_addresses_refresh_interval_sec: enconfig.bridge_addresses_refresh_interval_sec, }) } @@ -901,6 +904,11 @@ impl OptionalENConfig { Duration::from_secs(self.pruning_data_retention_sec) } + pub fn bridge_addresses_refresh_interval(&self) -> Option { + self.bridge_addresses_refresh_interval_sec + .map(|n| Duration::from_secs(n.get())) + } + #[cfg(test)] fn mock() -> Self { // Set all values to their defaults diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index d0055896d42e..14e09b9c2a7a 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -430,6 +430,10 @@ impl ExternalNodeBuilder { response_body_size_limit: Some(self.config.optional.max_response_body_size()), with_extended_tracing: self.config.optional.extended_rpc_tracing, pruning_info_refresh_interval: Some(pruning_info_refresh_interval), + bridge_addresses_refresh_interval: self + .config + .optional + .bridge_addresses_refresh_interval(), polling_interval: Some(self.config.optional.polling_interval()), websocket_requests_per_minute_limit: None, // To be set by WS server layer method if required. 
replication_lag_limit: None, // TODO: Support replication lag limit diff --git a/core/bin/genesis_generator/src/main.rs b/core/bin/genesis_generator/src/main.rs index 4f8200b3af78..2a96cdc6c6cc 100644 --- a/core/bin/genesis_generator/src/main.rs +++ b/core/bin/genesis_generator/src/main.rs @@ -87,6 +87,7 @@ async fn generate_new_config( genesis_commitment: None, bootloader_hash: Some(base_system_contracts.bootloader), default_aa_hash: Some(base_system_contracts.default_aa), + evm_emulator_hash: base_system_contracts.evm_emulator, ..genesis_config }; diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 43ac9841c400..8d36f7344676 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -71,12 +71,14 @@ pub static GAS_TEST_SYSTEM_CONTRACTS: Lazy = Lazy::new(|| { let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); let hash = hash_bytecode(&bytecode); + BaseSystemContracts { default_aa: SystemContractCode { code: bytes_to_be_words(bytecode), hash, }, bootloader, + evm_emulator: None, } }); @@ -221,6 +223,7 @@ pub(super) fn execute_internal_transfer_test() -> u32 { let base_system_smart_contracts = BaseSystemContracts { bootloader, default_aa, + evm_emulator: None, }; let system_env = SystemEnv { diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index d1ab5ce8438f..af39e5159ba8 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -23,6 +23,10 @@ anyhow.workspace = true rand.workspace = true secrecy.workspace = true serde = { workspace = true, features = ["derive"] } +time = { workspace = true, features = ["serde-human-readable"] } +strum.workspace = true +strum_macros.workspace = true +vise.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 7e33f6964bb7..c117064dbc40 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -138,6 +138,8 @@ pub struct StateKeeperConfig { pub bootloader_hash: Option, #[deprecated(note = "Use GenesisConfig::default_aa_hash instead")] pub default_aa_hash: Option, + #[deprecated(note = "Use GenesisConfig::evm_emulator_hash instead")] + pub evm_emulator_hash: Option, #[deprecated(note = "Use GenesisConfig::l1_batch_commit_data_generator_mode instead")] #[serde(default)] pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, @@ -178,6 +180,7 @@ impl StateKeeperConfig { protective_reads_persistence_enabled: true, bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, } } diff --git a/core/lib/config/src/configs/en_config.rs b/core/lib/config/src/configs/en_config.rs index 7f130e3539a8..4cab47b0779e 100644 --- a/core/lib/config/src/configs/en_config.rs +++ b/core/lib/config/src/configs/en_config.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use std::num::{NonZeroU64, NonZeroUsize}; use serde::Deserialize; use zksync_basic_types::{ @@ -19,4 +19,5 @@ pub struct ENConfig { pub main_node_rate_limit_rps: Option, pub gateway_url: Option, + pub bridge_addresses_refresh_interval_sec: Option, } diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 7e6ef2244cbf..3a1a0505728c 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -42,6 
+42,7 @@ impl EthConfig { pubdata_sending_mode: PubdataSendingMode::Calldata, tx_aggregation_paused: false, tx_aggregation_only_prove_and_execute: false, + time_in_mempool_in_l1_blocks_cap: 1800, }), gas_adjuster: Some(GasAdjusterConfig { default_priority_fee_per_gas: 1000000000, @@ -127,6 +128,10 @@ pub struct SenderConfig { /// special mode specifically for gateway migration to decrease number of non-executed batches #[serde(default = "SenderConfig::default_tx_aggregation_only_prove_and_execute")] pub tx_aggregation_only_prove_and_execute: bool, + + /// Cap of time in mempool for price calculations + #[serde(default = "SenderConfig::default_time_in_mempool_in_l1_blocks_cap")] + pub time_in_mempool_in_l1_blocks_cap: u32, } impl SenderConfig { @@ -168,6 +173,13 @@ impl SenderConfig { const fn default_tx_aggregation_only_prove_and_execute() -> bool { false } + + pub const fn default_time_in_mempool_in_l1_blocks_cap() -> u32 { + let blocks_per_hour = 3600 / 12; + // we cap it at 6h to not allow nearly infinite values when a tx is stuck for a long time + // 1,001 ^ 1800 ~= 6, so by default we cap exponential price formula at roughly median * 6 + blocks_per_hour * 6 + } } #[derive(Debug, Deserialize, Copy, Clone, PartialEq, Default)] @@ -177,8 +189,10 @@ pub struct GasAdjusterConfig { /// Number of blocks collected by GasAdjuster from which base_fee median is taken pub max_base_fee_samples: usize, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_a")] pub pricing_formula_parameter_a: f64, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_b")] pub pricing_formula_parameter_b: f64, /// Parameter by which the base fee will be multiplied for internal purposes pub internal_l1_pricing_multiplier: f64, @@ -225,4 +239,12 @@ impl GasAdjusterConfig { pub const fn default_internal_pubdata_pricing_multiplier() -> f64 { 1.0 } + + pub const fn default_pricing_formula_parameter_a() -> f64 { + 1.1 + } + + pub const fn default_pricing_formula_parameter_b() -> f64 { + 1.001 + } } diff --git a/core/lib/config/src/configs/genesis.rs b/core/lib/config/src/configs/genesis.rs index 6c4bacc3a6e2..9e1ffbd87cb5 100644 --- a/core/lib/config/src/configs/genesis.rs +++ b/core/lib/config/src/configs/genesis.rs @@ -17,6 +17,7 @@ pub struct GenesisConfig { pub genesis_commitment: Option, pub bootloader_hash: Option, pub default_aa_hash: Option, + pub evm_emulator_hash: Option, pub l1_chain_id: L1ChainId, pub sl_chain_id: Option, pub l2_chain_id: L2ChainId, @@ -49,6 +50,7 @@ impl GenesisConfig { genesis_commitment: Some(H256::repeat_byte(0x17)), bootloader_hash: Default::default(), default_aa_hash: Default::default(), + evm_emulator_hash: Default::default(), l1_chain_id: L1ChainId(9), sl_chain_id: None, protocol_version: Some(ProtocolSemanticVersion { diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index 1ad503e0687f..a8d136d632ea 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -60,6 +60,7 @@ pub mod house_keeper; pub mod object_store; pub mod observability; pub mod proof_data_handler; +pub mod prover_autoscaler; pub mod prover_job_monitor; pub mod pruning; pub mod secrets; diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs new file mode 100644 index 000000000000..41131fc1b8c7 --- /dev/null +++ 
b/core/lib/config/src/configs/prover_autoscaler.rs @@ -0,0 +1,117 @@ +use std::collections::HashMap; + +use serde::Deserialize; +use strum::Display; +use strum_macros::EnumString; +use time::Duration; +use vise::EncodeLabelValue; + +use crate::configs::ObservabilityConfig; + +/// Config used for running ProverAutoscaler (both Scaler and Agent). +#[derive(Debug, Clone, PartialEq)] +pub struct ProverAutoscalerConfig { + /// Amount of time ProverJobMonitor will wait all it's tasks to finish. + // TODO: find a way to use #[serde(with = "humantime_serde")] with time::Duration. + pub graceful_shutdown_timeout: Duration, + pub agent_config: Option, + pub scaler_config: Option, + pub observability: Option, +} + +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ProverAutoscalerAgentConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// HTTP port for global Scaler to connect to the Agent running in a cluster. + pub http_port: u16, + /// List of namespaces to watch. + #[serde(default = "ProverAutoscalerAgentConfig::default_namespaces")] + pub namespaces: Vec, + /// Watched cluster name. Also can be set via flag. + pub cluster_name: Option, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Default)] +pub struct ProverAutoscalerScalerConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// The interval between runs for global Scaler. + #[serde(default = "ProverAutoscalerScalerConfig::default_scaler_run_interval")] + pub scaler_run_interval: Duration, + /// URL to get queue reports from. + /// In production should be "http://prover-job-monitor.stage2.svc.cluster.local:3074/queue_report". + #[serde(default = "ProverAutoscalerScalerConfig::default_prover_job_monitor_url")] + pub prover_job_monitor_url: String, + /// List of ProverAutoscaler Agents to get cluster data from. + pub agents: Vec, + /// Mapping of namespaces to protocol versions. + pub protocol_versions: HashMap, + /// Default priorities, which cluster to prefer when there is no other information. + pub cluster_priorities: HashMap, + /// Prover speed per GPU. Used to calculate desired number of provers for queue size. + pub prover_speed: HashMap, + /// Duration after which pending pod considered long pending. 
+ #[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] + pub long_pending_duration: Duration, +} + +#[derive( + Default, + Debug, + Display, + Hash, + PartialEq, + Eq, + Clone, + Copy, + Ord, + PartialOrd, + EnumString, + EncodeLabelValue, + Deserialize, +)] +pub enum Gpu { + #[default] + Unknown, + #[strum(ascii_case_insensitive)] + L4, + #[strum(ascii_case_insensitive)] + T4, + #[strum(ascii_case_insensitive)] + V100, + #[strum(ascii_case_insensitive)] + P100, + #[strum(ascii_case_insensitive)] + A100, +} + +impl ProverAutoscalerConfig { + /// Default graceful shutdown timeout -- 5 seconds + pub fn default_graceful_shutdown_timeout() -> Duration { + Duration::seconds(5) + } +} + +impl ProverAutoscalerAgentConfig { + pub fn default_namespaces() -> Vec { + vec!["prover-blue".to_string(), "prover-red".to_string()] + } +} + +impl ProverAutoscalerScalerConfig { + /// Default scaler_run_interval -- 10s + pub fn default_scaler_run_interval() -> Duration { + Duration::seconds(10) + } + + /// Default prover_job_monitor_url -- cluster local URL + pub fn default_prover_job_monitor_url() -> String { + "http://localhost:3074/queue_report".to_string() + } + + /// Default long_pending_duration -- 10m + pub fn default_long_pending_duration() -> Duration { + Duration::minutes(10) + } +} diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 6fbbad9d8ff2..a6ff30e04a90 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -192,6 +192,7 @@ impl Distribution for EncodeDist { fee_account_addr: None, bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, l1_batch_commit_data_generator_mode: Default::default(), } } @@ -419,6 +420,7 @@ impl Distribution for EncodeDist { pubdata_sending_mode: PubdataSendingMode::Calldata, tx_aggregation_paused: false, tx_aggregation_only_prove_and_execute: false, + time_in_mempool_in_l1_blocks_cap: self.sample(rng), } } } @@ -731,6 +733,7 @@ impl Distribution for EncodeDist { genesis_commitment: Some(rng.gen()), bootloader_hash: Some(rng.gen()), default_aa_hash: Some(rng.gen()), + evm_emulator_hash: Some(rng.gen()), fee_account: rng.gen(), l1_chain_id: L1ChainId(self.sample(rng)), sl_chain_id: None, @@ -933,6 +936,7 @@ impl Distribution for EncodeDist { main_node_rate_limit_rps: self.sample_opt(|| rng.gen()), gateway_url: self .sample_opt(|| format!("localhost:{}", rng.gen::()).parse().unwrap()), + bridge_addresses_refresh_interval_sec: self.sample_opt(|| rng.gen()), } } } diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index 73b4a0ffaaa2..fe37ef6c69fd 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -130,6 +130,11 @@ pub const CODE_ORACLE_ADDRESS: Address = H160([ 0x00, 0x00, 0x80, 0x12, ]); +pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x80, 0x13, +]); + /// Note, that the `Create2Factory` is explicitly deployed on a non-system-contract address. 
pub const CREATE2_FACTORY_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a72b5c95d1bc..fb28693887a9 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -293,6 +293,7 @@ fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec { fs::read(&bytecode_path) .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err)) } + /// Hash of code and code which consists of 32 bytes words #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SystemContractCode { @@ -304,18 +305,23 @@ pub struct SystemContractCode { pub struct BaseSystemContracts { pub bootloader: SystemContractCode, pub default_aa: SystemContractCode, + /// Never filled in constructors for now. The only way to get the EVM emulator enabled is to call [`Self::with_evm_emulator()`]. + pub evm_emulator: Option, } #[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq)] pub struct BaseSystemContractsHashes { pub bootloader: H256, pub default_aa: H256, + pub evm_emulator: Option, } impl PartialEq for BaseSystemContracts { fn eq(&self, other: &Self) -> bool { self.bootloader.hash == other.bootloader.hash && self.default_aa.hash == other.default_aa.hash + && self.evm_emulator.as_ref().map(|contract| contract.hash) + == other.evm_emulator.as_ref().map(|contract| contract.hash) } } @@ -339,14 +345,27 @@ impl BaseSystemContracts { BaseSystemContracts { bootloader, default_aa, + evm_emulator: None, } } - // BaseSystemContracts with proved bootloader - for handling transactions. + + /// BaseSystemContracts with proved bootloader - for handling transactions. pub fn load_from_disk() -> Self { let bootloader_bytecode = read_proved_batch_bootloader_bytecode(); BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + /// Loads the latest EVM emulator for these base system contracts. Logically, it only makes sense to do for the latest protocol version. + pub fn with_latest_evm_emulator(mut self) -> Self { + let bytecode = read_sys_contract_bytecode("", "EvmInterpreter", ContractLanguage::Yul); + let hash = hash_bytecode(&bytecode); + self.evm_emulator = Some(SystemContractCode { + code: bytes_to_be_words(bytecode), + hash, + }); + self + } + /// BaseSystemContracts with playground bootloader - used for handling eth_calls. 
pub fn playground() -> Self { let bootloader_bytecode = read_playground_batch_bootloader_bytecode(); @@ -475,6 +494,7 @@ impl BaseSystemContracts { BaseSystemContractsHashes { bootloader: self.bootloader.hash, default_aa: self.default_aa.hash, + evm_emulator: self.evm_emulator.as_ref().map(|contract| contract.hash), } } } diff --git a/core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json b/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json similarity index 73% rename from core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json rename to core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json index 9e212249490c..c93e6aef3e7c 100644 --- a/core/lib/dal/.sqlx/query-ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa.json +++ b/core/lib/dal/.sqlx/query-05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", 
"type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -161,6 +166,7 @@ true, true, true, + true, false, true, true, @@ -169,5 +175,5 @@ true ] }, - "hash": "ae30067056fe29febd68408c2ca2e604958488a41d3ee2bcbd05d269bcdfc7aa" + "hash": "05726523bb494b40011c28acd3f52dba1d37493d4c1db4b957cfec476a791b32" } diff --git a/core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json b/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json similarity index 74% rename from core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json rename to core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json index 8bf22e1b6fb7..a3d356f4bea9 100644 --- a/core/lib/dal/.sqlx/query-2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada.json +++ b/core/lib/dal/.sqlx/query-16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n 
ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -165,11 +170,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "2dc550a35fb0f0ddb1aded83d54a2e93066a5cffbb3857dfd3c6fe00c307eada" + "hash": "16d4658899c5b604fb794d44a8b3bef013ad12b66bdca7251be2af21e98fe870" } diff --git a/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json b/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json new file mode 100644 index 000000000000..35c606bf22bb --- /dev/null +++ b/core/lib/dal/.sqlx/query-34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04.json @@ -0,0 +1,31 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n NOW(),\n NOW()\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Bytea", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "Int8", + "Int8", + "Int8", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "34910600545933d85931d41bfe2dfcb3522a0772ac3d2476652df4216d823e04" +} diff --git a/core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json b/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json similarity index 83% rename from core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json rename to core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json index 93d522f5fb73..752e171f58cb 100644 --- a/core/lib/dal/.sqlx/query-5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb.json +++ b/core/lib/dal/.sqlx/query-4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n 
compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "compressed_state_diffs", - "type_info": "Bytea" + "name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,16 +120,21 @@ }, { "ordinal": 23, - "name": "events_queue_commitment", + "name": "compressed_state_diffs", "type_info": "Bytea" }, { "ordinal": 24, - "name": "bootloader_initial_content_commitment", + "name": "events_queue_commitment", "type_info": "Bytea" }, { "ordinal": 25, + "name": "bootloader_initial_content_commitment", + "type_info": "Bytea" + }, + { + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -165,8 +170,9 @@ false, true, true, + true, true ] }, - "hash": "5250341acd42582e41570b6d7e380ae6c8a26f425429116a62892be84c2ff9fb" + "hash": "4f5f59bc6fd27bb73c6020b6f0be7ca0e4b83f50724a0b18256aafab69909a98" } diff --git a/core/lib/dal/.sqlx/query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json b/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json similarity index 81% rename from core/lib/dal/.sqlx/query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json rename to core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json index aa7d4c65a39d..6f77a656072b 100644 --- a/core/lib/dal/.sqlx/query-778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d.json +++ b/core/lib/dal/.sqlx/query-51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n 
(miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -50,21 +50,26 @@ }, { "ordinal": 9, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 10, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 11, "name": "hash", "type_info": "Bytea" }, { - "ordinal": 11, + "ordinal": 12, "name": "protocol_version!", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 13, "name": "fee_account_address!", "type_info": "Bytea" } @@ -85,11 +90,12 @@ true, true, true, + true, false, false, true, false ] }, - "hash": "778f92b1ac91e1ae279f588053d75a9ac877fdd28bda99661e423405e695223d" + "hash": "51d5b6fd147fa06ddadb5f8c9c0e12784694d2f8fe9a67159ad4c7abc2279ca6" } diff --git a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json b/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json similarity index 74% rename from core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json rename to core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json index f440a2655937..b2f195c4e5c1 100644 --- a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json +++ b/core/lib/dal/.sqlx/query-7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT 
JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -90,11 +90,16 @@ }, { "ordinal": 17, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 18, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 18, + "ordinal": 19, "name": "fee_account_address", "type_info": "Bytea" } @@ -123,8 +128,9 @@ true, true, true, + true, false ] }, - "hash": "b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad" + "hash": "7240ff1240a2cdae14ab1bbfaad97f64d0c9620506bb41890548181bccca9ee5" } diff --git a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json b/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json similarity index 82% rename from core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json rename to core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json index 4a73fde57e29..28fbea09998c 100644 --- a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json +++ b/core/lib/dal/.sqlx/query-7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n 
bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -55,16 +55,21 @@ }, { "ordinal": 10, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 11, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 11, + "ordinal": 12, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 12, + "ordinal": 13, "name": "pubdata_input", "type_info": "Bytea" } @@ -86,9 +91,10 @@ true, true, true, + true, false, true ] }, - "hash": "454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd" + "hash": "7cceb18485c0fdeed57b7f279debfe9b944b2dd80eb56965a5874ce3168e8c5e" } diff --git a/core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json b/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json similarity index 80% rename from core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json rename to core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json index a9eae0bd01dc..8f41bf3b4916 100644 --- a/core/lib/dal/.sqlx/query-60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7.json +++ b/core/lib/dal/.sqlx/query-860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "system_logs", - "type_info": "ByteaArray" + 
"name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,16 +120,21 @@ }, { "ordinal": 23, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -161,12 +166,13 @@ true, true, true, - false, true, true, + false, + true, true, true ] }, - "hash": "60e68195b375775fc8bc293f6a053681759272f74c47836d34e0ee6de1f639f7" + "hash": "860de4af5c11c3a7c9eb660ec7049749bd5fc78b09578589c26d3017cc6bd192" } diff --git a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json similarity index 62% rename from core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json rename to core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json index 162c722add9b..d944b6abf9e1 100644 --- a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json +++ b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_versions.evm_emulator_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -25,11 +25,16 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "patch", "type_info": "Int4" }, { - "ordinal": 5, + "ordinal": 6, "name": "snark_wrapper_vk_hash", "type_info": "Bytea" } @@ -44,9 +49,10 @@ false, false, false, + true, false, false ] }, - "hash": "c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7" + "hash": "89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b" } diff --git a/core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json b/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json similarity index 80% rename from core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json rename to core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json index a96d94a5c55f..9eb67bb8299a 100644 --- a/core/lib/dal/.sqlx/query-30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e.json +++ b/core/lib/dal/.sqlx/query-9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n 
SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -160,11 +165,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "30268c71e4bd0d08015af6ae130d3ee5d5140714297401b4bde1e950ed6e971e" + "hash": "9ece18b3a36cbaeaa99ca3db466b9fabba23fa52a17a54146931476681edbd24" } diff --git a/core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json b/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json similarity index 69% rename from core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json rename to core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json index 671b56760d6b..55d56cc4ab00 100644 --- a/core/lib/dal/.sqlx/query-ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3.json +++ b/core/lib/dal/.sqlx/query-9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": 
"\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, 
+ "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -166,11 +171,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "ac4f1e7af7d866daf45b6997a8ce0a02a40c9f37be949bd4d088744f9c842ef3" + "hash": "9f2e976278266ae5845c5188c95876eb8a6a508aea04d93342df50dd9745c361" } diff --git a/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json b/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json similarity index 81% rename from core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json rename to core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json index 26a3458bff9b..c8c438295e49 100644 --- a/core/lib/dal/.sqlx/query-39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded.json +++ b/core/lib/dal/.sqlx/query-a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -65,26 +65,31 @@ }, { "ordinal": 12, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 14, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" } @@ -106,11 +111,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "39a105cba1be0ec8f2b2b88d2f10c6286fcc824e84bb40a6e9f289c34b85fded" + "hash": "a62f400a5b0b66300f5febf762c7e0c8a39a49d1cea78ef771d4c64fbbc16756" } diff --git a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json similarity index 73% rename from core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json rename to core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json index 81ae6c590f95..28ffcc5ae468 100644 --- a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json +++ b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n 
fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", + "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", "describe": { "columns": [ { @@ -82,6 +82,11 @@ "ordinal": 15, "name": "default_aa_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -105,8 +110,9 @@ false, true, true, + true, true ] }, - "hash": "2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1" + "hash": "a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6" } diff --git a/core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json b/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json similarity index 80% rename from core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json rename to core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json index 0b1daaa10e5f..6588ee2f11ef 100644 --- a/core/lib/dal/.sqlx/query-932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d.json +++ b/core/lib/dal/.sqlx/query-b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731.json @@ -1,6 +1,6 
@@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -164,11 +169,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "932ec4483be7ebf34579f17694f6d14963cbfc84261824e47fbab1323895371d" + "hash": "b037613a81f7b3cb106cf62205feb2d1aa6b398c6981c8d4f35e499f42b01731" } diff --git a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json similarity index 54% rename from core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json rename to core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json index 8c41c0ab9763..9d9fa72595db 100644 --- a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json +++ b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n 
bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW())\n ON CONFLICT DO NOTHING\n ", + "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, NOW())\n ON CONFLICT DO NOTHING\n ", "describe": { "columns": [], "parameters": { @@ -9,10 +9,11 @@ "Int8", "Bytea", "Bytea", + "Bytea", "Bytea" ] }, "nullable": [] }, - "hash": "048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016" + "hash": "b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917" } diff --git a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json similarity index 52% rename from core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json rename to core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json index eba36994fb34..2689716c38ac 100644 --- a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json +++ b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -12,6 +12,11 @@ "ordinal": 1, "name": "default_account_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -21,8 +26,9 @@ }, "nullable": [ false, - false + false, + true ] }, - "hash": "5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6" + "hash": "bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456" } diff --git a/core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json b/core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json deleted file mode 100644 index 09e34a7e33ac..000000000000 --- a/core/lib/dal/.sqlx/query-c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Bytea", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4", - "Int8", - "Int8", - "Int8", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "c5480ba88a93bb7da027e36e0939f1fd7606fdf3e6d4c882cea76eb579c24a93" -} diff --git a/core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json 
b/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json similarity index 78% rename from core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json rename to core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json index 10e2a76618f6..032cf987fc0b 100644 --- a/core/lib/dal/.sqlx/query-4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862.json +++ b/core/lib/dal/.sqlx/query-da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,46 +90,51 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" } @@ -162,11 +167,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "4ef330359df85ad6e0110a068ef3afa9cf50eafc7ac542975edea9bd592ce862" + "hash": "da1ea91f3a1189f881020a6cec17fc5d8943e65a30508898d90a098432050bc7" } diff --git a/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json 
b/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json similarity index 83% rename from core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json rename to core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json index 74a6187e6444..700352c1a8bf 100644 --- a/core/lib/dal/.sqlx/query-45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d.json +++ b/core/lib/dal/.sqlx/query-f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -65,26 +65,31 @@ }, { "ordinal": 12, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 13, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 14, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 15, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" } @@ -108,11 +113,12 @@ true, true, true, + true, false, true, true, true ] }, - "hash": "45e52d05a4483def84c141e3529bab30553732953e589cd237595227044f438d" + "hash": "f208ac4d454220cdd5cf8fa1405b21ca4cc94c38a7d18023ef1e89de484e60d8" } diff --git a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json similarity index 52% rename from core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json rename to core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json index d2c999a70d4c..4fe32531a3f1 100644 --- a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json +++ b/core/lib/dal/.sqlx/query-f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n 
$18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", + "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n $21,\n NOW(),\n NOW()\n )\n ", "describe": { "columns": [], "parameters": { @@ -19,6 +19,7 @@ "Jsonb", "Bytea", "Bytea", + "Bytea", "Int4", "ByteaArray", "Int8Array", @@ -29,5 +30,5 @@ }, "nullable": [] }, - "hash": "9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e" + "hash": "f81c5b92cac0466af8a2721b44d914f15fd7a5fa3d7f7bc56906817c70b04950" } diff --git a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json similarity index 71% rename from core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json rename to core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json index 5e9051587bb9..1b50a750dacf 100644 --- a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json +++ b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -25,6 +25,11 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "upgrade_tx_hash", "type_info": "Bytea" } @@ -39,8 +44,9 @@ false, false, false, + true, true ] }, - "hash": "5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355" + "hash": "f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a" } diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql new file mode 100644 index 000000000000..74ac4e603830 --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE protocol_versions DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE miniblocks DROP COLUMN IF EXISTS evm_emulator_code_hash; diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql new file mode 100644 index 000000000000..43ae361e7ee2 --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql @@ -0,0 +1,4 @@ +ALTER TABLE protocol_versions ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +-- 
We need this column in `miniblocks` as well in order to store data for the pending L1 batch +ALTER TABLE miniblocks ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 5b351511a06d..59cc557f36ec 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -325,6 +325,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -366,6 +367,7 @@ impl BlocksDal<'_, '_> { used_contract_hashes, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, system_logs, pubdata_input @@ -610,6 +612,7 @@ impl BlocksDal<'_, '_> { used_contract_hashes, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, system_logs, storage_refunds, @@ -641,6 +644,7 @@ impl BlocksDal<'_, '_> { $18, $19, $20, + $21, NOW(), NOW() ) @@ -659,6 +663,11 @@ impl BlocksDal<'_, '_> { used_contract_hashes, header.base_system_contracts_hashes.bootloader.as_bytes(), header.base_system_contracts_hashes.default_aa.as_bytes(), + header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), header.protocol_version.map(|v| v as i32), &system_logs, &storage_refunds, @@ -703,6 +712,7 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, @@ -730,6 +740,7 @@ impl BlocksDal<'_, '_> { $15, $16, $17, + $18, NOW(), NOW() ) @@ -752,6 +763,11 @@ impl BlocksDal<'_, '_> { .base_system_contracts_hashes .default_aa .as_bytes(), + l2_block_header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), l2_block_header.protocol_version.map(|v| v as i32), i64::from(l2_block_header.virtual_blocks), l2_block_header.batch_fee_input.fair_pubdata_price() as i64, @@ -780,6 +796,7 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, @@ -820,6 +837,7 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, @@ -1038,6 +1056,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1224,6 +1243,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1304,6 +1324,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1377,6 +1398,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1504,6 +1526,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1568,6 +1591,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + 
l1_batches.evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1646,6 +1670,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -2695,6 +2720,40 @@ mod tests { .is_err()); } + #[tokio::test] + async fn persisting_evm_emulator_hash() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut l2_block_header = create_l2_block_header(1); + l2_block_header.base_system_contracts_hashes.evm_emulator = Some(H256::repeat_byte(0x23)); + conn.blocks_dal() + .insert_l2_block(&l2_block_header) + .await + .unwrap(); + + let mut fetched_block_header = conn + .blocks_dal() + .get_last_sealed_l2_block_header() + .await + .unwrap() + .expect("no block"); + // Batch fee input isn't restored exactly + fetched_block_header.batch_fee_input = l2_block_header.batch_fee_input; + + assert_eq!(fetched_block_header, l2_block_header); + // ...and a sanity check just to be sure + assert!(fetched_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some()); + } + #[tokio::test] async fn loading_l1_batch_header() { let pool = ConnectionPool::<Core>::test_pool().await; diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 904e167d1a6f..c1a1e6765b69 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -673,6 +673,7 @@ impl BlocksWeb3Dal<'_, '_> { miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, miniblocks.protocol_version, miniblocks.fee_account_address FROM @@ -744,7 +745,8 @@ impl BlocksWeb3Dal<'_, '_> { mb.l2_fair_gas_price, mb.fair_pubdata_price, l1_batches.bootloader_code_hash, - l1_batches.default_aa_code_hash + l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash FROM l1_batches INNER JOIN mb ON TRUE diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs index ac852957e2f5..269c47fa2dd1 100644 --- a/core/lib/dal/src/consensus/conv.rs +++ b/core/lib/dal/src/consensus/conv.rs @@ -208,7 +208,7 @@ impl ProtoRepr for proto::TransactionV25 { }, T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), }; - tx.try_into() + Transaction::from_abi(tx, true) } fn build(tx: &Self::Type) -> Self { diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs index 36dfaa1a466d..857e2973ae33 100644 --- a/core/lib/dal/src/factory_deps_dal.rs +++ b/core/lib/dal/src/factory_deps_dal.rs @@ -94,6 +94,7 @@ impl FactoryDepsDal<'_, '_> { &mut self, bootloader_hash: H256, default_aa_hash: H256, + evm_emulator_hash: Option<H256>, ) -> anyhow::Result<BaseSystemContracts> { let bootloader_bytecode = self .get_sealed_factory_dep(bootloader_hash) @@ -115,9 +116,26 @@ impl FactoryDepsDal<'_, '_> { code: bytes_to_be_words(default_aa_bytecode), hash: default_aa_hash, }; + + let evm_emulator_code = if let Some(evm_emulator_hash) = evm_emulator_hash { + let evm_emulator_bytecode = self + .get_sealed_factory_dep(evm_emulator_hash) + .await + .context("failed loading EVM emulator code")?
+ .with_context(|| format!("EVM emulator code with hash {evm_emulator_hash:?} should be present in the database"))?; + + Some(SystemContractCode { + code: bytes_to_be_words(evm_emulator_bytecode), + hash: evm_emulator_hash, + }) + } else { + None + }; + Ok(BaseSystemContracts { bootloader: bootloader_code, default_aa: default_aa_code, + evm_emulator: evm_emulator_code, }) } diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index 34e14387ca61..7e9a9eca9d41 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -44,6 +44,7 @@ pub(crate) struct StorageL1BatchHeader { pub used_contract_hashes: serde_json::Value, pub bootloader_code_hash: Option<Vec<u8>>, pub default_aa_code_hash: Option<Vec<u8>>, + pub evm_emulator_code_hash: Option<Vec<u8>>, pub protocol_version: Option<i32>, // `system_logs` are introduced as part of boojum and will be absent in all batches generated prior to boojum. @@ -82,6 +83,7 @@ impl StorageL1BatchHeader { base_system_contracts_hashes: convert_base_system_contracts_hashes( self.bootloader_code_hash, self.default_aa_code_hash, + self.evm_emulator_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: self @@ -103,6 +105,7 @@ fn convert_l2_to_l1_logs(raw_logs: Vec<Vec<u8>>) -> Vec<L2ToL1Log> { fn convert_base_system_contracts_hashes( bootloader_code_hash: Option<Vec<u8>>, default_aa_code_hash: Option<Vec<u8>>, + evm_emulator_code_hash: Option<Vec<u8>>, ) -> BaseSystemContractsHashes { BaseSystemContractsHashes { bootloader: bootloader_code_hash @@ -111,6 +114,7 @@ fn convert_base_system_contracts_hashes( default_aa: default_aa_code_hash .map(|hash| H256::from_slice(&hash)) .expect("should not be none"), + evm_emulator: evm_emulator_code_hash.map(|hash| H256::from_slice(&hash)), } } @@ -134,6 +138,7 @@ pub(crate) struct StorageL1Batch { pub zkporter_is_available: Option<bool>, pub bootloader_code_hash: Option<Vec<u8>>, pub default_aa_code_hash: Option<Vec<u8>>, + pub evm_emulator_code_hash: Option<Vec<u8>>, pub l2_to_l1_messages: Vec<Vec<u8>>, pub l2_l1_merkle_root: Option<Vec<u8>>, @@ -177,6 +182,7 @@ impl StorageL1Batch { base_system_contracts_hashes: convert_base_system_contracts_hashes( self.bootloader_code_hash, self.default_aa_code_hash, + self.evm_emulator_code_hash, ), system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(), protocol_version: self @@ -240,6 +246,10 @@ impl TryFrom<StorageL1Batch> for L1BatchMetadata { .default_aa_code_hash .ok_or(L1BatchMetadataError::Incomplete("default_aa_code_hash"))?, ), + evm_emulator_code_hash: batch + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), protocol_version: batch .protocol_version .map(|v| (v as u16).try_into().unwrap()), @@ -275,6 +285,7 @@ pub(crate) struct StorageBlockDetails { pub fair_pubdata_price: Option<i64>, pub bootloader_code_hash: Option<Vec<u8>>, pub default_aa_code_hash: Option<Vec<u8>>, + pub evm_emulator_code_hash: Option<Vec<u8>>, pub fee_account_address: Vec<u8>, pub protocol_version: Option<i32>, } @@ -320,6 +331,7 @@ impl From<StorageBlockDetails> for api::BlockDetails { base_system_contracts_hashes: convert_base_system_contracts_hashes( details.bootloader_code_hash, details.default_aa_code_hash, + details.evm_emulator_code_hash, ), }; api::BlockDetails { @@ -352,6 +364,7 @@ pub(crate) struct StorageL1BatchDetails { pub fair_pubdata_price: Option<i64>, pub bootloader_code_hash: Option<Vec<u8>>, pub default_aa_code_hash: Option<Vec<u8>>, + pub evm_emulator_code_hash: Option<Vec<u8>>, } impl From<StorageL1BatchDetails> for api::L1BatchDetails { @@ -395,6 +408,7 @@ impl From<StorageL1BatchDetails> for api::L1BatchDetails { base_system_contracts_hashes: convert_base_system_contracts_hashes(
details.bootloader_code_hash, details.default_aa_code_hash, + details.evm_emulator_code_hash, ), }; api::L1BatchDetails { @@ -418,6 +432,7 @@ pub(crate) struct StorageL2BlockHeader { // L2 gas price assumed in the corresponding batch pub bootloader_code_hash: Option<Vec<u8>>, pub default_aa_code_hash: Option<Vec<u8>>, + pub evm_emulator_code_hash: Option<Vec<u8>>, pub protocol_version: Option<i32>, pub fair_pubdata_price: Option<i64>, @@ -471,6 +486,7 @@ impl From<StorageL2BlockHeader> for L2BlockHeader { base_system_contracts_hashes: convert_base_system_contracts_hashes( row.bootloader_code_hash, row.default_aa_code_hash, + row.evm_emulator_code_hash, ), gas_per_pubdata_limit: row.gas_per_pubdata_limit as u64, protocol_version, diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index e53bf7b9d0a4..a833236a7b62 100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -16,6 +16,7 @@ pub struct StorageProtocolVersion { pub snark_wrapper_vk_hash: Vec<u8>, pub bootloader_code_hash: Vec<u8>, pub default_account_code_hash: Vec<u8>, + pub evm_emulator_code_hash: Option<Vec<u8>>, } pub(crate) fn protocol_version_from_storage( @@ -34,6 +35,10 @@ base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: H256::from_slice(&storage_version.bootloader_code_hash), default_aa: H256::from_slice(&storage_version.default_account_code_hash), + evm_emulator: storage_version + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), }, tx, } @@ -45,6 +50,7 @@ pub struct StorageApiProtocolVersion { pub timestamp: i64, pub bootloader_code_hash: Vec<u8>, pub default_account_code_hash: Vec<u8>, + pub evm_emulator_code_hash: Option<Vec<u8>>, pub upgrade_tx_hash: Option<Vec<u8>>, } @@ -60,6 +66,10 @@ impl From<StorageApiProtocolVersion> for api::ProtocolVersion { storage_protocol_version.timestamp as u64, H256::from_slice(&storage_protocol_version.bootloader_code_hash), H256::from_slice(&storage_protocol_version.default_account_code_hash), + storage_protocol_version + .evm_emulator_code_hash + .as_deref() + .map(H256::from_slice), l2_system_upgrade_tx_hash, ) } diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index 4b2f1937f46c..7a4ebe074fe0 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -20,6 +20,7 @@ pub(crate) struct StorageSyncBlock { pub fair_pubdata_price: Option<i64>, pub bootloader_code_hash: Option<Vec<u8>>, pub default_aa_code_hash: Option<Vec<u8>>, + pub evm_emulator_code_hash: Option<Vec<u8>>, pub fee_account_address: Vec<u8>, pub protocol_version: i32, pub virtual_blocks: i64, @@ -73,6 +74,12 @@ impl TryFrom<StorageSyncBlock> for SyncBlock { .decode_column("bootloader_code_hash")?, default_aa: parse_h256_opt(block.default_aa_code_hash.as_deref()) .decode_column("default_aa_code_hash")?, + evm_emulator: block + .evm_emulator_code_hash + .as_deref() + .map(parse_h256) + .transpose() + .decode_column("evm_emulator_code_hash")?, }, fee_account_address: parse_h160(&block.fee_account_address) .decode_column("fee_account_address")?, diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index bb219ee1d61b..78daaebb335e 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -352,6 +352,16 @@ impl From<StorageTransactionReceipt> for TransactionReceipt { .index_in_block .map_or_else(Default::default, U64::from); + // For better compatibility with various clients, we never return `None` recipient address.
+ let to = storage_receipt + .transfer_to + .or(storage_receipt.execute_contract_address) + .and_then(|addr| { + serde_json::from_value::<Option<Address>>(addr) + .expect("invalid address value in the database") + }) + .unwrap_or_else(Address::zero); + let block_hash = H256::from_slice(&storage_receipt.block_hash); TransactionReceipt { transaction_hash: H256::from_slice(&storage_receipt.tx_hash), @@ -361,15 +371,7 @@ impl From<StorageTransactionReceipt> for TransactionReceipt { l1_batch_tx_index: storage_receipt.l1_batch_tx_index.map(U64::from), l1_batch_number: storage_receipt.l1_batch_number.map(U64::from), from: H160::from_slice(&storage_receipt.initiator_address), - to: storage_receipt - .transfer_to - .or(storage_receipt.execute_contract_address) - .map(|addr| { - serde_json::from_value::<Address>
(addr) - .expect("invalid address value in the database") - }) - // For better compatibility with various clients, we never return null. - .or_else(|| Some(Address::default())), + to: Some(to), cumulative_gas_used: Default::default(), // TODO: Should be actually calculated (SMA-1183). gas_used: { let refunded_gas: U256 = storage_receipt.refunded_gas.into(); @@ -508,6 +510,10 @@ impl StorageApiTransaction { .signature .and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok()); + let to = serde_json::from_value(self.execute_contract_address) + .ok() + .unwrap_or_default(); + // For legacy and EIP-2930 transactions it is gas price willing to be paid by the sender in wei. // For other transactions it should be the effective gas price if transaction is included in block, // otherwise this value should be set equal to the max fee per gas. @@ -528,7 +534,7 @@ impl StorageApiTransaction { block_number: self.block_number.map(|number| U64::from(number as u64)), transaction_index: self.index_in_block.map(|idx| U64::from(idx as u64)), from: Some(Address::from_slice(&self.initiator_address)), - to: Some(serde_json::from_value(self.execute_contract_address).unwrap()), + to, value: bigdecimal_to_u256(self.value), gas_price: Some(bigdecimal_to_u256(gas_price)), gas: bigdecimal_to_u256(self.gas_limit.unwrap_or_else(BigDecimal::zero)), diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 72ae811ce764..3b500e07a08a 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -45,17 +45,22 @@ impl ProtocolVersionsDal<'_, '_> { timestamp, bootloader_code_hash, default_account_code_hash, + evm_emulator_code_hash, upgrade_tx_hash, created_at ) VALUES - ($1, $2, $3, $4, $5, NOW()) + ($1, $2, $3, $4, $5, $6, NOW()) ON CONFLICT DO NOTHING "#, version.minor as i32, timestamp as i64, base_system_contracts_hashes.bootloader.as_bytes(), base_system_contracts_hashes.default_aa.as_bytes(), + base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), tx_hash.as_ref().map(H256::as_bytes), ) .instrument("save_protocol_version#minor") @@ -193,7 +198,8 @@ impl ProtocolVersionsDal<'_, '_> { r#" SELECT bootloader_code_hash, - default_account_code_hash + default_account_code_hash, + evm_emulator_code_hash FROM protocol_versions WHERE @@ -212,6 +218,7 @@ impl ProtocolVersionsDal<'_, '_> { .get_base_system_contracts( H256::from_slice(&row.bootloader_code_hash), H256::from_slice(&row.default_account_code_hash), + row.evm_emulator_code_hash.as_deref().map(H256::from_slice), ) .await?; Some(contracts) @@ -232,6 +239,7 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions.timestamp, protocol_versions.bootloader_code_hash, protocol_versions.default_account_code_hash, + protocol_versions.evm_emulator_code_hash, protocol_patches.patch, protocol_patches.snark_wrapper_vk_hash FROM diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index a3a7a162c3dd..adc3957f8722 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -21,6 +21,7 @@ impl ProtocolVersionsWeb3Dal<'_, '_> { timestamp, bootloader_code_hash, default_account_code_hash, + evm_emulator_code_hash, upgrade_tx_hash FROM protocol_versions diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index ec6ee0f92812..ab5684007d0b 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -50,6 
+50,7 @@ impl SyncDal<'_, '_> { miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + miniblocks.evm_emulator_code_hash, miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index dc672fa1f807..bf85008f7b58 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -54,6 +54,7 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { logs_bloom: Default::default(), } } + pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { L1BatchHeader::new( L1BatchNumber(number), @@ -61,6 +62,7 @@ pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), + evm_emulator: Some(H256::repeat_byte(43)), }, ProtocolVersionId::latest(), ) diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index dcf5f25f104d..c2209bb9c938 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -607,6 +607,39 @@ mod tests { ); } + #[tokio::test] + async fn getting_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + let mut tx = mock_l2_transaction(); + tx.execute.contract_address = None; + let tx_hash = tx.hash(); + prepare_transactions(&mut conn, vec![tx.clone()]).await; + + let fetched_tx = conn + .transactions_dal() + .get_tx_by_hash(tx_hash) + .await + .unwrap() + .expect("no transaction"); + let mut fetched_tx = L2Tx::try_from(fetched_tx).unwrap(); + assert_eq!(fetched_tx.execute.contract_address, None); + fetched_tx.raw_bytes = tx.raw_bytes.clone(); + assert_eq!(fetched_tx, tx); + + let web3_tx = conn + .transactions_web3_dal() + .get_transaction_by_position(L2BlockNumber(1), 0, L2ChainId::from(270)) + .await; + let web3_tx = web3_tx.unwrap().expect("no transaction"); + assert_eq!(web3_tx.hash, tx_hash); + assert_eq!(web3_tx.to, None); + } + #[tokio::test] async fn getting_receipts() { let connection_pool = ConnectionPool::::test_pool().await; @@ -621,7 +654,7 @@ mod tests { let tx2 = mock_l2_transaction(); let tx2_hash = tx2.hash(); - prepare_transactions(&mut conn, vec![tx1.clone(), tx2.clone()]).await; + prepare_transactions(&mut conn, vec![tx1, tx2]).await; let mut receipts = conn .transactions_web3_dal() @@ -636,6 +669,31 @@ mod tests { assert_eq!(receipts[1].transaction_hash, tx2_hash); } + #[tokio::test] + async fn getting_receipt_for_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut tx = mock_l2_transaction(); + let tx_hash = tx.hash(); + tx.execute.contract_address = None; + prepare_transactions(&mut conn, vec![tx]).await; + + let receipts = conn + .transactions_web3_dal() + .get_transaction_receipts(&[tx_hash]) + .await + .unwrap(); + assert_eq!(receipts.len(), 1); + let receipt = receipts.into_iter().next().unwrap(); + assert_eq!(receipt.transaction_hash, tx_hash); + assert_eq!(receipt.to, Some(Address::zero())); + } + #[tokio::test] async fn getting_l2_block_transactions() { 
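The new receipt logic above prefers `transfer_to`, falls back to `execute_contract_address`, and finally to the zero address, while the transaction object itself keeps `to: None` for EVM deployment transactions (as the new DAL tests check). A standalone sketch of that fallback, using a simplified `Address` stand-in rather than the zksync types:

```rust
// Sketch of the receipt `to` fallback: prefer `transfer_to`, then
// `execute_contract_address`, and finally the zero address so the receipt
// never exposes a null `to`. `Address` is simplified to a byte array here.
type Address = [u8; 20];

fn receipt_to(transfer_to: Option<Address>, execute_contract_address: Option<Address>) -> Address {
    transfer_to
        .or(execute_contract_address)
        .unwrap_or([0u8; 20]) // `Address::zero()` in the real code
}

fn main() {
    // EVM deployment transactions carry no target, so the receipt falls back to zero...
    assert_eq!(receipt_to(None, None), [0u8; 20]);
    // ...while a regular transfer keeps its recipient.
    assert_eq!(receipt_to(Some([1u8; 20]), None), [1u8; 20]);
}
```

Returning the zero address instead of `null` keeps the receipt shape compatible with clients that expect an address-typed field, which is the rationale stated in the comment removed above.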
let connection_pool = ConnectionPool::::test_pool().await; diff --git a/core/lib/env_config/src/chain.rs b/core/lib/env_config/src/chain.rs index a25c593bd881..a125f3314961 100644 --- a/core/lib/env_config/src/chain.rs +++ b/core/lib/env_config/src/chain.rs @@ -102,6 +102,7 @@ mod tests { default_aa_hash: Some(hash( "0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066", )), + evm_emulator_hash: None, l1_batch_commit_data_generator_mode, max_circuits_per_batch: 24100, protective_reads_persistence_enabled: true, diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index 64e0a89d5a42..e5132eb7d91f 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -72,6 +72,7 @@ mod tests { pubdata_sending_mode: PubdataSendingMode::Calldata, tx_aggregation_only_prove_and_execute: false, tx_aggregation_paused: false, + time_in_mempool_in_l1_blocks_cap: 2000, }), gas_adjuster: Some(GasAdjusterConfig { default_priority_fee_per_gas: 20000000000, @@ -131,6 +132,7 @@ mod tests { ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG="30" ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS="4000000" ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" + ETH_SENDER_SENDER_TIME_IN_MEMPOOL_IN_L1_BLOCKS_CAP="2000" ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000" ETH_SENDER_SENDER_PUBDATA_SENDING_MODE="Calldata" diff --git a/core/lib/env_config/src/genesis.rs b/core/lib/env_config/src/genesis.rs index bf30fd4cc339..55c79eceb502 100644 --- a/core/lib/env_config/src/genesis.rs +++ b/core/lib/env_config/src/genesis.rs @@ -68,6 +68,7 @@ impl FromEnv for GenesisConfig { genesis_commitment: contracts_config.genesis_batch_commitment, bootloader_hash: state_keeper.bootloader_hash, default_aa_hash: state_keeper.default_aa_hash, + evm_emulator_hash: state_keeper.evm_emulator_hash, // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network l1_chain_id: L1ChainId(network_config.network.chain_id().0), sl_chain_id: Some(network_config.network.chain_id()), diff --git a/core/lib/eth_client/src/types.rs b/core/lib/eth_client/src/types.rs index 59fb1cdeddcc..dd332351afb0 100644 --- a/core/lib/eth_client/src/types.rs +++ b/core/lib/eth_client/src/types.rs @@ -320,7 +320,7 @@ pub struct FailureInfo { #[cfg(test)] mod tests { - use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; + use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_types::{ eth_sender::{EthTxBlobSidecarV1, SidecarBlobV1}, web3, K256PrivateKey, EIP_4844_TX_TYPE, H256, U256, U64, @@ -384,10 +384,7 @@ mod tests { .as_ref(), )]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction.clone()); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with @@ -493,10 +490,7 @@ mod tests { blob_versioned_hashes: Some(vec![versioned_hash_1, versioned_hash_2]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml index f760134e09bb..92bb47824f3c 100644 --- a/core/lib/eth_signer/Cargo.toml +++ 
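The eth_sender config above gains a `time_in_mempool_in_l1_blocks_cap` knob wired through the `ETH_SENDER_SENDER_TIME_IN_MEMPOOL_IN_L1_BLOCKS_CAP` variable. A generic sketch of reading such a numeric cap from the environment with a fallback; this is not the `zksync_env_config` loader, and the default used here is invented for illustration:

```rust
// Generic sketch of parsing a numeric cap from an environment variable.
use std::env;

fn time_in_mempool_in_l1_blocks_cap() -> u32 {
    env::var("ETH_SENDER_SENDER_TIME_IN_MEMPOOL_IN_L1_BLOCKS_CAP")
        .ok()
        .and_then(|raw| raw.parse().ok())
        .unwrap_or(1800) // arbitrary fallback for the sketch only
}

fn main() {
    // With the variable set to "2000", as in the test fixture above, the parsed
    // value would match `time_in_mempool_in_l1_blocks_cap: 2000`.
    println!("cap = {}", time_in_mempool_in_l1_blocks_cap());
}
```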
b/core/lib/eth_signer/Cargo.toml @@ -11,10 +11,9 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_types.workspace = true +zksync_basic_types.workspace = true +zksync_crypto_primitives.workspace = true + +async-trait.workspace = true rlp.workspace = true thiserror.workspace = true -async-trait.workspace = true - -[dev-dependencies] -tokio = { workspace = true, features = ["full"] } diff --git a/core/lib/eth_signer/src/error.rs b/core/lib/eth_signer/src/error.rs deleted file mode 100644 index 8b137891791f..000000000000 --- a/core/lib/eth_signer/src/error.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/core/lib/eth_signer/src/lib.rs b/core/lib/eth_signer/src/lib.rs index 3a92d47b062a..8b6025eb15da 100644 --- a/core/lib/eth_signer/src/lib.rs +++ b/core/lib/eth_signer/src/lib.rs @@ -1,5 +1,6 @@ use async_trait::async_trait; -use zksync_types::{Address, EIP712TypedStructure, Eip712Domain, PackedEthSignature}; +use zksync_basic_types::Address; +use zksync_crypto_primitives::{EIP712TypedStructure, Eip712Domain, PackedEthSignature}; pub use crate::{pk_signer::PrivateKeySigner, raw_ethereum_tx::TransactionParameters}; diff --git a/core/lib/eth_signer/src/pk_signer.rs b/core/lib/eth_signer/src/pk_signer.rs index 47b0e1109911..0f55425a0d58 100644 --- a/core/lib/eth_signer/src/pk_signer.rs +++ b/core/lib/eth_signer/src/pk_signer.rs @@ -1,5 +1,7 @@ -use zksync_types::{ - Address, EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature, +use async_trait::async_trait; +use zksync_basic_types::Address; +use zksync_crypto_primitives::{ + EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature, }; use crate::{ @@ -12,22 +14,20 @@ pub struct PrivateKeySigner { private_key: K256PrivateKey, } +// We define inherent methods duplicating `EthereumSigner` ones because they are sync and (other than `sign_typed_data`) infallible. impl PrivateKeySigner { pub fn new(private_key: K256PrivateKey) -> Self { Self { private_key } } -} -#[async_trait::async_trait] -impl EthereumSigner for PrivateKeySigner { - /// Get Ethereum address that matches the private key. - async fn get_address(&self) -> Result<Address, SignerError> { - Ok(self.private_key.address()) + /// Gets an Ethereum address that matches this private key. + pub fn address(&self) -> Address { + self.private_key.address() } /// Signs typed struct using Ethereum private key by EIP-712 signature standard. /// Result of this function is the equivalent of RPC calling `eth_signTypedData`. - async fn sign_typed_data<S: EIP712TypedStructure + Sync>( + pub fn sign_typed_data<S: EIP712TypedStructure + Sync>( &self, domain: &Eip712Domain, typed_struct: &S, @@ -39,16 +39,11 @@ impl EthereumSigner for PrivateKeySigner { } /// Signs and returns the RLP-encoded transaction.
- async fn sign_transaction( - &self, - raw_tx: TransactionParameters, - ) -> Result<Vec<u8>, SignerError> { + pub fn sign_transaction(&self, raw_tx: TransactionParameters) -> Vec<u8> { // According to the code in web3 // We should use `max_fee_per_gas` as `gas_price` if we use EIP1559 let gas_price = raw_tx.max_fee_per_gas; - let max_priority_fee_per_gas = raw_tx.max_priority_fee_per_gas; - let tx = Transaction { to: raw_tx.to, nonce: raw_tx.nonce, @@ -62,21 +57,42 @@ impl EthereumSigner for PrivateKeySigner { max_fee_per_blob_gas: raw_tx.max_fee_per_blob_gas, blob_versioned_hashes: raw_tx.blob_versioned_hashes, }; - let signed = tx.sign(&self.private_key, raw_tx.chain_id); - Ok(signed.raw_transaction.0) + signed.raw_transaction.0 + } +} + +#[async_trait] +impl EthereumSigner for PrivateKeySigner { + async fn get_address(&self) -> Result<Address, SignerError> { + Ok(self.address()) + } + + async fn sign_typed_data<S: EIP712TypedStructure + Sync>( + &self, + domain: &Eip712Domain, + typed_struct: &S, + ) -> Result<PackedEthSignature, SignerError> { + self.sign_typed_data(domain, typed_struct) + } + + async fn sign_transaction( + &self, + raw_tx: TransactionParameters, + ) -> Result<Vec<u8>, SignerError> { + Ok(self.sign_transaction(raw_tx)) + } } #[cfg(test)] mod test { - use zksync_types::{K256PrivateKey, H160, H256, U256, U64}; + use zksync_basic_types::{H160, H256, U256, U64}; + use zksync_crypto_primitives::K256PrivateKey; - use super::PrivateKeySigner; - use crate::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; + use super::*; - #[tokio::test] - async fn test_generating_signed_raw_transaction() { + #[test] + fn test_generating_signed_raw_transaction() { let private_key = K256PrivateKey::from_bytes(H256::from([5; 32])).unwrap(); let signer = PrivateKeySigner::new(private_key); let raw_transaction = TransactionParameters { @@ -94,10 +110,7 @@ mod test { blob_versioned_hashes: None, max_fee_per_blob_gas: None, }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction); assert_ne!(raw_tx.len(), 1); // pre-calculated signature with right algorithm implementation let precalculated_raw_tx: Vec<u8> = vec![ diff --git a/core/lib/eth_signer/src/raw_ethereum_tx.rs b/core/lib/eth_signer/src/raw_ethereum_tx.rs index 9479b5bd9d79..bea64305b47c 100644 --- a/core/lib/eth_signer/src/raw_ethereum_tx.rs +++ b/core/lib/eth_signer/src/raw_ethereum_tx.rs @@ -10,11 +10,11 @@ //! Link to @Deniallugo's PR to web3: https://github.com/tomusdrw/rust-web3/pull/630 use rlp::RlpStream; -use zksync_types::{ - ethabi::Address, +use zksync_basic_types::{ web3::{keccak256, AccessList, Signature, SignedTransaction}, - K256PrivateKey, H256, U256, U64, + Address, H256, U256, U64, }; +use zksync_crypto_primitives::K256PrivateKey; const LEGACY_TX_ID: u64 = 0; const ACCESSLISTS_TX_ID: u64 = 1; diff --git a/core/lib/l1_contract_interface/src/multicall3/mod.rs b/core/lib/l1_contract_interface/src/multicall3/mod.rs index 7d922668f940..52df37e04304 100644 --- a/core/lib/l1_contract_interface/src/multicall3/mod.rs +++ b/core/lib/l1_contract_interface/src/multicall3/mod.rs @@ -7,6 +7,7 @@ use zksync_types::{ }; /// Multicall3 contract aggregate method input vector struct.
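The `PrivateKeySigner` change above follows a simple pattern: keep synchronous, infallible inherent methods and let the async `EthereumSigner` impl delegate to them. A minimal sketch of that pattern with stand-in types (`Signer`, `AsyncSigner`), assuming the `async-trait` crate as a dependency:

```rust
// Sketch: sync inherent method plus a thin async trait impl that delegates to it.
use async_trait::async_trait;

struct Signer;

impl Signer {
    // Inherent, sync and infallible.
    fn sign_transaction(&self, payload: &[u8]) -> Vec<u8> {
        payload.to_vec() // placeholder for real RLP signing
    }
}

#[async_trait]
trait AsyncSigner {
    async fn sign_transaction(&self, payload: &[u8]) -> Result<Vec<u8>, String>;
}

#[async_trait]
impl AsyncSigner for Signer {
    async fn sign_transaction(&self, payload: &[u8]) -> Result<Vec<u8>, String> {
        // Method resolution picks the inherent method first, so this does not recurse.
        Ok(self.sign_transaction(payload))
    }
}

fn main() {
    // Callers that don't need the trait can stay fully synchronous:
    assert_eq!(Signer.sign_transaction(b"tx"), b"tx".to_vec());
}
```

Because inherent methods are resolved before trait methods, the delegating impl can reuse the same method names without recursion, which is why `Ok(self.sign_transaction(raw_tx))` in the diff above is sound.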
+#[derive(Debug)] pub struct Multicall3Call { pub target: Address, pub allow_failure: bool, @@ -21,6 +22,7 @@ impl Tokenizable for Multicall3Call { self.calldata.into_token(), ]) } + fn from_token(token: Token) -> Result { let Token::Tuple(mut result_token) = token else { return Err(error(&[token], "Multicall3Call")); diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 2c2cd4f044b9..ab418d24cd12 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -37,11 +37,10 @@ once_cell.workspace = true thiserror.workspace = true tracing.workspace = true vise.workspace = true +ethabi.workspace = true [dev-dependencies] assert_matches.workspace = true pretty_assertions.workspace = true -tokio = { workspace = true, features = ["time"] } zksync_test_account.workspace = true -ethabi.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index ce928e652d76..50bb19938fe7 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -47,6 +47,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -103,6 +104,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -158,6 +160,7 @@ impl GlueFrom for crate::interface: circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -227,6 +230,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ -259,6 +263,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ -307,6 +312,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs index 3cb61b461a42..4c4cffcc6876 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs @@ -22,6 +22,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } @@ -48,6 +49,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } @@ -74,6 +76,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 2dc680ba77d9..8978d4348edd 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -66,12 +66,14 @@ impl GlueFrom VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, 
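For the `Multicall3Call` `Tokenizable` impl near the start of this hunk, each call is encoded as an ABI tuple of `(target, allow_failure, calldata)`. A hedged sketch of that encoding using the `ethabi` crate directly; the unpacking order shown in `main` is just one way to destructure the tuple, not a claim about the exact `from_token` body:

```rust
// Sketch: encode a Multicall3 aggregate3-style call as an ethabi tuple.
use ethabi::{Address, Token};

fn to_token(target: Address, allow_failure: bool, calldata: Vec<u8>) -> Token {
    Token::Tuple(vec![
        Token::Address(target),
        Token::Bool(allow_failure),
        Token::Bytes(calldata),
    ])
}

fn main() {
    let token = to_token(Address::zero(), true, vec![0xde, 0xad]);
    // One way to unpack: pop the tuple fields in reverse order.
    if let Token::Tuple(mut fields) = token {
        assert!(matches!(fields.pop(), Some(Token::Bytes(_)))); // calldata
        assert!(matches!(fields.pop(), Some(Token::Bool(true)))); // allow_failure
        assert!(matches!(fields.pop(), Some(Token::Address(_)))); // target
    }
}
```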
}, } } @@ -100,12 +102,14 @@ impl logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, TxRevertReason::Halt(halt) => VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, } } @@ -129,6 +133,7 @@ impl GlueFrom { unreachable!("Halt is the only revert reason for VM 5") diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index 5692f103da3d..89196788a762 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -22,6 +22,25 @@ pub struct Vm { pub(crate) system_env: SystemEnv, } +impl Vm { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics { + event_sink_inner: self.vm.state.event_sink.get_size(), + event_sink_history: self.vm.state.event_sink.get_history_size(), + memory_inner: self.vm.state.memory.get_size(), + memory_history: self.vm.state.memory.get_history_size(), + decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), + decommittment_processor_history: self + .vm + .state + .decommittment_processor + .get_history_size(), + storage_inner: self.vm.state.storage.get_size(), + storage_history: self.vm.state.storage.get_history_size(), + } + } +} + impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; @@ -160,23 +179,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: self.vm.state.event_sink.get_size(), - event_sink_history: self.vm.state.event_sink.get_history_size(), - memory_inner: self.vm.state.memory.get_size(), - memory_history: self.vm.state.memory.get_history_size(), - decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), - decommittment_processor_history: self - .vm - .state - .decommittment_processor - .get_history_size(), - storage_inner: self.vm.state.storage.get_size(), - storage_history: self.vm.state.storage.get_history_size(), - } - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs index 393eb043cb76..1acf75b27e1b 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. 
} => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs index 2160c4b56a0c..cc199fef9416 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs @@ -99,6 +99,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs index 71ae20d44061..3a3b22ea2460 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs deleted file mode 100644 index ba699e7558b4..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs +++ /dev/null @@ -1,284 +0,0 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use zk_evm_1_4_1::{ - aux_structures::Timestamp, zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK, -}; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, - writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BOOTLOADER_BATCH_TIP_OVERHEAD, - tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, - tracers::PubdataTracer, - HistoryEnabled, TracerDispatcher, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let 
bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .collect::>(); - - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(encoded_calls)]) - .unwrap() -} - -fn execute_test(test_data: L1MessengerTestData) -> u32 { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, - calldata: data, - value: U256::zero(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs, - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!(!result.result.is_failed(), "Batch wasn't successful"); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - ergs_before - ergs_after -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - 
-#[test] -fn test_dry_run_upper_bound() { - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let max_logs = execute_test(L1MessengerTestData { - l2_to_l1_logs: L2ToL1Log::MIN_L2_L1_LOGS_TREE_SIZE, - ..Default::default() - }); - - let max_messages = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; 0]; MAX_PUBDATA_PER_BLOCK as usize / L2ToL1Log::SERIALIZED_SIZE], - ..Default::default() - }); - - let long_message = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize]; 1], - ..Default::default() - }); - - let max_bytecodes = execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long - bytecodes: vec![vec![0; 32]; MAX_PUBDATA_PER_BLOCK as usize / 32], - ..Default::default() - }); - - let long_bytecode = execute_test(L1MessengerTestData { - // We have to add 48 since a valid bytecode must have an odd number of 32 byte words - bytecodes: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize + 48]; 1], - ..Default::default() - }); - - let lots_of_small_repeated_writes = execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_PUBDATA_PER_BLOCK as usize / 5), - ..Default::default() - }); - - let lots_of_big_repeated_writes = execute_test(L1MessengerTestData { - // Each big write will approximately require 32 bytes to encode - state_diffs: generate_state_diffs(true, false, MAX_PUBDATA_PER_BLOCK as usize / 32), - ..Default::default() - }); - - let lots_of_small_initial_writes = execute_test(L1MessengerTestData { - // Each initial write will take at least 32 bytes for derived key + 5 bytes for value - state_diffs: generate_state_diffs(false, true, MAX_PUBDATA_PER_BLOCK as usize / 37), - ..Default::default() - }); - - let lots_of_large_initial_writes = execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 32 bytes for value - state_diffs: generate_state_diffs(false, false, MAX_PUBDATA_PER_BLOCK as usize / 64), - ..Default::default() - }); - - let max_used_gas = vec![ - max_logs, - max_messages, - long_message, - max_bytecodes, - long_bytecode, - lots_of_small_repeated_writes, - lots_of_big_repeated_writes, - lots_of_small_initial_writes, - lots_of_large_initial_writes, - ] - .into_iter() - .max() - .unwrap(); - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
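The deleted `test_dry_run_upper_bound` above measures several worst-case scenarios and then asserts a 2x safety margin against `BOOTLOADER_BATCH_TIP_OVERHEAD`. A standalone sketch of that final check, with invented gas numbers and a stand-in constant:

```rust
// Sketch of the "worst scenario times two must fit in the overhead" check.
fn main() {
    // Hypothetical gas costs measured per scenario (logs, messages, bytecodes, state diffs).
    let scenario_costs = [1_200_000u32, 950_000, 1_480_000, 1_310_000];
    let max_used_gas = scenario_costs.iter().copied().max().unwrap();

    const BOOTLOADER_BATCH_TIP_OVERHEAD: u32 = 3_000_000; // stand-in for the real constant
    assert!(
        max_used_gas * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD,
        "BOOTLOADER_BATCH_TIP_OVERHEAD is too low"
    );
}
```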
- assert!( - max_used_gas * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs deleted file mode 100644 index 47e047ebbf72..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs deleted file mode 100644 index 9db5e7326e71..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs deleted file mode 100644 index 1a4c026a23f4..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs deleted file mode 100644 index ecc2fdfe6c0a..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled}, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.00226, 0.00077, 0.1195, 0.1429, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs deleted file mode 100644 index be8e253c6d8a..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs +++ /dev/null @@ -1,78 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs deleted file mode 100644 index 9dfda9e1a68c..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
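The deleted default-AA test above checks that the operator ends up with `gas_limit * base_fee` minus the refunded gas priced at the same base fee. A small arithmetic sketch of that bookkeeping with made-up numbers:

```rust
// Sketch of the fee accounting asserted in the deleted default-AA test.
fn main() {
    let base_fee: u64 = 250_000_000; // hypothetical batch base fee, in wei
    let gas_limit: u64 = 1_000_000;
    let refunded_gas: u64 = 400_000;

    let maximal_fee = gas_limit * base_fee;
    let expected_fee = maximal_fee - refunded_gas * base_fee;
    // Equivalent to charging only the gas actually used at the base fee.
    assert_eq!(expected_fee, (gas_limit - refunded_gas) * base_fee);
}
```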
-#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs deleted file mode 100644 index a7cbcd8e2953..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - 
assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs deleted file mode 100644 index 75517138db30..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_1_4_1::tests::tester::VmTesterBuilder; -use crate::vm_1_4_1::types::inputs::system_env::TxExecutionMode; -use crate::vm_1_4_1::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs deleted file mode 100644 index 7644064f4af6..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs deleted file mode 100644 index e98fc23c6eb4..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs +++ /dev/null @@ -1,189 +0,0 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} - -#[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
- - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: L1_MESSENGER_ADDRESS, - value: 0.into(), - factory_deps: None, - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs deleted file mode 100644 index 073d9ce5800b..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zk_evm_1_4_1::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. 
- vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
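A minimal standalone sketch of the continuation rule these cases exercise: a transaction that claims to belong to the *same* L2 block must repeat the block's timestamp and previous-block hash. `BlockInfo` is a toy stand-in for the real `L2BlockEnv`; the error strings mirror the ones asserted in the deleted test.

```rust
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct BlockInfo {
    timestamp: u64,
    prev_block_hash: [u8; 32],
}

/// Checks whether `proposed` is a valid continuation of `current`.
fn check_same_block_continuation(
    current: &BlockInfo,
    proposed: &BlockInfo,
) -> Result<(), &'static str> {
    if proposed.timestamp != current.timestamp {
        return Err("The timestamp of the same L2 block must be same");
    }
    if proposed.prev_block_hash != current.prev_block_hash {
        return Err("The previous hash of the same L2 block must be same");
    }
    Ok(())
}

fn main() {
    let current = BlockInfo { timestamp: 1, prev_block_hash: [0u8; 32] };

    // Unchanged block info is a valid continuation.
    let mut proposed = current;
    assert!(check_same_block_continuation(&current, &proposed).is_ok());

    // An overridden timestamp is rejected, as in "Case 1" below.
    proposed.timestamp = 0;
    assert!(check_same_block_continuation(&current, &proposed).is_err());
}
```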
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - 
l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + 
TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs deleted file mode 100644 index a07608121bc1..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// `mod invalid_bytecode;` -mod block_tip; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs deleted file mode 100644 index 915a802b1e85..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs +++ /dev/null @@ -1,188 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_1_4_1::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs deleted file mode 100644 index 37e871fbc70e..000000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
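A minimal sketch of the filter-and-count pattern the deleted precompile tests rely on, using a toy enum in place of `PrecompileAddress` and a plain slice in place of the VM's cycle history.

```rust
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
enum Precompile {
    Keccak256,
    Sha256,
    Ecrecover,
}

/// Counts how many history entries refer to the given precompile.
fn count_calls(history: &[(Precompile, usize)], target: Precompile) -> usize {
    history
        .iter()
        .filter(|(precompile, _)| *precompile == target)
        .count()
}

fn main() {
    // Toy history: (precompile, cycles spent in that invocation).
    let history = [
        (Precompile::Ecrecover, 12),
        (Precompile::Keccak256, 4),
        (Precompile::Sha256, 4),
    ];
    // A plain transfer performs exactly one ecrecover, during tx validation.
    assert_eq!(count_calls(&history, Precompile::Ecrecover), 1);
}
```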
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs deleted file mode 100644 index 8700eb14b53d..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs +++ /dev/null @@ -1,166 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
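A simplified sketch of the comparison shape used in the refund test, with a toy stand-in for the execution state: the used-contract-hash lists are sorted before comparing (they are flattened from hash maps) and must not depend on the refund value, while the event contents are expected to differ when the operator-provided refund changes.

```rust
struct ExecutionState {
    events: Vec<String>,
    used_contract_hashes: Vec<u64>,
}

fn main() {
    // Toy states produced with two different operator-provided refunds.
    let mut with_refund_a = ExecutionState {
        events: vec!["Refund(1000)".to_string()],
        used_contract_hashes: vec![3, 1, 2],
    };
    let mut with_refund_b = ExecutionState {
        events: vec!["Refund(2000)".to_string()],
        used_contract_hashes: vec![2, 1, 3],
    };

    // Sort before comparing, since the lists come from hash maps.
    with_refund_a.used_contract_hashes.sort();
    with_refund_b.used_contract_hashes.sort();

    // The set of touched contracts must not depend on the refund value...
    assert_eq!(
        with_refund_a.used_contract_hashes,
        with_refund_b.used_contract_hashes
    );
    // ...while the emitted events are allowed (and expected) to differ.
    assert_eq!(with_refund_a.events.len(), with_refund_b.events.len());
    assert_ne!(with_refund_a.events, with_refund_b.events);
}
```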
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs deleted file mode 100644 index aebc956e6735..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs deleted file mode 100644 index 2ae942c26526..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct 
nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number 
of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs deleted file mode 100644 index 384bc4cf325e..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - 
- let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs deleted file mode 100644 index 11e9d7fd6dfe..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_1::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d9803..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs deleted file mode 100644 index 443acf716763..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_1::{tests::tester::vm_tester::VmTester, HistoryEnabled}, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs deleted file mode 100644 index 24bd0b4d0bcc..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
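// Concretely (see the body below): the code hash is written under the target address'
// code key, the `is_account` marker slot is set to 1 when requested, and the raw
// bytecode is registered as a factory dependency so it can later be decommitted by hash.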
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs deleted file mode 100644 index 02c7590c1bea..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_1_4_1::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs deleted file mode 100644 index af3701d919fa..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs +++ /dev/null @@ -1,355 +0,0 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - 
tester::VmTesterBuilder, - utils::{read_complex_upgrade, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
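/// A single `ForceDeployment` with a random target address, no constructor call and empty
/// calldata is wrapped into a protocol upgrade transaction; after execution the test asserts
/// that the code key of that address holds the deployed bytecode hash.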
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs deleted file mode 100644 index da69c107a20b..000000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs +++ /dev/null @@ -1,121 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_1_4_1::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> 
Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 68c8e92a03a1..4122ee94e66a 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -11,7 +11,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_1::{ @@ -124,10 +123,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs index 600ab83bf484..182f6eff4414 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs index d42d18809331..f6e49cd8b149 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs @@ -96,6 +96,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs index 92a2eaa650c1..754b8476182f 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
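    /// Both the current size of each oracle and the size of its recorded history are
    /// included (e.g. `event_sink_inner` vs. `event_sink_history`).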
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs deleted file mode 100644 index 8578b73ccfa9..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs +++ /dev/null @@ -1,399 +0,0 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, - writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, - tracers::PubdataTracer, - TracerDispatcher, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .collect::>(); - - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(encoded_calls)]) - .unwrap() -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) 
-> TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, - calldata: data, - value: U256::zero(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful for input: {:?}", - test_data - ); - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used - ); - - TestStatistics { - max_used_gas: ergs_before - ergs_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if 
length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} - -#[test] -#[allow(clippy::vec_init_then_push)] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let mut statistics = Vec::new(); - - // max logs - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }); - - // max messages - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }); - - // long message - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }); - - // max bytecodes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }); - - // long bytecode - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; 1], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }); - - // lots of small repeated writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }); - - // lots of big repeated writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs(true, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }); - - // lots of small initial writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs(false, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }); - - // lots of large initial writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs(false, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }); - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
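    // In other words, each of the three assertions below takes the most expensive tagged
    // scenario (`statistics.iter().map(..).max()`) and requires the corresponding
    // `BOOTLOADER_BATCH_TIP_*` constant to be at least twice that measured value,
    // i.e. the constants keep a 100% safety margin over the worst observed case.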
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs deleted file mode 100644 index 8d69d05c4444..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs +++ /dev/null @@ -1,55 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_bootloader_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs deleted file mode 100644 index dd91d6d94a92..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs +++ /dev/null @@ -1,40 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; 
-use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs deleted file mode 100644 index 2fafb7e51aa9..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. 
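// It can still be run explicitly, e.g. with `cargo test -- --ignored`.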
-#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs deleted file mode 100644 index 7d0dfd1ed0ea..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder}, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. 
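// Non-zero expected entries are compared with a 10% relative tolerance; entries whose
// expected value is 0.0 must match exactly.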
-#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.003438, 0.00077, 0.1195, 0.1429, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs deleted file mode 100644 index b0717a57c565..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs +++ /dev/null @@ -1,77 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. 
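    // The checks below therefore pin three slots: the account nonce
    // (`TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT`), the known-codes marker for the
    // deployed bytecode hash, and the account's code hash itself; finally the operator's
    // balance is checked to equal `gas_limit * base_fee` minus the refunded gas cost.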
- let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs deleted file mode 100644 index b84e9d32126c..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs +++ /dev/null @@ -1,44 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs deleted file mode 100644 index cfe3e1bfc235..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that 
`get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs deleted file mode 100644 index c79fcd8ba8eb..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_1_4_2::tests::tester::VmTesterBuilder; -use crate::vm_1_4_2::types::inputs::system_env::TxExecutionMode; -use crate::vm_1_4_2::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs deleted file mode 100644 index 7da250ef7a9f..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs deleted file mode 100644 index 021f55548734..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs +++ /dev/null @@ -1,188 +0,0 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} - -#[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
- - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: L1_MESSENGER_ADDRESS, - value: 0.into(), - factory_deps: None, - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs deleted file mode 100644 index f722890f474b..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zk_evm_1_4_1::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. 
- vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option<u32>, - overriden_second_block_timestamp: Option<u64>, - overriden_second_block_prev_block_hash: Option<H256>, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option<Halt>, -) { - let mut l1_batch =
default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - 
TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs deleted file mode 100644 index a07608121bc1..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// `mod invalid_bytecode;` -mod block_tip; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs deleted file mode 100644 index 9f1be4ec9476..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs +++ /dev/null @@ -1,187 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_1_4_2::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs deleted file mode 100644 index 0a799288204c..000000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs +++ /dev/null @@ -1,135 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs deleted file mode 100644 index 5586450f34be..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_test_account::TxType; -use zksync_types::{utils::deployed_address_create, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::PrestateTracer, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, -}; - -#[test] -fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - vm.deploy_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm.test_contract.unwrap(), - false, - Default::default(), - true, - TxType::L2, - ); - vm.vm.push_transaction(tx1); - - let contract_address = vm.test_contract.unwrap(); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - assert!(prestate_result.1.contains_key(&contract_address)); -} - -#[test] -fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); - vm.test_contract = Some(deployed_address); - - // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - 
.get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce2 = tx2.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); - - let account = &mut vm.rich_accounts[0]; - - //enter ether to contract to see difference in the balance post execution - let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), - calldata: Default::default(), - value: U256::from(100000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); - - let tx1 = Execute { - contract_address: deployed_address2, - calldata: Default::default(), - value: U256::from(200000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx1, None)); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - //assert that the pre-state contains both deployed contracts with balance zero - assert!(prestate_result.0.contains_key(&deployed_address)); - assert!(prestate_result.0.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.0[&deployed_address].balance, - Some(U256::zero()) - ); - assert_eq!( - prestate_result.0[&deployed_address2].balance, - Some(U256::zero()) - ); - - //assert that the post-state contains both deployed contracts with the correct balance - assert!(prestate_result.1.contains_key(&deployed_address)); - assert!(prestate_result.1.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.1[&deployed_address].balance, - Some(U256::from(100000)) - ); - assert_eq!( - prestate_result.1[&deployed_address2].balance, - Some(U256::from(200000)) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs deleted file mode 100644 index 401c2c12a436..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. 
- assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm.push_raw_transaction( - tx.clone(), - overhead, - result.refunds.gas_refunded as u32, - true, - ); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund as u32, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs deleted file mode 100644 index 15f4504d6e1d..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs +++ /dev/null @@ -1,164 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs deleted file mode 100644 index 2ce18cc01361..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The 
correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible 
for calculating the number of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs deleted file mode 100644 index 57b37e67b769..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs +++ /dev/null @@ -1,78 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::tester::{TxType, VmTesterBuilder}, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - 
vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs deleted file mode 100644 index d6c072d1b1ed..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_2::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d9803..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs deleted file mode 100644 index cb81c4c5ed7a..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_2::tests::tester::vm_tester::VmTester, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs deleted file mode 100644 index 44f861f8d331..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView<InMemoryStorage>; - -pub(crate) struct VmTester<H: HistoryMode> { - pub(crate) vm: Vm<InMemoryStorageView, H>, - pub(crate) storage: StoragePtr<InMemoryStorageView>, - pub(crate) fee_account: Address, - pub(crate) deployer: Option<Account>, - pub(crate) test_contract: Option<Address>
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> 
Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
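// For reference — a minimal usage sketch, not part of the original file: these
// `(bytecode, address, is_account)` tuples normally reach `insert_contracts` through
// `VmTesterBuilder::with_custom_contracts`, much as the error-tracing test further below
// does (the concrete contract and address here are only illustrative):
//
//     let contract_address = H160::random();
//     let vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
//         .with_empty_in_memory_storage()
//         .with_custom_contracts(vec![(read_error_contract(), contract_address, false)])
//         .with_execution_mode(TxExecutionMode::VerifyExecute)
//         .with_random_rich_accounts(1)
//         .build();
//
// The helper below then writes each tuple's code hash, optional "is account" marker, and
// factory dependency directly into the raw storage: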
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs deleted file mode 100644 index 138e8041e6a0..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs +++ /dev/null @@ -1,51 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_1_4_2::tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs deleted file mode 100644 index 2af2928b1c44..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs +++ /dev/null @@ -1,352 +0,0 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_upgrade::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_1_4_2::tests::{ - 
tester::VmTesterBuilder, - utils::{read_complex_upgrade, verify_required_storage}, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - 
// The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs deleted file mode 100644 
index 5655e90fb4ee..000000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs +++ /dev/null @@ -1,121 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_1_4_2::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - 
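// For reference — an illustrative sketch, not part of the original utils.rs: the hard-coded
// revert payloads asserted in these deleted tests (e.g. the `msg: "short"` case in
// tracing_execution_error.rs above, and the `TxModifier` errors in transaction_test_info.rs)
// are standard `Error(string)` ABI encodings. The helper name below is hypothetical.
fn encode_error_string(msg: &str) -> Vec<u8> {
    // 4-byte selector of `Error(string)`: 0x08c379a0 (the leading 8, 195, 121, 160 in the tests)
    let mut out = vec![0x08, 0xc3, 0x79, 0xa0];
    // head word: byte offset of the dynamic string data (always 32 here)
    let mut offset = [0u8; 32];
    offset[31] = 32;
    out.extend_from_slice(&offset);
    // length word: string length as a big-endian integer (e.g. 5 for "short", 15 for "Incorrect nonce")
    let mut len = [0u8; 32];
    len[28..].copy_from_slice(&(msg.len() as u32).to_be_bytes());
    out.extend_from_slice(&len);
    // UTF-8 bytes, right-padded with zeros to a multiple of 32 bytes
    let mut data = msg.as_bytes().to_vec();
    data.resize((msg.len() + 31) / 32 * 32, 0);
    out.extend_from_slice(&data);
    out
}
// `encode_error_string("short")` reproduces the 100-byte vector expected by
// test_tracing_of_execution_errors: selector, offset 32, length 5, then "short" zero-padded.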
-pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index d6e1fbc68a8f..fe2015debd2b 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -13,7 +13,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_2::{ @@ -126,10 +125,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs index 1a1c620c2b26..c97d3ff30e49 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs index 79669eddd566..b8b939f86731 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs @@ -93,6 +93,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs index 46f8bc2f400b..015d5acd340b 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs deleted file mode 100644 index 57229abb0978..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs deleted file mode 100644 index ad1b0f26036e..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs deleted file mode 100644 index e9df4fa80ff0..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs deleted file mode 100644 index b0cffa7d3c8f..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs +++ /dev/null @@ -1,66 +0,0 @@ -use circuit_sequencer_api_1_4_0::geometry_config::get_geometry_config; -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let statistic = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - assert!(statistic.main_vm > f32::EPSILON); - assert!(statistic.ram_permutation > f32::EPSILON); - assert!(statistic.storage_application > f32::EPSILON); - assert!(statistic.storage_sorter > f32::EPSILON); - assert!(statistic.code_decommitter > f32::EPSILON); - assert!(statistic.code_decommitter_sorter > f32::EPSILON); - assert!(statistic.log_demuxer > f32::EPSILON); - assert!(statistic.events_sorter > f32::EPSILON); - assert!(statistic.keccak256 > f32::EPSILON); - // Single `ecrecover` should be used to validate tx signature. - assert_eq!( - statistic.ecrecover, - 1.0 / get_geometry_config().cycles_per_ecrecover_circuit as f32 - ); - // `sha256` shouldn't be used. 
- assert_eq!(statistic.sha256, 0.0); - - const EXPECTED_CIRCUITS_USED: f32 = 4.6363; - let delta = (statistic.total_f32() - EXPECTED_CIRCUITS_USED) / EXPECTED_CIRCUITS_USED; - - if delta.abs() > 0.1 { - panic!( - "Estimation differs from expected result by too much: {}%, expected value: {}, got {}", - delta * 100.0, - EXPECTED_CIRCUITS_USED, - statistic.total_f32(), - ); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs deleted file mode 100644 index a8c20cfebc1d..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs +++ /dev/null @@ -1,76 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs deleted file mode 100644 index 637fd94c1c89..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs deleted file mode 100644 index 658bcd75b059..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let 
result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed());
-
-    assert!(vm
-        .vm
-        .get_used_contracts()
-        .contains(&h256_to_u256(tx.bytecode_hash)));
-
-    // Note: Default_AA will be in the list of used contracts if l2 tx is used
-    assert_eq!(
-        vm.vm
-            .get_used_contracts()
-            .into_iter()
-            .collect::<HashSet<U256>>(),
-        known_bytecodes_without_aa_code(&vm.vm)
-            .keys()
-            .cloned()
-            .collect::<HashSet<U256>>()
-    );
-
-    // create push and execute some non-empty factory deps transaction that fails
-    // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated)
-
-    let calldata = [1, 2, 3];
-    let big_calldata: Vec<u8> = calldata
-        .iter()
-        .cycle()
-        .take(calldata.len() * 1024)
-        .cloned()
-        .collect();
-    let account2 = Account::random();
-    let tx2 = account2.get_l1_tx(
-        Execute {
-            contract_address: CONTRACT_DEPLOYER_ADDRESS,
-            calldata: big_calldata,
-            value: Default::default(),
-            factory_deps: Some(vec![vec![1; 32]]),
-        },
-        1,
-    );
-
-    vm.vm.push_transaction(tx2.clone());
-
-    let res2 = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert!(res2.result.is_failed());
-
-    for factory_dep in tx2.execute.factory_deps.unwrap() {
-        let hash = hash_bytecode(&factory_dep);
-        let hash_to_u256 = h256_to_u256(hash);
-        assert!(known_bytecodes_without_aa_code(&vm.vm)
-            .keys()
-            .contains(&hash_to_u256));
-        assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256));
-    }
-}
-
-fn known_bytecodes_without_aa_code<S: WriteStorage, H: HistoryMode>(
-    vm: &Vm<S, H>,
-) -> HashMap<U256, Vec<U256>> {
-    let mut known_bytecodes_without_aa_code = vm
-        .state
-        .decommittment_processor
-        .known_bytecodes
-        .inner()
-        .clone();
-
-    known_bytecodes_without_aa_code
-        .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash))
-        .unwrap();
-
-    known_bytecodes_without_aa_code
-}
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs
deleted file mode 100644
index 079e6d61b6c2..000000000000
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-use zksync_types::H256;
-use zksync_utils::h256_to_u256;
-
-use crate::vm_boojum_integration::tests::tester::VmTesterBuilder;
-use crate::vm_boojum_integration::types::inputs::system_env::TxExecutionMode;
-use crate::vm_boojum_integration::{HistoryEnabled, TxRevertReason};
-
-// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM.
-// Port it later, it's not significant.
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs deleted file mode 100644 index 67901490edfa..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs deleted file mode 100644 index b547f346d28c..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs +++ /dev/null @@ -1,139 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rolling hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in L1Messenger - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs deleted file mode 100644 index d637d583c0ec..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
- -use zk_evm_1_4_0::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
-
-    let l1_batch = default_l1_batch(L1BatchNumber(1));
-    let first_l2_block = L2BlockEnv {
-        number: 0,
-        timestamp: l1_batch.timestamp,
-        prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
-        max_virtual_blocks_to_create: 1,
-    };
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-
-    vm.vm.push_transaction(l1_tx);
-
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp);
-
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert_eq!(
-        res.result,
-        ExecutionResult::Halt {
-            reason: Halt::FailedToSetL2Block(
-                "L2 block number is never expected to be zero".to_string()
-            )
-        }
-    );
-}
-
-fn test_same_l2_block(
-    expected_error: Option<Halt>,
-    override_timestamp: Option<u64>,
-    override_prev_block_hash: Option<H256>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.timestamp = 1;
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-    vm.vm.push_transaction(l1_tx.clone());
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!res.result.is_failed());
-
-    let mut current_l2_block = vm.vm.batch_env.first_l2_block;
-
-    if let Some(timestamp) = override_timestamp {
-        current_l2_block.timestamp = timestamp;
-    }
-    if let Some(prev_block_hash) = override_prev_block_hash {
-        current_l2_block.prev_block_hash = prev_block_hash;
-    }
-
-    if (None, None) == (override_timestamp, override_prev_block_hash) {
-        current_l2_block.max_virtual_blocks_to_create = 0;
-    }
-
-    vm.vm.push_transaction(l1_tx);
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
    }
-}
-
-#[test]
-fn test_l2_block_same_l2_block() {
-    // This test aims to test the case when there are multiple transactions inside the same L2 block.
-
-    // Case 1: Incorrect timestamp
-    test_same_l2_block(
-        Some(Halt::FailedToSetL2Block(
-            "The timestamp of the same L2 block must be same".to_string(),
-        )),
-        Some(0),
-        None,
-    );
-
-    // Case 2: Incorrect previous block hash
-    test_same_l2_block(
-        Some(Halt::FailedToSetL2Block(
-            "The previous hash of the same L2 block must be same".to_string(),
-        )),
-        None,
-        Some(H256::zero()),
-    );
-
-    // Case 3: Correct continuation of the same L2 block
-    test_same_l2_block(None, None, None);
-}
-
-fn test_new_l2_block(
-    first_l2_block: L2BlockEnv,
-    overriden_second_block_number: Option<u32>,
-    overriden_second_block_timestamp: Option<u64>,
-    overriden_second_block_prev_block_hash: Option<H256>,
-    expected_error: Option<Halt>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.timestamp = 1;
-    l1_batch.first_l2_block = first_l2_block;
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-
-    // Firstly we execute the first transaction
-    vm.vm.push_transaction(l1_tx.clone());
-    vm.vm.execute(VmExecutionMode::OneTx);
-
-    let mut second_l2_block = vm.vm.batch_env.first_l2_block;
-    second_l2_block.number += 1;
-    second_l2_block.timestamp += 1;
-    second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash();
-
-    if let Some(block_number) = overriden_second_block_number {
-        second_l2_block.number = block_number;
-    }
-    if let Some(timestamp) = overriden_second_block_timestamp {
-        second_l2_block.timestamp = timestamp;
-    }
-    if let Some(prev_block_hash) = overriden_second_block_prev_block_hash {
-        second_l2_block.prev_block_hash = prev_block_hash;
-    }
-
-    vm.vm.bootloader_state.push_l2_block(second_l2_block);
-
-    vm.vm.push_transaction(l1_tx);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
    }
-}
-
-#[test]
-fn test_l2_block_new_l2_block() {
-    // This test is aimed to cover potential issue
-
-    let correct_first_block = L2BlockEnv {
-        number: 1,
-        timestamp: 1,
-        prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
-        max_virtual_blocks_to_create: 1,
-    };
-
-    // Case 1: Block number increasing by more than 1
-    test_new_l2_block(
-        correct_first_block,
-        Some(3),
-        None,
-        None,
-        Some(Halt::FailedToSetL2Block(
-            "Invalid new L2 block number".to_string(),
-        )),
-    );
-
-    // Case 2: Timestamp not increasing
-    test_new_l2_block(
-        correct_first_block,
-        None,
-        Some(1),
-        None,
-        Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())),
-    );
-
-    // Case 3: Incorrect previous block hash
-    test_new_l2_block(
-        correct_first_block,
-        None,
-        None,
-        Some(H256::zero()),
-        Some(Halt::FailedToSetL2Block(
-            "The current L2 block hash is incorrect".to_string(),
-        )),
-    );
-
-    // Case 4: Correct new block
-    test_new_l2_block(correct_first_block, None, None, None, None);
-}
-
-#[allow(clippy::too_many_arguments)]
-fn test_first_in_batch(
-    miniblock_timestamp: u64,
-    miniblock_number: u32,
-    pending_txs_hash: H256,
-    batch_timestamp: u64,
-    new_batch_timestamp: u64,
-    batch_number: u32,
-    proposed_block: L2BlockEnv,
-    expected_error: Option<Halt>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-
l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + 
TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs deleted file mode 100644 index 95377232b3e3..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs deleted file mode 100644 index 44ba3e4e323a..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs +++ /dev/null @@ -1,188 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_boojum_integration::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. 
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs deleted file mode 100644 index 516331d574f4..000000000000 --- 
a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_0::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 sha256 calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 ecrecover call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs deleted file mode 100644 index 521bd81f2ef4..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs +++ /dev/null @@ -1,167 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs deleted file mode 100644 index 90c3206b24bf..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs deleted file mode 100644 index cfaf1952c702..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_0::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), 
TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} 
- -/// Tracer responsible for calculating the number of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs deleted file mode 100644 index f6b1d83e02a3..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - 
vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs deleted file mode 100644 index 078a971e4bf1..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_0::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::{ - vm_boojum_integration::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e08..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs 
b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs deleted file mode 100644 index 4d6572fe78a2..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_boojum_integration::{tests::tester::vm_tester::VmTester, HistoryEnabled}, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs deleted file mode 100644 index fcea03e12cc8..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs +++ /dev/null @@ -1,295 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs deleted file mode 100644 index 8c538dcf9bf2..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_boojum_integration::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs deleted file mode 100644 index bc3d62f62a19..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs +++ /dev/null @@ -1,362 +0,0 @@ -use zk_evm_1_4_0::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::read_test_contract; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, 
VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{tester::VmTesterBuilder, utils::verify_required_storage}, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implenentaiton itself -// For the explanatation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - 
"etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs deleted file mode 100644 index 4fba188ac5b7..000000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs +++ /dev/null @@ -1,111 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_boojum_integration::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - 
(read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 17ce8365a0aa..ebc0a511d203 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -11,7 +11,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_boojum_integration::{ @@ -125,10 +124,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs index f280f56a828a..770f232019bf 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -171,8 +171,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. 
} => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 62fa82f52f23..0447304f69f4 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -31,7 +31,7 @@ fn test_get_used_contracts() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build(); - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); // create and push and execute some not-empty factory deps transaction with success status // to check that `get_decommitted_hashes()` updates @@ -50,7 +50,7 @@ fn test_get_used_contracts() { // Note: `Default_AA` will be in the list of used contracts if L2 tx is used assert_eq!( vm.vm.decommitted_hashes().collect::>(), - known_bytecodes_without_aa_code(&vm.vm) + known_bytecodes_without_base_system_contracts(&vm.vm) ); // create push and execute some non-empty factory deps transaction that fails @@ -83,20 +83,26 @@ fn test_get_used_contracts() { for factory_dep in tx2.execute.factory_deps { let hash = hash_bytecode(&factory_dep); let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm).contains(&hash_to_u256)); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).contains(&hash_to_u256)); assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); } } -fn known_bytecodes_without_aa_code(vm: &Vm) -> HashSet { - let mut known_bytecodes_without_aa_code = vm +fn known_bytecodes_without_base_system_contracts(vm: &Vm) -> HashSet { + let mut known_bytecodes_without_base_system_contracts = vm .world .bytecode_cache .keys() .cloned() .collect::>(); - known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - known_bytecodes_without_aa_code + known_bytecodes_without_base_system_contracts + .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + let was_removed = + known_bytecodes_without_base_system_contracts.remove(&h256_to_u256(evm_emulator.hash)); + assert!(was_removed); + } + known_bytecodes_without_base_system_contracts } /// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index e119cea01143..b4448683cf71 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -1,5 +1,5 @@ use ethabi::Token; -use zksync_eth_signer::{EthereumSigner, TransactionParameters}; +use zksync_eth_signer::TransactionParameters; use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, @@ -38,8 +38,8 @@ impl VmTester<()> { /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy /// and EIP712 transactions. /// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
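// Condensed sketch of the legacy-signature path the test below exercises (same helper
// calls as in the updated test; note that the signer API used here is now synchronous —
// the `.await`s and `#[tokio::test]` are removed — and `L2Tx::from_request` takes an
// additional boolean argument, passed as `false` by the test):
//
//     let aa_tx = private_account.sign_legacy_tx(aa_raw_tx);
//     let (tx_request, hash) =
//         TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap();
//     let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap();
//     l2_tx.set_input(aa_tx, hash);
//     // A (possibly malicious) operator sets the initiator to the AA account even though
//     // the signature comes from the EOA owner; validation of this is what the test checks.
//     l2_tx.common_data.initiator_address = account_abstraction.address;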
-#[tokio::test] -async fn test_require_eip712() { +#[test] +fn test_require_eip712() { // Use 3 accounts: // - `private_address` - EOA account, where we have the key // - `account_address` - AA account, where the contract is deployed @@ -104,10 +104,10 @@ async fn test_require_eip712() { blob_versioned_hashes: None, }; - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); l2_tx.set_input(aa_tx, hash); // Pretend that operator is malicious and sets the initiator to the AA account. l2_tx.common_data.initiator_address = account_abstraction.address; @@ -151,14 +151,13 @@ async fn test_require_eip712() { let signature = private_account .get_pk_signer() .sign_typed_data(&domain, &transaction_request) - .await .unwrap(); let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); let transaction: Transaction = l2_tx.into(); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index e7b3f2043385..cff72d8ec5aa 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -1,9 +1,12 @@ +use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{Execute, U256}; +use zksync_types::{Address, Execute, U256}; +use zksync_vm_interface::VmInterfaceExt; use crate::{ - interface::TxExecutionMode, + interface::{ExecutionResult, TxExecutionMode}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, utils::read_test_contract, @@ -142,3 +145,32 @@ fn test_vm_loadnext_rollbacks() { assert_eq!(result_without_rollbacks, result_with_rollbacks); } + +#[test] +fn rollback_in_call_mode() { + let counter_bytecode = read_test_contract(); + let counter_address = Address::repeat_byte(1); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::EthCall) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) + .with_random_rich_accounts(1) + .build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); + + let (compression_result, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + compression_result.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("This method always reverts") + ); + assert_eq!(vm_result.logs.storage_logs, []); +} diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 36698de105c1..0c20af57e033 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ 
-1,6 +1,8 @@ use std::{collections::HashMap, fmt, mem}; -use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION; +use zk_evm_1_5_0::{ + aux_structures::LogQuery, zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION, +}; use zksync_contracts::SystemContractCode; use zksync_types::{ l1::is_l1_tx_type, @@ -17,7 +19,7 @@ use zksync_types::{ use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use zksync_vm2::{ interface::{CallframeInterface, HeapId, StateInterface, Tracer}, - ExecutionEnd, FatPointer, Program, Settings, VirtualMachine, + ExecutionEnd, FatPointer, Program, Settings, StorageSlot, VirtualMachine, }; use super::{ @@ -35,8 +37,8 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, - VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, - VmRevertReason, VmTrackingContracts, + VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, + VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_fast::{ @@ -58,6 +60,31 @@ const VM_VERSION: MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemo type FullTracer = (Tr, CircuitsTracer); +#[derive(Debug)] +struct VmRunResult { + execution_result: ExecutionResult, + /// `true` if VM execution has terminated (as opposed to being stopped on a hook, e.g. when executing a single transaction + /// in a batch). Used for `execution_result == Revert { .. }` to understand whether VM logs should be reverted. + execution_ended: bool, + refunds: Refunds, + /// This value is used in stats. It's defined in the old VM as the latest value used when computing refunds (see the refunds tracer for `vm_latest`). + /// This is **not** equal to the pubdata diff before and after VM execution; e.g., when executing a batch tip, + /// `pubdata_published` is always 0 (since no refunds are computed). + pubdata_published: u32, +} + +impl VmRunResult { + fn should_ignore_vm_logs(&self) -> bool { + match &self.execution_result { + ExecutionResult::Success { .. } => false, + ExecutionResult::Halt { .. } => true, + // Logs generated during reverts should only be ignored if the revert has reached the root (bootloader) call frame, + // which is only possible with `TxExecutionMode::EthCall`. + ExecutionResult::Revert { .. } => self.execution_ended, + } + } +} + /// Fast VM wrapper. /// /// The wrapper is parametric by the storage and tracer types. 
Besides the [`Tracer`] trait, a tracer must have `'static` lifetime @@ -140,32 +167,35 @@ impl Vm { execution_mode: VmExecutionMode, tracer: &mut (Tr, CircuitsTracer), track_refunds: bool, - ) -> (ExecutionResult, Refunds) { + ) -> VmRunResult { let mut refunds = Refunds { gas_refunded: 0, operator_suggested_refund: 0, }; let mut last_tx_result = None; let mut pubdata_before = self.inner.pubdata() as u32; + let mut pubdata_published = 0; - let result = loop { + let (execution_result, execution_ended) = loop { let hook = match self.inner.run(&mut self.world, tracer) { ExecutionEnd::SuspendedOnHook(hook) => hook, - ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, + ExecutionEnd::ProgramFinished(output) => { + break (ExecutionResult::Success { output }, true); + } ExecutionEnd::Reverted(output) => { - break match TxRevertReason::parse_error(&output) { + let result = match TxRevertReason::parse_error(&output) { TxRevertReason::TxReverted(output) => ExecutionResult::Revert { output }, TxRevertReason::Halt(reason) => ExecutionResult::Halt { reason }, - } + }; + break (result, true); } ExecutionEnd::Panicked => { - break ExecutionResult::Halt { - reason: if self.gas_remaining() == 0 { - Halt::BootloaderOutOfGas - } else { - Halt::VMPanic - }, - } + let reason = if self.gas_remaining() == 0 { + Halt::BootloaderOutOfGas + } else { + Halt::VMPanic + }; + break (ExecutionResult::Halt { reason }, true); } }; @@ -175,7 +205,7 @@ impl Vm { } Hook::TxHasEnded => { if let VmExecutionMode::OneTx = execution_mode { - break last_tx_result.take().unwrap(); + break (last_tx_result.take().unwrap(), false); } } Hook::AskOperatorForRefund => { @@ -192,7 +222,8 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.pubdata() as u32; + let pubdata_after = self.inner.pubdata() as u32; + pubdata_published = pubdata_after.saturating_sub(pubdata_before); refunds.operator_suggested_refund = compute_refund( &self.batch_env, @@ -200,7 +231,7 @@ impl Vm { gas_spent_on_pubdata.as_u64(), tx_gas_limit, gas_per_pubdata_byte.low_u32(), - pubdata_published.saturating_sub(pubdata_before), + pubdata_published, self.bootloader_state .last_l2_block() .txs @@ -209,7 +240,7 @@ impl Vm { .hash, ); - pubdata_before = pubdata_published; + pubdata_before = pubdata_after; let refund_value = refunds.operator_suggested_refund; self.write_to_bootloader_heap([( OPERATOR_REFUNDS_OFFSET + current_tx_index, @@ -305,7 +336,12 @@ impl Vm { } }; - (result, refunds) + VmRunResult { + execution_result, + execution_ended, + refunds, + pubdata_published, + } } fn get_hook_params(&self) -> [U256; 3] { @@ -430,24 +466,24 @@ impl Vm { } let storage = &mut self.world.storage; - let diffs = self.inner.world_diff().get_storage_changes().map( - move |((address, key), (initial_value, final_value))| { - let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); - StateDiffRecord { - address, - key, - derived_key: - zk_evm_1_5_0::aux_structures::LogQuery::derive_final_address_for_params( - &address, &key, - ), - enumeration_index: storage - .get_enumeration_index(&storage_key) - .unwrap_or_default(), - initial_value: initial_value.unwrap_or_default(), - final_value, - } - }, - ); + let diffs = + self.inner + .world_diff() + .get_storage_changes() + .map(move |((address, key), change)| { + let storage_key = + StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); + StateDiffRecord { + address, + key, + derived_key: LogQuery::derive_final_address_for_params(&address, &key), + 
enumeration_index: storage + .get_enumeration_index(&storage_key) + .unwrap_or_default(), + initial_value: change.before, + final_value: change.after, + } + }); diffs .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) .collect() @@ -477,9 +513,9 @@ impl Vm { events, deduplicated_storage_logs: world_diff .get_storage_changes() - .map(|((address, key), (_, value))| StorageLog { + .map(|((address, key), change)| StorageLog { key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)), - value: u256_to_h256(value), + value: u256_to_h256(change.after), kind: StorageLogKind::RepeatedWrite, // Initialness doesn't matter here }) .collect(), @@ -527,18 +563,19 @@ impl VmInterface for Vm { } let start = self.inner.world_diff().snapshot(); - let pubdata_before = self.inner.pubdata(); let gas_before = self.gas_remaining(); let mut full_tracer = (mem::take(tracer), CircuitsTracer::default()); - let (result, refunds) = self.run(execution_mode, &mut full_tracer, track_refunds); + let result = self.run(execution_mode, &mut full_tracer, track_refunds); *tracer = full_tracer.0; // place the tracer back - let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) - && matches!(result, ExecutionResult::Halt { .. }); + let ignore_world_diff = + matches!(execution_mode, VmExecutionMode::OneTx) && result.should_ignore_vm_logs(); // If the execution is halted, the VM changes are expected to be rolled back by the caller. // Earlier VMs return empty execution logs in this case, so we follow this behavior. + // Likewise, if a revert has reached the bootloader frame (possible with `TxExecutionMode::EthCall`; otherwise, the bootloader catches reverts), + // old VMs revert all logs; the new VM doesn't do that automatically, so we recreate this behavior here. 
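+        // To summarize `should_ignore_vm_logs()` (only consulted under `VmExecutionMode::OneTx` above):
+        // - `Success`                          -> logs are kept;
+        // - `Halt`                             -> logs are dropped;
+        // - `Revert` caught by the bootloader  -> logs are kept;
+        // - `Revert` reaching the root frame   -> logs are dropped (only reachable with `EthCall`).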
let logs = if ignore_world_diff { VmExecutionLogs::default() } else { @@ -556,7 +593,7 @@ impl VmInterface for Vm { StorageLogKind::RepeatedWrite }, }, - previous_value: u256_to_h256(change.before.unwrap_or_default()), + previous_value: u256_to_h256(change.before), }) .collect(); let events = merge_events( @@ -584,23 +621,25 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.pubdata(); let gas_remaining = self.gas_remaining(); + let gas_used = gas_before - gas_remaining; + VmExecutionResultAndLogs { - result, + result: result.execution_result, logs, // TODO (PLA-936): Fill statistics; investigate whether they should be zeroed on `Halt` statistics: VmExecutionStatistics { + gas_used: gas_used.into(), + gas_remaining, + computational_gas_used: gas_used, // since 1.5.0, this always has the same value as `gas_used` + pubdata_published: result.pubdata_published, + circuit_statistic: full_tracer.1.circuit_statistic(), contracts_used: 0, cycles_used: 0, - gas_used: (gas_before - gas_remaining).into(), - gas_remaining, - computational_gas_used: 0, total_log_queries: 0, - pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, - circuit_statistic: full_tracer.1.circuit_statistic(), }, - refunds, + refunds: result.refunds, + new_known_factory_deps: None, } } @@ -628,10 +667,6 @@ impl VmInterface for Vm { self.bootloader_state.start_new_l2_block(l2_block_env) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - todo!("Unused during batch execution") - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut Tr::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); @@ -744,20 +779,27 @@ impl World { } impl zksync_vm2::StorageInterface for World { - fn read_storage(&mut self, contract: H160, key: U256) -> Option { + fn read_storage(&mut self, contract: H160, key: U256) -> StorageSlot { let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); - if self.storage.is_write_initial(key) { - None - } else { - Some(self.storage.read_value(key).as_bytes().into()) + let value = U256::from_big_endian(self.storage.read_value(key).as_bytes()); + // `is_write_initial` value can be true even if the slot has previously been written to / has non-zero value! + // This can happen during oneshot execution (i.e., executing a single transaction) since it emulates + // execution starting in the middle of a batch in the general case. Hence, a slot that was first written to in the batch + // must still be considered an initial write by the refund logic. + let is_write_initial = self.storage.is_write_initial(key); + StorageSlot { + value, + is_write_initial, } } - fn cost_of_writing_storage(&mut self, initial_value: Option, new_value: U256) -> u32 { - let is_initial = initial_value.is_none(); - let initial_value = initial_value.unwrap_or_default(); + fn read_storage_value(&mut self, contract: H160, key: U256) -> U256 { + let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); + U256::from_big_endian(self.storage.read_value(key).as_bytes()) + } - if initial_value == new_value { + fn cost_of_writing_storage(&mut self, slot: StorageSlot, new_value: U256) -> u32 { + if slot.value == new_value { return 0; } @@ -771,10 +813,9 @@ impl zksync_vm2::StorageInterface for World { // For value compression, we use a metadata byte which holds the length of the value and the operation from the // previous state to the new state, and the compressed value. The maximum for this is 33 bytes. 
// Total bytes for initial writes then becomes 65 bytes and repeated writes becomes 38 bytes. - let compressed_value_size = - compress_with_best_strategy(initial_value, new_value).len() as u32; + let compressed_value_size = compress_with_best_strategy(slot.value, new_value).len() as u32; - if is_initial { + if slot.is_write_initial { (BYTES_PER_DERIVED_KEY as u32) + compressed_value_size } else { (BYTES_PER_ENUMERATION_INDEX as u32) + compressed_value_size diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index 4931082d6daf..23c079202c1f 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -175,8 +175,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index b8242fa7ca85..e70f05f85ef2 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -14,6 +14,7 @@ use crate::{ circuits_capacity::circuit_statistic_from_cycles, dispatcher::TracerDispatcher, DefaultExecutionTracer, PubdataTracer, RefundsTracer, }, + utils::extract_bytecodes_marked_as_known, vm::Vm, }, HistoryMode, @@ -55,6 +56,10 @@ impl Vm { .then_some(RefundsTracer::new(self.batch_env.clone(), self.subversion)); let mut tx_tracer: DefaultExecutionTracer = DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, + self.system_env + .base_system_smart_contracts + .evm_emulator + .is_some(), execution_mode, mem::take(dispatcher), self.storage.clone(), @@ -95,6 +100,8 @@ impl Vm { circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); + let factory_deps_marked_as_known = extract_bytecodes_marked_as_known(&logs.events); + let new_known_factory_deps = self.decommit_bytecodes(&factory_deps_marked_as_known); *dispatcher = tx_tracer.dispatcher; let result = VmExecutionResultAndLogs { @@ -102,6 +109,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: Some(new_known_factory_deps), }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index 34c1e1f81da1..c1cf15043562 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs @@ -51,7 +51,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs index 98d71efa00f3..6dd73866adf2 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs @@ -59,7 +59,12 @@ impl Vm { tx: Transaction, with_compression: bool, ) { - let tx: TransactionData = tx.into(); + let use_evm_emulator = self + .system_env + .base_system_smart_contracts + .evm_emulator + .is_some(); + let tx = TransactionData::new(tx, use_evm_emulator); let overhead = tx.overhead_gas(); self.push_raw_transaction(tx, overhead, 0, with_compression); } diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs index 0315aa38327d..d91fbfdb24df 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs @@ -5,9 +5,7 @@ use zk_evm_1_5_0::{ aux_structures::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, - zkevm_opcode_defs::{ - ContractCodeSha256, VersionedHashDef, VersionedHashHeader, VersionedHashNormalizedPreimage, - }, + zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, }; use zksync_types::{H256, U256}; use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; @@ -166,8 +164,8 @@ impl DecommittmentProcess _monotonic_cycle_counter: u32, mut partial_query: DecommittmentQuery, ) -> anyhow::Result { - let (stored_hash, length) = stored_hash_from_query(&partial_query); - partial_query.decommitted_length = length; + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); if let Some(memory_page) = self .decommitted_code_hashes @@ -178,10 +176,10 @@ impl DecommittmentProcess { partial_query.is_fresh = false; partial_query.memory_page = MemoryPage(memory_page); + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } else { - partial_query.is_fresh = true; if self .decommitted_code_hashes .inner() @@ -190,7 +188,9 @@ impl DecommittmentProcess { self.decommitted_code_hashes .insert(stored_hash, None, partial_query.timestamp); - } + }; + partial_query.is_fresh = true; + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } @@ -204,11 +204,10 @@ impl DecommittmentProcess memory: &mut M, ) -> anyhow::Result>> { assert!(partial_query.is_fresh); - self.decommitment_requests.push((), partial_query.timestamp); - let stored_hash = stored_hash_from_query(&partial_query).0; - + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); // We are fetching a fresh bytecode that we didn't read before. 
let values = self.get_bytecode(stored_hash, partial_query.timestamp); let page_to_use = partial_query.memory_page; @@ -251,28 +250,49 @@ impl DecommittmentProcess } } -fn concat_header_and_preimage( - header: VersionedHashHeader, - normalized_preimage: VersionedHashNormalizedPreimage, -) -> [u8; 32] { - let mut buffer = [0u8; 32]; +#[derive(Debug)] +// TODO: consider moving this to the zk-evm crate +enum VersionedCodeHash { + ZkEVM(VersionedHashHeader, VersionedHashNormalizedPreimage), + Evm(VersionedHashHeader, VersionedHashNormalizedPreimage), +} - buffer[0..4].copy_from_slice(&header.0); - buffer[4..32].copy_from_slice(&normalized_preimage.0); +impl VersionedCodeHash { + fn from_query(query: &DecommittmentQuery) -> Self { + match query.header.0[0] { + 1 => Self::ZkEVM(query.header, query.normalized_preimage), + 2 => Self::Evm(query.header, query.normalized_preimage), + _ => panic!("Unsupported hash version"), + } + } - buffer -} + /// Returns the hash in the format it is stored in the DB. + fn to_stored_hash(&self) -> U256 { + let (header, preimage) = match self { + Self::ZkEVM(header, preimage) => (header, preimage), + Self::Evm(header, preimage) => (header, preimage), + }; -/// For a given decommitment query, returns a pair of the stored hash as U256 and the length of the preimage in 32-byte words. -fn stored_hash_from_query(partial_query: &DecommittmentQuery) -> (U256, u16) { - let full_hash = - concat_header_and_preimage(partial_query.header, partial_query.normalized_preimage); + let mut hash = [0u8; 32]; + hash[0..4].copy_from_slice(&header.0); + hash[4..32].copy_from_slice(&preimage.0); - let versioned_hash = - ContractCodeSha256::try_deserialize(full_hash).expect("Invalid ContractCodeSha256 hash"); + // Hash[1] is used in both of the versions to denote whether the bytecode is being constructed. + // We ignore this param. + hash[1] = 0; - let stored_hash = H256(ContractCodeSha256::serialize_to_stored(versioned_hash).unwrap()); - let length = versioned_hash.code_length_in_words; + h256_to_u256(H256(hash)) + } - (h256_to_u256(stored_hash), length) + fn get_preimage_length(&self) -> u32 { + // In zkEVM the hash[2..3] denotes the length of the preimage in words, while + // in EVM the hash[2..3] denotes the length of the preimage in bytes. 
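+        // For example (illustrative header values, not taken from this patch): a version-1 (zkEVM)
+        // hash with header bytes [1, 0, 0x00, 0x20] encodes 0x0020 = 32 words, i.e. 32 * 32 = 1024
+        // bytes of preimage, while a version-2 (EVM) hash with header bytes [2, 0, 0x01, 0x00]
+        // encodes 0x0100 = 256 bytes directly.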
+ match self { + Self::ZkEVM(header, _) => { + let length_in_words = header.0[2] as u32 * 256 + header.0[3] as u32; + length_in_words * 32 + } + Self::Evm(header, _) => header.0[2] as u32 * 256 + header.0[3] as u32, + } + } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs new file mode 100644 index 000000000000..ca8157b170d4 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -0,0 +1,76 @@ +use ethabi::Token; +use zksync_contracts::read_bytecode; +use zksync_system_constants::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_types::{get_code_key, get_known_code_key, Execute, H256}; +use zksync_utils::{be_words_to_bytes, bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::VmInterfaceExt; + +use crate::{ + interface::{storage::InMemoryStorage, TxExecutionMode}, + versions::testonly::default_system_env, + vm_latest::{tests::tester::VmTesterBuilder, utils::hash_evm_bytecode, HistoryEnabled}, +}; + +const MOCK_DEPLOYER_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; +const MOCK_KNOWN_CODE_STORAGE_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockKnownCodeStorage.json"; + +#[test] +fn tracing_evm_contract_deployment() { + let mock_deployer = read_bytecode(MOCK_DEPLOYER_PATH); + let mock_deployer_hash = hash_bytecode(&mock_deployer); + let mock_known_code_storage = read_bytecode(MOCK_KNOWN_CODE_STORAGE_PATH); + let mock_known_code_storage_hash = hash_bytecode(&mock_known_code_storage); + + // Override + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); + storage.set_value( + get_known_code_key(&mock_deployer_hash), + H256::from_low_u64_be(1), + ); + storage.set_value( + get_code_key(&KNOWN_CODES_STORAGE_ADDRESS), + mock_known_code_storage_hash, + ); + storage.set_value( + get_known_code_key(&mock_known_code_storage_hash), + H256::from_low_u64_be(1), + ); + storage.store_factory_dep(mock_deployer_hash, mock_deployer); + storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage); + + let mut system_env = default_system_env(); + // The EVM emulator will not be accessed, so we set it to a dummy value. + system_env.base_system_smart_contracts.evm_emulator = + Some(system_env.base_system_smart_contracts.default_aa.clone()); + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_random_rich_accounts(1) + .build(); + let account = &mut vm.rich_accounts[0]; + + let args = [Token::Bytes((0..=u8::MAX).collect())]; + let evm_bytecode = ethabi::encode(&args); + let expected_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); + let deploy_tx = account.get_l2_tx_for_execute(execute, None); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Check that the surrogate EVM bytecode was added to the decommitter. 
+ let known_bytecodes = vm.vm.state.decommittment_processor.known_bytecodes.inner(); + let known_evm_bytecode = + be_words_to_bytes(&known_bytecodes[&h256_to_u256(expected_bytecode_hash)]); + assert_eq!(known_evm_bytecode, evm_bytecode); + + let new_known_factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!(new_known_factory_deps.len(), 2); // the deployed EraVM contract + EVM contract + assert_eq!( + new_known_factory_deps[&expected_bytecode_hash], + evm_bytecode + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index ef19717a627c..d7cadc54b442 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -41,7 +41,7 @@ fn test_get_used_contracts() { .with_execution_mode(TxExecutionMode::VerifyExecute) .build(); - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); + assert!(known_bytecodes_without_base_system_contracts(&vm.vm).is_empty()); // create and push and execute some not-empty factory deps transaction with success status // to check that `get_used_contracts()` updates @@ -63,7 +63,7 @@ fn test_get_used_contracts() { .get_used_contracts() .into_iter() .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) + known_bytecodes_without_base_system_contracts(&vm.vm) .keys() .cloned() .collect::>() @@ -99,7 +99,7 @@ fn test_get_used_contracts() { for factory_dep in tx2.execute.factory_deps { let hash = hash_bytecode(&factory_dep); let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) + assert!(known_bytecodes_without_base_system_contracts(&vm.vm) .keys() .contains(&hash_to_u256)); assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); @@ -147,19 +147,24 @@ fn test_contract_is_used_right_after_prepare_to_decommit() { assert_eq!(vm.vm.get_used_contracts(), vec![bytecode_hash]); } -fn known_bytecodes_without_aa_code( +fn known_bytecodes_without_base_system_contracts( vm: &Vm, ) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm + let mut known_bytecodes_without_base_system_contracts = vm .state .decommittment_processor .known_bytecodes .inner() .clone(); - known_bytecodes_without_aa_code + known_bytecodes_without_base_system_contracts .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) .unwrap(); - known_bytecodes_without_aa_code + if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator { + known_bytecodes_without_base_system_contracts + .remove(&h256_to_u256(evm_emulator.hash)) + .unwrap(); + } + known_bytecodes_without_base_system_contracts } /// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. 
This leads to non-trivial diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index 0fc12848227e..4bb32cdf7ae0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -52,7 +52,7 @@ fn test_l1_tx_execution() { let contract_code = read_test_contract(); let account = &mut vm.rich_accounts[0]; let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); + let tx_data = TransactionData::new(deploy_tx.tx.clone(), false); let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { shard_id: 0, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs index 1203d61b80b7..112be637fe02 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs @@ -8,6 +8,7 @@ mod call_tracer; mod circuits; mod code_oracle; mod constants; +mod evm_emulator; mod gas_limit; mod get_used_contracts; mod is_write_initial; diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 91d78c69a931..6be49367d39e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -61,19 +61,17 @@ fn test_nonce_holder() { // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ) - .into(); - + let tx = account.get_l2_tx_for_execute_with_nonce( + Execute { + contract_address: Some(account.address), + calldata: vec![12], + value: Default::default(), + factory_deps: vec![], + }, + None, + Nonce(nonce), + ); + let mut transaction_data = TransactionData::new(tx, false); transaction_data.signature = vec![test_mode.into()]; vm.vm.push_raw_transaction(transaction_data, 0, 0, true); let result = vm.vm.execute(VmExecutionMode::OneTx); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index cc0085f20252..c00192aa8f10 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -62,7 +62,7 @@ fn test_predetermined_refunded_gas() { .with_rich_accounts(vec![account.clone()]) .build(); - let tx: TransactionData = tx.into(); + let tx = TransactionData::new(tx, false); // Overhead let overhead = tx.overhead_gas(); vm.vm diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index cdd71354c8de..1f38c6f947e3 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -1,5 +1,5 @@ use ethabi::Token; -use zksync_eth_signer::{EthereumSigner, TransactionParameters}; +use zksync_eth_signer::TransactionParameters; use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, @@ -29,11 +29,11 @@ impl VmTester { } // TODO refactor this test it use too much internal details of the VM -#[tokio::test] +#[test] /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy /// and EIP712 transactions. /// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -async fn test_require_eip712() { +fn test_require_eip712() { // Use 3 accounts: // - `private_address` - EOA account, where we have the key // - `account_address` - AA account, where the contract is deployed @@ -95,10 +95,10 @@ async fn test_require_eip712() { blob_versioned_hashes: None, }; - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); + let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap(); l2_tx.set_input(aa_tx, hash); // Pretend that operator is malicious and sets the initiator to the AA account. 
l2_tx.common_data.initiator_address = account_abstraction.address; @@ -142,14 +142,13 @@ async fn test_require_eip712() { let signature = private_account .get_pk_signer() .sign_typed_data(&domain, &transaction_request) - .await .unwrap(); let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); let (aa_txn_request, aa_hash) = TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); + let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap(); l2_tx.set_input(encoded_tx, aa_hash); let transaction: Transaction = l2_tx.into(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 880f189fd892..00a5d6494fe1 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,12 +1,14 @@ +use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{get_nonce_key, Execute, U256}; +use zksync_types::{get_nonce_key, Address, Execute, U256}; use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, + VmInterfaceHistoryEnabled, }, tracers::dynamic::vm_1_5_0::DynTracer, vm_latest::{ @@ -258,3 +260,37 @@ fn test_layered_rollback() { let result = vm.vm.execute(VmExecutionMode::OneTx); assert!(!result.result.is_failed(), "transaction must not fail"); } + +#[test] +fn rollback_in_call_mode() { + let counter_bytecode = read_test_contract(); + let counter_address = Address::repeat_byte(1); + + let mut vm = VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::EthCall) + .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_random_rich_accounts(1) + .build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); + + let (compression_result, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + compression_result.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("This method always reverts") + ); + + let storage_logs = vm + .vm + .get_current_execution_state() + .deduplicated_storage_logs; + assert!( + storage_logs.iter().all(|log| !log.is_write()), + "{storage_logs:?}" + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs index 6a908c2a73ed..2ae5e81a328c 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs @@ -13,7 +13,7 @@ use zk_evm_1_5_0::{ zkevm_opcode_defs::{decoding::EncodingModeProduction, Opcode, RetOpcode}, }; -use super::PubdataTracer; +use super::{EvmDeployTracer, PubdataTracer}; use crate::{ glue::GlueInto, interface::{ @@ -38,7 +38,7 @@ use crate::{ }; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. 
-pub(crate) struct DefaultExecutionTracer { +pub struct DefaultExecutionTracer { tx_has_been_processed: bool, execution_mode: VmExecutionMode, @@ -63,14 +63,18 @@ pub(crate) struct DefaultExecutionTracer { // It only takes into account circuits that are generated for actual execution. It doesn't // take into account e.g circuits produced by the initial bootloader memory commitment. pub(crate) circuits_tracer: CircuitsTracer, + // This tracer is responsible for handling EVM deployments and providing the data to the code decommitter. + pub(crate) evm_deploy_tracer: Option>, subversion: MultiVMSubversion, storage: StoragePtr, _phantom: PhantomData, } impl DefaultExecutionTracer { + #[allow(clippy::too_many_arguments)] pub(crate) fn new( computational_gas_limit: u32, + use_evm_emulator: bool, execution_mode: VmExecutionMode, dispatcher: TracerDispatcher, storage: StoragePtr, @@ -92,6 +96,7 @@ impl DefaultExecutionTracer { pubdata_tracer, ret_from_the_bootloader: None, circuits_tracer: CircuitsTracer::new(), + evm_deploy_tracer: use_evm_emulator.then(EvmDeployTracer::new), storage, _phantom: PhantomData, } @@ -172,6 +177,9 @@ macro_rules! dispatch_tracers { tracer.$function($( $params ),*); } $self.circuits_tracer.$function($( $params ),*); + if let Some(tracer) = &mut $self.evm_deploy_tracer { + tracer.$function($( $params ),*); + } }; } @@ -289,6 +297,12 @@ impl DefaultExecutionTracer { .finish_cycle(state, bootloader_state) .stricter(&result); + if let Some(evm_deploy_tracer) = &mut self.evm_deploy_tracer { + result = evm_deploy_tracer + .finish_cycle(state, bootloader_state) + .stricter(&result); + } + result.stricter(&self.should_stop_execution()) } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs new file mode 100644 index 000000000000..d91ee13a920a --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs @@ -0,0 +1,105 @@ +use std::{marker::PhantomData, mem}; + +use zk_evm_1_5_0::{ + aux_structures::Timestamp, + tracing::{AfterExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{ + FarCallOpcode, FatPointer, Opcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, + }, +}; +use zksync_types::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_utils::{bytes_to_be_words, h256_to_u256}; +use zksync_vm_interface::storage::StoragePtr; + +use super::{traits::VmTracer, utils::read_pointer}; +use crate::{ + interface::{storage::WriteStorage, tracer::TracerExecutionStatus}, + tracers::dynamic::vm_1_5_0::DynTracer, + vm_latest::{ + utils::hash_evm_bytecode, BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState, + }, +}; + +/// Tracer responsible for collecting information about EVM deploys and providing those +/// to the code decommitter. 
+#[derive(Debug)] +pub(crate) struct EvmDeployTracer { + tracked_signature: [u8; 4], + pending_bytecodes: Vec>, + _phantom: PhantomData, +} + +impl EvmDeployTracer { + pub(crate) fn new() -> Self { + let tracked_signature = + ethabi::short_signature("publishEVMBytecode", &[ethabi::ParamType::Bytes]); + + Self { + tracked_signature, + pending_bytecodes: vec![], + _phantom: PhantomData, + } + } +} + +impl DynTracer> for EvmDeployTracer { + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &SimpleMemory, + _storage: StoragePtr, + ) { + if !matches!( + data.opcode.variant.opcode, + Opcode::FarCall(FarCallOpcode::Normal) + ) { + return; + }; + + let current = state.vm_local_state.callstack.current; + let from = current.msg_sender; + let to = current.this_address; + if from != CONTRACT_DEPLOYER_ADDRESS || to != KNOWN_CODES_STORAGE_ADDRESS { + return; + } + + let calldata_ptr = + state.vm_local_state.registers[usize::from(CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER)]; + let data = read_pointer(memory, FatPointer::from_u256(calldata_ptr.value)); + if data.len() < 4 { + return; + } + let (signature, data) = data.split_at(4); + if signature != self.tracked_signature { + return; + } + + match ethabi::decode(&[ethabi::ParamType::Bytes], data) { + Ok(decoded) => { + let published_bytecode = decoded.into_iter().next().unwrap().into_bytes().unwrap(); + self.pending_bytecodes.push(published_bytecode); + } + Err(err) => tracing::error!("Unable to decode `publishEVMBytecode` call: {err}"), + } + } +} + +impl VmTracer for EvmDeployTracer { + fn finish_cycle( + &mut self, + state: &mut ZkSyncVmState, + _bootloader_state: &mut BootloaderState, + ) -> TracerExecutionStatus { + for published_bytecode in mem::take(&mut self.pending_bytecodes) { + let hash = hash_evm_bytecode(&published_bytecode); + let as_words = bytes_to_be_words(published_bytecode); + + state.decommittment_processor.populate( + vec![(h256_to_u256(hash), as_words)], + Timestamp(state.local_state.timestamp), + ); + } + TracerExecutionStatus::Continue + } +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs index fe916e19e8ca..82721a322640 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs @@ -1,11 +1,13 @@ pub(crate) use circuits_tracer::CircuitsTracer; pub(crate) use default_tracers::DefaultExecutionTracer; +pub(crate) use evm_deploy_tracer::EvmDeployTracer; pub(crate) use pubdata_tracer::PubdataTracer; pub(crate) use refunds::RefundsTracer; pub(crate) use result_tracer::ResultTracer; pub(crate) mod circuits_tracer; pub(crate) mod default_tracers; +pub(crate) mod evm_deploy_tracer; pub(crate) mod pubdata_tracer; pub(crate) mod refunds; pub(crate) mod result_tracer; diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 2ec86eb3ceaf..90948f2f89fd 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -46,8 +46,8 @@ pub(crate) struct TransactionData { pub(crate) raw_bytes: Option>, } -impl From for TransactionData { - fn from(execute_tx: Transaction) -> Self { +impl TransactionData { + pub(crate) fn new(execute_tx: Transaction, use_evm_emulator: bool) -> Self { match execute_tx.common_data { 
ExecuteTransactionCommon::L2(common_data) => { let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes()); @@ -62,6 +62,19 @@ impl From for TransactionData { U256::zero() }; + let should_deploy_contract = if execute_tx.execute.contract_address.is_none() { + // Transactions with no `contract_address` should be filtered out by the API server, + // so this is more of a sanity check. + assert!( + use_evm_emulator, + "`execute.contract_address` not set for transaction {:?} with EVM emulation disabled", + common_data.hash() + ); + U256([1, 0, 0, 0]) + } else { + U256::zero() + }; + // Ethereum transactions do not sign gas per pubdata limit, and so for them we need to use // some default value. We use the maximum possible value that is allowed by the bootloader // (i.e. we can not use u64::MAX, because the bootloader requires gas per pubdata for such @@ -85,7 +98,7 @@ impl From for TransactionData { value: execute_tx.execute.value, reserved: [ should_check_chain_id, - U256::zero(), + should_deploy_contract, U256::zero(), U256::zero(), ], diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index 6f9522572ad8..cb4b13eecdf0 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -98,6 +98,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + if let Some(evm_emulator) = &system_env.base_system_smart_contracts.evm_emulator { + decommittment_processor.populate( + vec![(h256_to_u256(evm_emulator.hash), evm_emulator.code.clone())], + Timestamp(0), + ); + } + memory.populate( vec![( BOOTLOADER_CODE_PAGE, @@ -117,6 +124,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + // By convention, default AA is used as a fallback if the EVM emulator is not available. + let evm_emulator_code_hash = system_env + .base_system_smart_contracts + .evm_emulator + .as_ref() + .unwrap_or(&system_env.base_system_smart_contracts.default_aa) + .hash; let mut vm = VmState::empty_state( storage_oracle, memory, @@ -128,11 +142,7 @@ pub(crate) fn new_vm_state( default_aa_code_hash: h256_to_u256( system_env.base_system_smart_contracts.default_aa.hash, ), - // For now, the default account hash is used as the code hash for the EVM simulator. - // In the 1.5.0 version, it is not possible to instantiate EVM bytecode. - evm_simulator_code_hash: h256_to_u256( - system_env.base_system_smart_contracts.default_aa.hash, - ), + evm_simulator_code_hash: h256_to_u256(evm_emulator_code_hash), zkporter_is_available: system_env.zk_porter_available, }, ); diff --git a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs index 0fb803de5d4e..e07d3eda7c4c 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs @@ -1,6 +1,57 @@ -/// Utility functions for the VM. +//! Utility functions for the VM. 
+ +use once_cell::sync::Lazy; +use zk_evm_1_5_0::{ + aux_structures::MemoryPage, + sha2, + zkevm_opcode_defs::{BlobSha256Format, VersionedHashLen32}, +}; +use zksync_types::{H256, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_vm_interface::VmEvent; + pub mod fee; pub mod l2_blocks; pub(crate) mod logs; pub mod overhead; pub mod transaction_encoding; + +pub(crate) fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { + use sha2::{Digest, Sha256}; + let mut hasher = Sha256::new(); + let len = bytecode.len() as u16; + hasher.update(bytecode); + let result = hasher.finalize(); + + let mut output = [0u8; 32]; + output[..].copy_from_slice(result.as_slice()); + output[0] = BlobSha256Format::VERSION_BYTE; + output[1] = 0; + output[2..4].copy_from_slice(&len.to_be_bytes()); + + H256(output) +} + +pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 2) +} + +/// Extracts all bytecodes marked as known on the system contracts. +pub fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { + static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { + ethabi::long_signature( + "MarkedAsKnown", + &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], + ) + }); + + all_generated_events + .iter() + .filter(|event| { + // Filter events from the deployer contract that match the expected signature. + event.address == KNOWN_CODES_STORAGE_ADDRESS + && event.indexed_topics.len() == 3 + && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE + }) + .map(|event| event.indexed_topics[1]) + .collect() +} diff --git a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs index 86c49a3eb15d..ed532f89dbc6 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs @@ -10,7 +10,9 @@ pub trait TransactionVmExt { impl TransactionVmExt for Transaction { fn bootloader_encoding_size(&self) -> usize { - let transaction_data: TransactionData = self.clone().into(); + // Since we want to just measure the encoding size, `use_evm_emulator` arg doesn't matter here, + // so we use a more lenient option. 
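+        // (Passing `true` only avoids the sanity-check panic in `TransactionData::new` for
+        // transactions without `contract_address`; the resulting deploy flag lives in the
+        // fixed-size `reserved` array, so the encoded length is the same either way.)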
+ let transaction_data = TransactionData::new(self.clone(), true); transaction_data.into_tokens().len() } } diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 506b6666ecd9..f4cc1580e935 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,10 +1,12 @@ +use std::collections::HashMap; + use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, vm::VmVersion, Transaction, H256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{be_words_to_bytes, h256_to_u256, u256_to_h256}; use crate::{ glue::GlueInto, @@ -13,7 +15,7 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmTrackingContracts, + VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_latest::{ @@ -79,6 +81,20 @@ impl Vm { self.state.local_state.callstack.current.ergs_remaining } + pub(crate) fn decommit_bytecodes(&self, hashes: &[H256]) -> HashMap> { + let bytecodes = hashes.iter().map(|&hash| { + let bytecode_words = self + .state + .decommittment_processor + .known_bytecodes + .inner() + .get(&h256_to_u256(hash)) + .unwrap_or_else(|| panic!("Bytecode with hash {hash:?} not found")); + (hash, be_words_to_bytes(bytecode_words)) + }); + bytecodes.collect() + } + // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); @@ -161,10 +177,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 40f66659f29f..5a26506f3463 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -50,6 +50,10 @@ impl Vm { _phantom: Default::default(), } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics::default() + } } impl VmInterface for Vm { @@ -106,19 +110,6 @@ impl VmInterface for Vm { ) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: 0, - event_sink_history: 0, - memory_inner: 0, - memory_history: 0, - decommittment_processor_inner: 0, - decommittment_processor_history: 0, - storage_inner: 0, - storage_history: 0, - } - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 627687a5524a..1fdc8ae64f80 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -50,6 +50,23 @@ impl Vm { system_env, } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics { + event_sink_inner: self.vm.state.event_sink.get_size(), + event_sink_history: self.vm.state.event_sink.get_history_size(), + memory_inner: self.vm.state.memory.get_size(), + memory_history: 
self.vm.state.memory.get_history_size(), + decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), + decommittment_processor_history: self + .vm + .state + .decommittment_processor + .get_history_size(), + storage_inner: self.vm.state.storage.get_size(), + storage_history: self.vm.state.storage.get_history_size(), + } + } } impl VmInterface for Vm { @@ -186,23 +203,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: self.vm.state.event_sink.get_size(), - event_sink_history: self.vm.state.event_sink.get_history_size(), - memory_inner: self.vm.state.memory.get_size(), - memory_history: self.vm.state.memory.get_history_size(), - decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), - decommittment_processor_history: self - .vm - .state - .decommittment_processor - .get_history_size(), - storage_inner: self.vm.state.storage.get_size(), - storage_history: self.vm.state.storage.get_history_size(), - } - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index 7bd488f90a9c..14c895d7a0b4 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs index 8196760a621b..9462a89be2ab 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs @@ -90,6 +90,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs index dcda1457b765..a73c212db29c 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs deleted file mode 100644 index 23b250d485b7..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{Halt, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::interface::ExecutionResult; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs deleted file mode 100644 index b2c126dea00e..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let 
DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs deleted file mode 100644 index fb2d3389407d..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_refunds_enhancement::{CallTracer, HistoryEnabled}; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs deleted file mode 100644 index 92e043ae96f3..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs deleted file mode 100644 index 1ff6ce12557f..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; - -use crate::interface::TxExecutionMode; -use crate::vm_refunds_enhancement::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs deleted file mode 100644 index 8c121db3e43e..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_refunds_enhancement::{HistoryDisabled, HistoryMode, Vm}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = 
Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs deleted file mode 100644 index 88ed141630a2..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::types::inputs::system_env::TxExecutionMode; -use crate::vm_refunds_enhancement::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs deleted file mode 100644 index d7b961330002..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs deleted file mode 100644 index 138879cd7ede..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs deleted file mode 100644 index 269b6cf396c6..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs +++ /dev/null @@ -1,498 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
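// The l2_blocks tests below repeatedly seed the SYSTEM_CONTEXT slots with
// `pack_block_info(number, timestamp)` and read them back via `unpack_block_info`.
// A minimal sketch of that layout, assuming the packed word is `number * 2^128 + timestamp`
// (the `_sketch` helpers are illustrative stand-ins, not the crate's API):
fn pack_block_info_sketch(number: u64, timestamp: u64) -> (u128, u128) {
    // Model the 256-bit storage word as (high 128 bits, low 128 bits).
    (number as u128, timestamp as u128)
}

fn unpack_block_info_sketch((hi, lo): (u128, u128)) -> (u64, u64) {
    (hi as u64, lo as u64)
}

#[test]
fn block_info_packing_round_trips() {
    let packed = pack_block_info_sketch(1, 8);
    assert_eq!(unpack_block_info_sketch(packed), (1, 8));
}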
- -use crate::interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_refunds_enhancement::tests::tester::default_l1_batch; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, Vm}; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
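    // `test_same_l2_block` above re-uses the batch's first L2 block info for the second
    // transaction, optionally overriding its timestamp or previous block hash; when neither
    // override is supplied it only zeroes `max_virtual_blocks_to_create`, which should still
    // be accepted as a continuation of the same block (Case 3).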
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number 
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs deleted file mode 100644 index ffb38dd3725a..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs deleted file mode 100644 index 219594619065..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs +++ /dev/null @@ -1,181 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::TxExecutionMode; -use crate::interface::VmRevertReason; -use crate::interface::{ExecutionResult, Halt, TxRevertReason, 
VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_nonce_holder_tester; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. 
- run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs deleted file mode 100644 index 54c281a99398..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; - -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - 
bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_events_logs, - current_state_without_predefined_refunds.deduplicated_events_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
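    // Concretely, only the refund-dependent artifacts should diverge: the assertions below
    // require the event and storage-log sets to differ (with equal lengths), while the
    // user L2->L1 logs, system logs, deduplicated event logs and used contract hashes
    // stay identical to the run without a predefined refund.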
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.deduplicated_events_logs, - current_state_without_predefined_refunds.deduplicated_events_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs deleted file mode 100644 index 03a704841b04..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs +++ /dev/null @@ -1,163 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{ - AccountTreeId, Address, Eip712Domain, Execute, L2ChainId, Nonce, Transaction, U256, -}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_many_owners_custom_account_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// 
and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs deleted file mode 100644 index 8107ddcdabf6..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs +++ /dev/null @@ -1,259 +0,0 @@ -use ethabi::Token; - -use zksync_contracts::get_loadnext_contract; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; - -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{ - DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::{ - BootloaderState, DynTracer, HistoryEnabled, HistoryMode, TracerExecutionStatus, - TracerExecutionStopReason, VmTracer, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - 
TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct 
MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - vec![Box::new(MaxRecursionTracer { - max_recursion_depth: 15, - })], - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.inspect(vec![], VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs deleted file mode 100644 index eb5e38798379..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::interface::{ExecutionResult, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = 
account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs deleted file mode 100644 index 3158fc494441..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs +++ /dev/null @@ -1,127 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::VmLocalState; -use crate::interface::storage::WriteStorage; - -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::vm_refunds_enhancement::old_vm::event_sink::InMemoryEventSink; -use crate::vm_refunds_enhancement::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HistoryRecorder, -}; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e08..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs deleted file mode 100644 index 8f7ecc0a7339..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::interface::VmRevertReason; -use crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, -}; -use crate::vm_refunds_enhancement::tests::tester::vm_tester::VmTester; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs deleted file mode 100644 index 800af517ed3c..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs +++ /dev/null @@ -1,300 +0,0 @@ -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; -use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, - ProtocolVersionId, U256, -}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, -}; -use crate::vm_refunds_enhancement::tests::tester::Account; -use crate::vm_refunds_enhancement::tests::tester::TxType; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::utils::l2_blocks::load_last_l2_block; -use crate::vm_refunds_enhancement::{HistoryMode, Vm}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - history_mode: H, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: legacy_miniblock_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new( - l1_batch, - self.vm.system_env.clone(), - self.storage.clone(), - self.history_mode.clone(), - ); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - history_mode: H, - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - history_mode: self.history_mode.clone(), - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(history_mode: H) -> Self { - Self { - history_mode, - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, 
system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new( - l1_batch_env, - self.system_env, - storage_ptr.clone(), - self.history_mode.clone(), - ); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - history_mode: self.history_mode, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
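// Usage sketch condensed from the deleted tests (not part of this diff): custom contracts are
// handed to the builder as `(bytecode, address, is_account)` tuples and written straight into
// storage by `insert_contracts` during `build()`, bypassing the deployer system contract.
// Concrete values below are illustrative placeholders.
let mut vm = VmTesterBuilder::new(HistoryEnabled)
    .with_empty_in_memory_storage()
    .with_execution_mode(TxExecutionMode::VerifyExecute)
    .with_custom_contracts(vec![(read_test_contract(), Address::random(), false)])
    .with_random_rich_accounts(1)
    .build();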
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs deleted file mode 100644 index a839f4708ad4..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::interface::TxExecutionMode; -use crate::interface::{TxRevertReason, VmRevertReason}; -use crate::vm_refunds_enhancement::tests::tester::{ - ExpectedError, TransactionTestInfo, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs deleted file mode 100644 index cbbec9a83d5e..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs +++ /dev/null @@ -1,342 +0,0 @@ -use zk_evm_1_3_3::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; - -use crate::interface::{ExecutionResult, Halt, 
TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::verify_required_storage; -use crate::vm_refunds_enhancement::HistoryEnabled; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; - -use super::utils::read_test_contract; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. -#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
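// Illustrative sketch, not part of this diff: the invariant exercised by
// `test_protocol_upgrade_is_first` above, restated as a tiny std-only check over a block's
// transaction list, where `is_upgrade[i]` marks whether tx `i` is a protocol-upgrade
// transaction. This is a simplified restatement, not the bootloader's actual assertion.
fn upgrade_tx_placement_is_valid(is_upgrade: &[bool]) -> bool {
    let upgrade_count = is_upgrade.iter().filter(|&&flag| flag).count();
    // At most one protocol-upgrade tx per block, and if present it must come first.
    upgrade_count == 0 || (upgrade_count == 1 && is_upgrade[0])
}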
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implenentaiton itself -// For the explanatation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs deleted file mode 100644 index ffbb9d892607..000000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs +++ 
/dev/null @@ -1,106 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; - -use crate::vm_refunds_enhancement::tests::tester::InMemoryStorageView; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::HistoryMode; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 735bd29c3b09..d87fd4d104da 
100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -8,7 +8,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_refunds_enhancement::{ @@ -118,10 +117,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index 2ccedcc6aa94..3e2474835fa4 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs index c48d48edd3b4..b1ad4d257b77 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs @@ -88,6 +88,7 @@ impl Vm { .refund_tracer .map(|r| r.get_refunds()) .unwrap_or_default(), + new_known_factory_deps: None, }; tx_tracer.dispatcher.save_results(&mut result); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs index d082085a1550..dbd8813035e9 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs deleted file mode 100644 index a30b5a58f638..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::vm_latest::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs deleted file mode 100644 index 773aa77e1502..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs deleted file mode 100644 index 7ee647ee1f7d..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::tracers::CallTracer; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_virtual_blocks::tracers::traits::ToTracerPointer; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect( - call_tracer.into_tracer_pointer().into(), - VmExecutionMode::OneTx, - ); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
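// Illustrative sketch, not part of this diff: the call-tracer tests here hand the collected
// trace out through a shared `Arc<OnceCell<_>>`. The same pattern with std's `OnceLock`
// (the deleted code uses the equivalent `once_cell::sync::OnceCell`); `TracedCalls` is a
// placeholder for the real call-trace type.
use std::sync::{Arc, OnceLock};

type TracedCalls = Vec<String>;

struct SketchTracer {
    result: Arc<OnceLock<TracedCalls>>,
}

impl SketchTracer {
    fn finish(&self, calls: TracedCalls) {
        // Publish the trace exactly once; readers observe it later via the shared handle.
        let _ = self.result.set(calls);
    }
}

fn demo() {
    let result = Arc::new(OnceLock::new());
    let tracer = SketchTracer { result: result.clone() };
    tracer.finish(vec!["call 0".to_string()]);
    assert!(result.get().is_some());
}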
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs deleted file mode 100644 index 02a69a6a5d2d..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
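// Illustrative sketch, not part of this diff: the fee assertion at the end of
// `test_default_aa_interaction` below is plain arithmetic, shown here with `u128`
// standing in for `U256`:
fn operator_fee(gas_limit: u128, gas_refunded: u128, base_fee: u128) -> u128 {
    // maximal_fee = gas_limit * base_fee; the refund is returned at the same base fee,
    // so the operator keeps (gas_limit - gas_refunded) * base_fee.
    (gas_limit - gas_refunded) * base_fee
}
// e.g. (hypothetical numbers) gas_limit = 1_000_000 with a 250_000 gas refund at a
// base fee of 250_000_000 wei leaves the operator 750_000 * 250_000_000 wei.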
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs deleted file mode 100644 index e51b8cab570e..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; - -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs deleted file mode 100644 index 06d8191310bc..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::HistoryMode; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_virtual_blocks::Vm; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = 
Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs deleted file mode 100644 index f8074c1db107..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::interface::TxExecutionMode; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
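// Hedged reconstruction, not part of this diff: the header rules these vectors exercise,
// inferred only from the test inputs and expected error strings (byte 0 = version, byte 1 must
// be zero, bytes 2..4 = big-endian length in 32-byte words, which must be odd). This illustrates
// the test vectors; it is not the VM's actual validation code.
fn check_bytecode_hash_header(hash: &[u8; 32]) -> Result<(), &'static str> {
    if hash[0] != 1 || hash[1] != 0 {
        return Err("Incorrectly formatted bytecodeHash");
    }
    let len_in_words = u16::from_be_bytes([hash[2], hash[3]]);
    if len_in_words % 2 == 0 {
        return Err("Code length in words must be odd");
    }
    Ok(())
}
// Against the vectors in this test: [1, 0, 0, 1, ...] passes (1 word, odd), [1, 0, 2, 2, ...]
// has an even word count, and hashes starting [1, 1, ...] or [2, 0, ...] are rejected as
// incorrectly formatted.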
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs deleted file mode 100644 index 2c7ef4a8d11a..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs deleted file mode 100644 index 64d9f98ddb35..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs deleted file mode 100644 index cba534deeaf6..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs +++ /dev/null @@ -1,502 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
- -use crate::interface::{ - ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface, -}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_virtual_blocks::tests::tester::default_l1_batch; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_virtual_blocks::Vm; -use crate::HistoryMode; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number 
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs deleted file mode 100644 index ffb38dd3725a..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs deleted file mode 100644 index 162a3f46cb10..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs +++ /dev/null @@ -1,182 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, -}; -use crate::vm_latest::HistoryEnabled; -use 
crate::vm_virtual_blocks::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_nonce_holder_tester; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. 
- run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs deleted file mode 100644 index d0b3b7cbee37..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = 
account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.l2_to_l1_logs, - current_state_without_predefined_refunds.l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.l2_to_l1_logs, - current_state_without_predefined_refunds.l2_to_l1_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs deleted file mode 100644 index 988841e90ce6..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, Eip712Domain, Execute, Nonce, Transaction, U256}; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_many_owners_custom_account_contract; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, 270.into()).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(chain_id.into()); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, chain_id.into()); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, chain_id.into()).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs deleted file mode 100644 index 240b7188377c..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs +++ /dev/null @@ -1,146 +0,0 @@ -use ethabi::Token; - -use zksync_contracts::get_loadnext_contract; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; - -use zksync_types::{Execute, U256}; - -use crate::interface::TxExecutionMode; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{ - DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, -}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - 
TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs deleted file mode 100644 index c4eac73499fc..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::interface::{ExecutionResult, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use 
crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs deleted file mode 100644 index a5c0db9468b0..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs +++ /dev/null @@ -1,119 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::VmLocalState; -use crate::interface::storage::WriteStorage; - -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::vm_virtual_blocks::old_vm::event_sink::InMemoryEventSink; -use crate::vm_virtual_blocks::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HistoryRecorder, -}; -use crate::vm_virtual_blocks::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; -use crate::HistoryMode as CommonHistoryMode; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. 
-impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e08..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git 
a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs deleted file mode 100644 index 15d3d98ab1d0..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,216 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, -}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::vm_tester::VmTester; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs deleted file mode 100644 index 9fe0635eba39..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs +++ /dev/null @@ -1,291 +0,0 @@ -use std::marker::PhantomData; -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use crate::HistoryMode; -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; -use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, MiniblockNumber, Nonce, - ProtocolVersionId, U256, -}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, VmExecutionMode}; -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::tests::tester::Account; -use crate::vm_virtual_blocks::tests::tester::TxType; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; -use crate::vm_virtual_blocks::utils::l2_blocks::load_last_l2_block; -use crate::vm_virtual_blocks::Vm; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: legacy_miniblock_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - _phantom: PhantomData, - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - _phantom: PhantomData, - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - _phantom: PhantomData, - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: 270.into(), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - self.system_env = system_env; - 
self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs deleted file mode 100644 index 8258abe0685a..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::interface::{TxExecutionMode, TxRevertReason, VmRevertReason}; -use zksync_types::{Execute, H160}; - -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{ - ExpectedError, TransactionTestInfo, VmTesterBuilder, -}; -use crate::vm_virtual_blocks::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs deleted file mode 100644 index 8b3fa0ea2910..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs +++ /dev/null @@ -1,344 +0,0 @@ -use zk_evm_1_3_3::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, -}; -use 
crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::verify_required_storage; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; - -use super::utils::read_test_contract; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. -#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implenentaiton itself -// For the explanatation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs deleted file mode 100644 index e3db232ffceb..000000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs +++ /dev/null @@ -1,106 
+0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; - -use crate::vm_virtual_blocks::tests::tester::InMemoryStorageView; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_virtual_blocks::types::internals::ZkSyncVmState; -use crate::vm_virtual_blocks::HistoryMode; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 2a9d6eed6c7d..28c09590f2ad 100644 --- 
a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -8,7 +8,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_virtual_blocks::{ @@ -118,10 +117,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 8dd67e1ac4e2..ac5693b61619 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -87,10 +87,6 @@ impl VmInterface for LegacyVmInstance { )) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - dispatch_legacy_vm!(self.record_vm_memory_metrics()) - } - /// Return the results of execution of all batch fn finish_batch(&mut self) -> FinishedL1Batch { dispatch_legacy_vm!(self.finish_batch()) @@ -213,6 +209,11 @@ impl LegacyVmInstance { } } } + + /// Returns memory-related oracle metrics. + pub fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + dispatch_legacy_vm!(self.record_vm_memory_metrics()) + } } /// Fast VM shadowed by the latest legacy VM. @@ -283,10 +284,6 @@ impl VmInterface for FastVmInsta } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - dispatch_fast_vm!(self.record_vm_memory_metrics()) - } - fn finish_batch(&mut self) -> FinishedL1Batch { dispatch_fast_vm!(self.finish_batch()) } diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml index 92d9bd53978c..87a0a63567ba 100644 --- a/core/lib/protobuf_config/Cargo.toml +++ b/core/lib/protobuf_config/Cargo.toml @@ -26,6 +26,7 @@ rand.workspace = true hex.workspace = true secrecy.workspace = true tracing.workspace = true +time.workspace = true [build-dependencies] zksync_protobuf_build.workspace = true diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs index f91bf07e43f8..2f8ac8df07e2 100644 --- a/core/lib/protobuf_config/src/chain.rs +++ b/core/lib/protobuf_config/src/chain.rs @@ -86,6 +86,7 @@ impl ProtoRepr for proto::StateKeeper { // needed during the initialization from files bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, fee_account_addr: None, l1_batch_commit_data_generator_mode: Default::default(), }) diff --git a/core/lib/protobuf_config/src/en.rs b/core/lib/protobuf_config/src/en.rs index 9c07d1d39297..9d1a39310604 100644 --- a/core/lib/protobuf_config/src/en.rs +++ b/core/lib/protobuf_config/src/en.rs @@ -1,4 +1,7 @@ -use std::{num::NonZeroUsize, str::FromStr}; +use std::{ + num::{NonZeroU64, NonZeroUsize}, + str::FromStr, +}; use anyhow::Context; use zksync_basic_types::{url::SensitiveUrl, L1ChainId, L2ChainId}; @@ -36,6 +39,9 @@ impl ProtoRepr for proto::ExternalNode { .as_ref() .map(|a| a.parse().context("gateway_url")) .transpose()?, + bridge_addresses_refresh_interval_sec: self + .bridge_addresses_refresh_interval_sec + .and_then(NonZeroU64::new), }) } @@ -55,6 +61,9 @@ impl ProtoRepr for proto::ExternalNode { .gateway_url .as_ref() .map(|a| a.expose_str().to_string()), + 
bridge_addresses_refresh_interval_sec: this + .bridge_addresses_refresh_interval_sec + .map(|a| a.get()), } } } diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index 273b7f4e3445..c1d95bd30d2b 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -115,6 +115,9 @@ impl ProtoRepr for proto::Sender { .parse(), tx_aggregation_only_prove_and_execute: self.tx_aggregation_paused.unwrap_or(false), tx_aggregation_paused: self.tx_aggregation_only_prove_and_execute.unwrap_or(false), + time_in_mempool_in_l1_blocks_cap: self + .time_in_mempool_in_l1_blocks_cap + .unwrap_or(Self::Type::default_time_in_mempool_in_l1_blocks_cap()), }) } @@ -147,6 +150,7 @@ impl ProtoRepr for proto::Sender { ), tx_aggregation_only_prove_and_execute: Some(this.tx_aggregation_only_prove_and_execute), tx_aggregation_paused: Some(this.tx_aggregation_paused), + time_in_mempool_in_l1_blocks_cap: Some(this.time_in_mempool_in_l1_blocks_cap), } } } @@ -161,9 +165,9 @@ impl ProtoRepr for proto::GasAdjuster { .and_then(|x| Ok((*x).try_into()?)) .context("max_base_fee_samples")?, pricing_formula_parameter_a: *required(&self.pricing_formula_parameter_a) - .context("pricing_formula_parameter_a")?, + .unwrap_or(&Self::Type::default_pricing_formula_parameter_a()), pricing_formula_parameter_b: *required(&self.pricing_formula_parameter_b) - .context("pricing_formula_parameter_b")?, + .unwrap_or(&Self::Type::default_pricing_formula_parameter_b()), internal_l1_pricing_multiplier: *required(&self.internal_l1_pricing_multiplier) .context("internal_l1_pricing_multiplier")?, internal_enforced_l1_gas_price: self.internal_enforced_l1_gas_price, diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 59896aa244d8..7ecc768100fb 100644 --- a/core/lib/protobuf_config/src/genesis.rs +++ b/core/lib/protobuf_config/src/genesis.rs @@ -75,6 +75,12 @@ impl ProtoRepr for proto::Genesis { .and_then(|x| parse_h256(x)) .context("default_aa_hash")?, ), + evm_emulator_hash: self + .evm_emulator_hash + .as_deref() + .map(parse_h256) + .transpose() + .context("evm_emulator_hash")?, l1_chain_id: required(&self.l1_chain_id) .map(|x| L1ChainId(*x)) .context("l1_chain_id")?, @@ -105,6 +111,7 @@ impl ProtoRepr for proto::Genesis { genesis_protocol_semantic_version: this.protocol_version.map(|x| x.to_string()), default_aa_hash: this.default_aa_hash.map(|x| format!("{:?}", x)), bootloader_hash: this.bootloader_hash.map(|x| format!("{:?}", x)), + evm_emulator_hash: this.evm_emulator_hash.map(|x| format!("{:?}", x)), fee_account: Some(format!("{:?}", this.fee_account)), l1_chain_id: Some(this.l1_chain_id.0), l2_chain_id: Some(this.l2_chain_id.as_u64()), diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index a0f4d45214f6..48bc5f1ce137 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -12,11 +12,14 @@ mod commitment_generator; mod consensus; mod contract_verifier; mod contracts; +mod da_client; mod da_dispatcher; mod database; mod en; mod eth; mod experimental; +mod external_price_api_client; +mod external_proof_integration_api; mod general; mod genesis; mod house_keeper; @@ -25,15 +28,12 @@ mod observability; mod proof_data_handler; pub mod proto; mod prover; +mod prover_autoscaler; +mod prover_job_monitor; mod pruning; mod secrets; -mod snapshots_creator; - -mod da_client; -mod external_price_api_client; -mod external_proof_integration_api; -mod prover_job_monitor; mod 
snapshot_recovery; +mod snapshots_creator; #[cfg(test)] mod tests; mod utils; @@ -71,17 +71,16 @@ pub fn read_optional_repr<P: ProtoRepr>(field: &Option<P>
) -> Option { .flatten() } -pub fn decode_yaml_repr( +/// Reads a yaml file. +pub fn read_yaml_repr( path: &PathBuf, deny_unknown_fields: bool, ) -> anyhow::Result { let yaml = std::fs::read_to_string(path).with_context(|| path.display().to_string())?; - let d = serde_yaml::Deserializer::from_str(&yaml); - let this: T = zksync_protobuf::serde::Deserialize { + zksync_protobuf::serde::Deserialize { deny_unknown_fields, } - .proto(d)?; - this.read() + .proto_repr_from_yaml::(&yaml) } pub fn encode_yaml_repr(value: &T::Type) -> anyhow::Result> { diff --git a/core/lib/protobuf_config/src/proto/config/en.proto b/core/lib/protobuf_config/src/proto/config/en.proto index d8a13d31d4b9..69412704ea0f 100644 --- a/core/lib/protobuf_config/src/proto/config/en.proto +++ b/core/lib/protobuf_config/src/proto/config/en.proto @@ -10,4 +10,5 @@ message ExternalNode { optional uint64 main_node_rate_limit_rps = 6; // optional optional config.genesis.L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 7; // optional, default to rollup optional string gateway_url = 8; // optional + optional uint64 bridge_addresses_refresh_interval_sec = 9; // optional } diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index b102a08be04c..6438573e08df 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -48,6 +48,7 @@ message Sender { reserved 19; reserved "proof_loading_mode"; optional bool tx_aggregation_paused = 20; // required optional bool tx_aggregation_only_prove_and_execute = 21; // required + optional uint32 time_in_mempool_in_l1_blocks_cap = 22; // optional } message GasAdjuster { diff --git a/core/lib/protobuf_config/src/proto/config/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto index 08cbb954fcbc..e3a9a45366f9 100644 --- a/core/lib/protobuf_config/src/proto/config/genesis.proto +++ b/core/lib/protobuf_config/src/proto/config/genesis.proto @@ -28,5 +28,6 @@ message Genesis { optional Prover prover = 10; optional L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 29; // optional, default to rollup optional string genesis_protocol_semantic_version = 12; // optional; + optional string evm_emulator_hash = 13; // optional; h256 reserved 11; reserved "shared_bridge"; } diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto new file mode 100644 index 000000000000..e1d11b94d8f1 --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; + +package zksync.config.prover_autoscaler; + +import "zksync/std.proto"; +import "zksync/config/observability.proto"; + +message ProverAutoscalerConfig { + optional std.Duration graceful_shutdown_timeout = 1; // optional + optional ProverAutoscalerAgentConfig agent_config = 2; // optional + optional ProverAutoscalerScalerConfig scaler_config = 3; // optional + optional observability.Observability observability = 4; // optional +} + +message ProverAutoscalerAgentConfig { + optional uint32 prometheus_port = 1; // required + optional uint32 http_port = 2; // required + repeated string namespaces = 3; // optional + optional string cluster_name = 4; // optional +} + +message ProtocolVersion { + optional string namespace = 1; // required + optional string protocol_version = 2; // required +} + +message ClusterPriority { 
+ optional string cluster = 1; // required + optional uint32 priority = 2; // required +} + +message ProverSpeed { + optional string gpu = 1; // required + optional uint32 speed = 2; // required +} + +message ProverAutoscalerScalerConfig { + optional uint32 prometheus_port = 1; // required + optional std.Duration scaler_run_interval = 2; // optional + optional string prover_job_monitor_url = 3; // required + repeated string agents = 4; // required at least one + repeated ProtocolVersion protocol_versions = 5; // repeated at least one + repeated ClusterPriority cluster_priorities = 6; // optional + repeated ProverSpeed prover_speed = 7; // optional + optional uint32 long_pending_duration_s = 8; // optional +} diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs new file mode 100644 index 000000000000..f7da099cb829 --- /dev/null +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -0,0 +1,172 @@ +use anyhow::Context as _; +use time::Duration; +use zksync_config::configs::{self, prover_autoscaler::Gpu}; +use zksync_protobuf::{read_optional, repr::ProtoRepr, required, ProtoFmt}; + +use crate::{proto::prover_autoscaler as proto, read_optional_repr}; + +impl ProtoRepr for proto::ProverAutoscalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + graceful_shutdown_timeout: read_optional(&self.graceful_shutdown_timeout) + .context("graceful_shutdown_timeout")? + .unwrap_or(Self::Type::default_graceful_shutdown_timeout()), + agent_config: read_optional_repr(&self.agent_config), + scaler_config: read_optional_repr(&self.scaler_config), + observability: read_optional_repr(&self.observability), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + graceful_shutdown_timeout: Some(ProtoFmt::build(&this.graceful_shutdown_timeout)), + agent_config: this.agent_config.as_ref().map(ProtoRepr::build), + scaler_config: this.scaler_config.as_ref().map(ProtoRepr::build), + observability: this.observability.as_ref().map(ProtoRepr::build), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerAgentConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerAgentConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + http_port: required(&self.http_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_port")?, + namespaces: self.namespaces.to_vec(), + cluster_name: Some("".to_string()), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + http_port: Some(this.http_port.into()), + namespaces: this.namespaces.clone(), + cluster_name: this.cluster_name.clone(), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerScalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerScalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + scaler_run_interval: read_optional(&self.scaler_run_interval) + .context("scaler_run_interval")? + .unwrap_or(Self::Type::default_scaler_run_interval()), + prover_job_monitor_url: required(&self.prover_job_monitor_url) + .context("prover_job_monitor_url")? 
+ .clone(), + agents: self.agents.to_vec(), + protocol_versions: self + .protocol_versions + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("protocol_versions")?, + cluster_priorities: self + .cluster_priorities + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("cluster_priorities")?, + prover_speed: self + .prover_speed + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("prover_speed")?, + long_pending_duration: match self.long_pending_duration_s { + Some(s) => Duration::seconds(s.into()), + None => Self::Type::default_long_pending_duration(), + }, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + scaler_run_interval: Some(ProtoFmt::build(&this.scaler_run_interval)), + prover_job_monitor_url: Some(this.prover_job_monitor_url.clone()), + agents: this.agents.clone(), + protocol_versions: this + .protocol_versions + .iter() + .map(|(k, v)| proto::ProtocolVersion::build(&(k.clone(), v.clone()))) + .collect(), + cluster_priorities: this + .cluster_priorities + .iter() + .map(|(k, v)| proto::ClusterPriority::build(&(k.clone(), *v))) + .collect(), + prover_speed: this + .prover_speed + .iter() + .map(|(k, v)| proto::ProverSpeed::build(&(*k, *v))) + .collect(), + long_pending_duration_s: Some(this.long_pending_duration.whole_seconds() as u32), + } + } +} + +impl ProtoRepr for proto::ProtocolVersion { + type Type = (String, String); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.namespace).context("namespace")?.clone(), + required(&self.protocol_version) + .context("protocol_version")? + .clone(), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + namespace: Some(this.0.clone()), + protocol_version: Some(this.1.clone()), + } + } +} + +impl ProtoRepr for proto::ClusterPriority { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.cluster).context("cluster")?.clone(), + *required(&self.priority).context("priority")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster: Some(this.0.clone()), + priority: Some(this.1), + } + } +} + +impl ProtoRepr for proto::ProverSpeed { + type Type = (Gpu, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.gpu).context("gpu")?.parse()?, + *required(&self.speed).context("speed")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + gpu: Some(this.0.to_string()), + speed: Some(this.1), + } + } +} diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs index d653b9b92bfd..c72bce0bf9a6 100644 --- a/core/lib/protobuf_config/src/tests.rs +++ b/core/lib/protobuf_config/src/tests.rs @@ -2,7 +2,7 @@ use std::{path::PathBuf, str::FromStr}; use zksync_protobuf::testonly::{test_encode_all_formats, ReprConv}; -use crate::{decode_yaml_repr, proto}; +use crate::{proto, read_yaml_repr}; /// Tests config <-> proto (boilerplate) conversions. #[test] @@ -60,14 +60,11 @@ fn test_encoding() { #[test] fn verify_file_parsing() { let base_path = PathBuf::from_str("../../../etc/env/file_based/").unwrap(); - decode_yaml_repr::(&base_path.join("general.yaml"), true) - .unwrap(); + read_yaml_repr::(&base_path.join("general.yaml"), true).unwrap(); // It's allowed to have unknown fields in wallets, e.g. 
we keep private key for fee account - decode_yaml_repr::(&base_path.join("wallets.yaml"), false).unwrap(); - decode_yaml_repr::(&base_path.join("genesis.yaml"), true).unwrap(); - decode_yaml_repr::(&base_path.join("contracts.yaml"), true) - .unwrap(); - decode_yaml_repr::(&base_path.join("secrets.yaml"), true).unwrap(); - decode_yaml_repr::(&base_path.join("external_node.yaml"), true) - .unwrap(); + read_yaml_repr::(&base_path.join("wallets.yaml"), false).unwrap(); + read_yaml_repr::(&base_path.join("genesis.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("contracts.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("secrets.yaml"), true).unwrap(); + read_yaml_repr::(&base_path.join("external_node.yaml"), true).unwrap(); } diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 22a20223c8b4..f5f389362ddd 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -3,7 +3,7 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; +use zksync_object_store::{_reexports::BoxedError, serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, @@ -144,11 +144,45 @@ pub struct VMRunWitnessInputData { pub protocol_version: ProtocolVersionId, pub bootloader_code: Vec<[u8; 32]>, pub default_account_code_hash: U256, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub evm_emulator_code_hash: Option, pub storage_refunds: Vec, pub pubdata_costs: Vec, pub witness_block_state: WitnessStorageState, } +// skip_serializing_if for field evm_emulator_code_hash doesn't work fine with bincode, +// so we are implementing custom deserialization for it +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct VMRunWitnessInputDataLegacy { + pub l1_batch_number: L1BatchNumber, + pub used_bytecodes: HashMap>, + pub initial_heap_content: Vec<(usize, U256)>, + pub protocol_version: ProtocolVersionId, + pub bootloader_code: Vec<[u8; 32]>, + pub default_account_code_hash: U256, + pub storage_refunds: Vec, + pub pubdata_costs: Vec, + pub witness_block_state: WitnessStorageState, +} + +impl From for VMRunWitnessInputData { + fn from(value: VMRunWitnessInputDataLegacy) -> Self { + Self { + l1_batch_number: value.l1_batch_number, + used_bytecodes: value.used_bytecodes, + initial_heap_content: value.initial_heap_content, + protocol_version: value.protocol_version, + bootloader_code: value.bootloader_code, + default_account_code_hash: value.default_account_code_hash, + evm_emulator_code_hash: None, + storage_refunds: value.storage_refunds, + pubdata_costs: value.pubdata_costs, + witness_block_state: value.witness_block_state, + } + } +} + impl StoredObject for VMRunWitnessInputData { const BUCKET: Bucket = Bucket::WitnessInput; @@ -158,7 +192,17 @@ impl StoredObject for VMRunWitnessInputData { format!("vm_run_data_{key}.bin") } - serialize_using_bincode!(); + fn serialize(&self) -> Result, BoxedError> { + zksync_object_store::bincode::serialize(self).map_err(Into::into) + } + + fn deserialize(bytes: Vec) -> Result { + zksync_object_store::bincode::deserialize::(&bytes).or_else(|_| { + zksync_object_store::bincode::deserialize::(&bytes) + 
.map(Into::into) + .map_err(Into::into) + }) + } } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 68b25416d668..86b563f823e8 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -306,6 +306,7 @@ mod tests { code: vec![U256([1; 4])], hash: H256([1; 32]), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 1e5a1b3fe65d..103b6de1fb38 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -643,7 +643,7 @@ pub struct ProtocolVersion { /// Verifier configuration #[deprecated] pub verification_keys_hashes: Option, - /// Hashes of base system contracts (bootloader and default account) + /// Hashes of base system contracts (bootloader, default account and evm simulator) #[deprecated] pub base_system_contracts: Option, /// Bootloader code hash @@ -652,6 +652,9 @@ pub struct ProtocolVersion { /// Default account code hash #[serde(rename = "defaultAccountCodeHash")] pub default_account_code_hash: Option, + /// EVM emulator code hash + #[serde(rename = "evmSimulatorCodeHash")] + pub evm_emulator_code_hash: Option, /// L2 Upgrade transaction hash #[deprecated] pub l2_system_upgrade_tx_hash: Option, @@ -667,6 +670,7 @@ impl ProtocolVersion { timestamp: u64, bootloader_code_hash: H256, default_account_code_hash: H256, + evm_emulator_code_hash: Option, l2_system_upgrade_tx_hash: Option, ) -> Self { Self { @@ -677,9 +681,11 @@ impl ProtocolVersion { base_system_contracts: Some(BaseSystemContractsHashes { bootloader: bootloader_code_hash, default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, }), bootloader_code_hash: Some(bootloader_code_hash), default_account_code_hash: Some(default_account_code_hash), + evm_emulator_code_hash, l2_system_upgrade_tx_hash, l2_system_upgrade_tx_hash_new: l2_system_upgrade_tx_hash, } @@ -695,6 +701,13 @@ impl ProtocolVersion { .or_else(|| self.base_system_contracts.map(|hashes| hashes.default_aa)) } + pub fn evm_emulator_code_hash(&self) -> Option { + self.evm_emulator_code_hash.or_else(|| { + self.base_system_contracts + .and_then(|hashes| hashes.evm_emulator) + }) + } + pub fn minor_version(&self) -> Option { self.minor_version.or(self.version_id) } @@ -917,6 +930,7 @@ mod tests { base_system_contracts: Some(Default::default()), bootloader_code_hash: Some(Default::default()), default_account_code_hash: Some(Default::default()), + evm_emulator_code_hash: Some(Default::default()), l2_system_upgrade_tx_hash: Default::default(), l2_system_upgrade_tx_hash_new: Default::default(), }; diff --git a/core/lib/types/src/api/state_override.rs b/core/lib/types/src/api/state_override.rs index a2497a65c533..f2986610840a 100644 --- a/core/lib/types/src/api/state_override.rs +++ b/core/lib/types/src/api/state_override.rs @@ -21,6 +21,11 @@ impl StateOverride { self.0.get(address) } + /// Gets mutable overrides for the specified account. + pub fn get_mut(&mut self, address: &Address) -> Option<&mut OverrideAccount> { + self.0.get_mut(address) + } + /// Iterates over all account overrides. 
pub fn iter(&self) -> impl Iterator + '_ { self.0.iter() @@ -48,6 +53,12 @@ impl Bytecode { } } +impl AsRef<[u8]> for Bytecode { + fn as_ref(&self) -> &[u8] { + &self.0 .0 + } +} + impl Serialize for Bytecode { fn serialize(&self, serializer: S) -> Result { self.0.serialize(serializer) diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index 63d1bad486f3..759ee8947ba9 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -467,6 +467,7 @@ pub struct L1BatchMetaParameters { pub zkporter_is_available: bool, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: Option, } @@ -482,10 +483,11 @@ impl L1BatchMetaParameters { .protocol_version .map_or(false, |ver| ver.is_post_1_5_0()) { - // EVM simulator hash for now is the same as the default AA hash. - result.extend(self.default_aa_code_hash.as_bytes()); + let evm_emulator_code_hash = self + .evm_emulator_code_hash + .unwrap_or(self.default_aa_code_hash); + result.extend(evm_emulator_code_hash.as_bytes()); } - result } @@ -551,6 +553,7 @@ impl L1BatchCommitment { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: input.common().bootloader_code_hash, default_aa_code_hash: input.common().default_aa_code_hash, + evm_emulator_code_hash: input.common().evm_emulator_code_hash, protocol_version: Some(input.common().protocol_version), }; @@ -653,6 +656,7 @@ pub struct CommitmentCommonInput { pub rollup_root_hash: H256, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: ProtocolVersionId, } @@ -693,6 +697,7 @@ impl CommitmentInput { rollup_root_hash, bootloader_code_hash: base_system_contracts_hashes.bootloader, default_aa_code_hash: base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: base_system_contracts_hashes.evm_emulator, protocol_version, }; if protocol_version.is_pre_boojum() { diff --git a/core/lib/types/src/commitment/tests/mod.rs b/core/lib/types/src/commitment/tests/mod.rs index 34e308cfd0a9..33fb0142b04d 100644 --- a/core/lib/types/src/commitment/tests/mod.rs +++ b/core/lib/types/src/commitment/tests/mod.rs @@ -50,3 +50,8 @@ fn post_boojum_1_4_2() { fn post_boojum_1_5_0() { run_test("post_boojum_1_5_0_test"); } + +#[test] +fn post_boojum_1_5_0_with_evm() { + run_test("post_boojum_1_5_0_test_with_evm"); +} diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json new file mode 100644 index 000000000000..4e8c0e0814a0 --- /dev/null +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json @@ -0,0 +1,359 @@ +{ + "input": { + "PostBoojum": { + "common": { + "l2_to_l1_logs": [ + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x7814f203b8e02f6a676b8f7faefcf732d8b4368bab25239ea4525010aa85d5ee", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "rollup_last_leaf_index": 89, + "rollup_root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b", + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + "evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + 
"protocol_version": "Version23" + }, + "system_logs": [ + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 0, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000004", + "value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + }, + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 1, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000003", + "value": "0x00000000000000000000000065c22f8000000000000000000000000065c22f81" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000005", + "value": "0x155c82febe94e07df0065c153e8ed403b5351fd64d657c8dffbfbee8ec3d2ba3" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000006", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x85a7fb853512ba6575c99ee121dd560559523a4587a2cd7e83cd359cd9ea2aed" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000002", + "value": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000007", + "value": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": 
"0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000008" + } + ], + "state_diffs": [ + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1", + "derived_key": [ + 113, 233, 23, 33, 249, 145, 133, 118, 215, 96, 240, 47, 3, 202, 196, + 124, 111, 64, 3, 49, 96, 49, 132, 142, 60, 29, 153, 230, 232, 58, + 71, 67 + ], + "enumeration_index": 49, + "initial_value": "0x18776f28c303800", + "final_value": "0x708da482cab20760" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x294a00337abeee2b3cd948ffeed92231e2a3acc2eb11210400e0aa9557f23e26", + "derived_key": [ + 45, 90, 105, 98, 204, 206, 229, 212, 173, 180, 138, 54, 187, 191, + 68, 58, 83, 23, 33, 72, 67, 129, 18, 89, 55, 243, 0, 26, 197, 255, + 135, 91 + ], + "enumeration_index": 50, + "initial_value": "0xf5559e28fd66c0", + "final_value": "0xf5a19b324caf80" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, 166, 75, 35, 133, + 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, 64, 49, 220, + 193, 72, 27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x6f05e193353286a0" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x7", + "derived_key": [ + 18, 59, 175, 197, 134, 247, 119, 100, 72, 140, 210, 76, 106, 119, + 84, 110, 90, 15, 232, 189, 251, 79, 162, 3, 207, 175, 252, 54, 204, + 228, 221, 91 + ], + "enumeration_index": 53, + "initial_value": "0x100000000000000000000000065c22e3e", + "final_value": "0x200000000000000000000000065c22f80" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x9", + "derived_key": [ + 142, 125, 208, 106, 197, 183, 59, 71, 59, 230, 188, 90, 81, 3, 15, + 76, 116, 55, 101, 124, 183, 178, 155, 243, 118, 197, 100, 184, 209, + 103, 90, 94 + ], + "enumeration_index": 54, + "initial_value": "0x200000000000000000000000065c22e3f", + "final_value": "0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xd", + "derived_key": [ + 235, 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, + 40, 14, 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, + 247, 152, 97 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xebbe609cd3ccd11f273eb94374d6d3a2f7856c5f1039dc4877c6a334188ac7c1" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xe", + "derived_key": [ + 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, 58, + 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x708e7fcf68ebab6c87322686cac4bcdb5f2bd4c71f337b18d147fd9a6c44ad13" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10c", + "derived_key": [ + 121, 9, 53, 136, 208, 232, 71, 239, 167, 58, 16, 206, 32, 228, 121, + 159, 177, 228, 102, 66, 214, 86, 23, 199, 229, 33, 63, 160, 73, 137, + 217, 45 + ], + "enumeration_index": 57, + "initial_value": "0x200000000000000000000000065c22e3f", + "final_value": 
"0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xad67d757c34507f157cacfa2e3153e9f260a2244f30428821be7be64587ac55f", + "derived_key": [ + 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, 44, 164, 124, 169, + 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, 57, 36, 22, + 48, 203, 70 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + } + ], + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + }, + "pass_through_data": { + "shared_states": [ + { + "last_leaf_index": 89, + "root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b" + }, + { + "last_leaf_index": 0, + "root_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "meta_parameters": { + "zkporter_is_available": false, + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + "evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + "protocol_version": "Version23" + }, + "auxiliary_output": { + "PostBoojum": { + "common": { + "l2_l1_logs_merkle_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff", + "protocol_version": "Version23" + }, + "system_logs_linear_hash": "0x602dacc0a26e3347f0679924c4ae151ff5200e7dd80902fe0fc11c806c4d3ffb", + "state_diffs_compressed": [ + 1, 0, 1, 72, 4, 0, 4, 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, + 166, 75, 35, 133, 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, + 64, 49, 220, 193, 72, 27, 65, 111, 5, 225, 147, 53, 50, 134, 160, 235, + 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, 40, 14, + 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, 247, 152, 97, + 0, 235, 190, 96, 156, 211, 204, 209, 31, 39, 62, 185, 67, 116, 214, 211, + 162, 247, 133, 108, 95, 16, 57, 220, 72, 119, 198, 163, 52, 24, 138, + 199, 193, 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, + 58, 114, 253, 165, 215, 173, 
51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216, 0, 112, 142, 127, 207, 104, 235, 171, 108, 135, 50, 38, 134, + 202, 196, 188, 219, 95, 43, 212, 199, 31, 51, 123, 24, 209, 71, 253, + 154, 108, 68, 173, 19, 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, + 44, 164, 124, 169, 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, + 57, 36, 22, 48, 203, 70, 0, 85, 97, 141, 181, 255, 36, 174, 228, 210, + 54, 146, 27, 111, 66, 114, 16, 17, 97, 19, 113, 21, 163, 180, 196, 166, + 95, 134, 119, 177, 36, 192, 28, 0, 0, 0, 49, 65, 111, 6, 45, 144, 62, + 129, 207, 96, 0, 0, 0, 50, 49, 75, 253, 9, 79, 72, 192, 0, 0, 0, 53, + 137, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 54, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 57, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66 + ], + "state_diffs_hash": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0", + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_linear_hashes": [ + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000007", + "0x0000000000000000000000000000000000000000000000000000000000000008", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ], + "blob_commitments": [ + "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000002", + "0x0000000000000000000000000000000000000000000000000000000000000003", + "0x0000000000000000000000000000000000000000000000000000000000000004", + "0x0000000000000000000000000000000000000000000000000000000000000005", + "0x0000000000000000000000000000000000000000000000000000000000000006", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + }, + "hashes": { + 
"pass_through_data": "0x6a3ffc0f55d4abce9498b8bcb01a3018bc2b83d96acb27e23772fe9347954725", + "aux_output": "0xadc63d9c45f85598f3e3c232970315d1f6ac96222e379e16ced7a204524a4061", + "meta_parameters": "0x02531e5cc22688523a4ac9317e5097743771f6914015cf1152491cf22084bd58", + "commitment": "0x4fdd8c5b231dfc9fc81aba744a90fbec78627f529ac29f9fc758a7b9e62fa321" + } +} diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 036d2a7a036d..48e813e571d2 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -216,7 +216,9 @@ impl L2Tx { let raw = req.get_signed_bytes(&sig).context("get_signed_bytes")?; let (req, hash) = TransactionRequest::from_bytes_unverified(&raw).context("from_bytes_unverified()")?; - let mut tx = L2Tx::from_request_unverified(req).context("from_request_unverified()")?; + // Since we allow users to specify `None` recipient, EVM emulation is implicitly enabled. + let mut tx = + L2Tx::from_request_unverified(req, true).context("from_request_unverified()")?; tx.set_input(raw, hash); Ok(tx) } diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 86b2e3f03d51..a50fc8a655b7 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -5,7 +5,7 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] -use std::{fmt, fmt::Debug}; +use std::fmt; use anyhow::Context as _; use fee::encoding_len; @@ -88,9 +88,16 @@ pub struct Transaction { pub raw_bytes: Option, } -impl std::fmt::Debug for Transaction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("Transaction").field(&self.hash()).finish() +impl fmt::Debug for Transaction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(hash) = self.hash_for_debugging() { + f.debug_tuple("Transaction").field(&hash).finish() + } else { + f.debug_struct("Transaction") + .field("initiator_account", &self.initiator_account()) + .field("nonce", &self.nonce()) + .finish() + } } } @@ -136,6 +143,15 @@ impl Transaction { } } + fn hash_for_debugging(&self) -> Option { + match &self.common_data { + ExecuteTransactionCommon::L1(data) => Some(data.hash()), + ExecuteTransactionCommon::L2(data) if data.input.is_some() => Some(data.hash()), + ExecuteTransactionCommon::L2(_) => None, + ExecuteTransactionCommon::ProtocolUpgrade(data) => Some(data.hash()), + } + } + /// Returns the account that initiated this transaction. pub fn initiator_account(&self) -> Address { match &self.common_data { @@ -315,9 +331,14 @@ impl TryFrom for abi::Transaction { } } -impl TryFrom for Transaction { - type Error = anyhow::Error; - fn try_from(tx: abi::Transaction) -> anyhow::Result { +impl Transaction { + /// Converts a transaction from its ABI representation. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables L2 transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. 
+ pub fn from_abi(tx: abi::Transaction, allow_no_target: bool) -> anyhow::Result { Ok(match tx { abi::Transaction::L1 { tx, @@ -389,7 +410,7 @@ impl TryFrom for Transaction { abi::Transaction::L2(raw) => { let (req, hash) = transaction_request::TransactionRequest::from_bytes_unverified(&raw)?; - let mut tx = L2Tx::from_request_unverified(req)?; + let mut tx = L2Tx::from_request_unverified(req, allow_no_target)?; tx.set_input(raw, hash); tx.into() } diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index 1afb108a0536..48f26dfd5c7f 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -62,6 +62,8 @@ pub struct ProtocolUpgrade { pub bootloader_code_hash: Option, /// New default account code hash. pub default_account_code_hash: Option, + /// New EVM emulator code hash + pub evm_emulator_code_hash: Option, /// New verifier params. pub verifier_params: Option, /// New verifier address. @@ -118,17 +120,21 @@ impl ProtocolUpgrade { bootloader_code_hash: (bootloader_hash != H256::zero()).then_some(bootloader_hash), default_account_code_hash: (default_account_hash != H256::zero()) .then_some(default_account_hash), + evm_emulator_code_hash: None, // EVM emulator upgrades are not supported yet verifier_params: (upgrade.verifier_params != abi::VerifierParams::default()) .then_some(upgrade.verifier_params.into()), verifier_address: (upgrade.verifier != Address::zero()).then_some(upgrade.verifier), timestamp: upgrade.upgrade_timestamp.try_into().unwrap(), tx: (upgrade.l2_protocol_upgrade_tx.tx_type != U256::zero()) .then(|| { - Transaction::try_from(abi::Transaction::L1 { - tx: upgrade.l2_protocol_upgrade_tx, - factory_deps: upgrade.factory_deps, - eth_block: 0, - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: upgrade.l2_protocol_upgrade_tx, + factory_deps: upgrade.factory_deps, + eth_block: 0, + }, + true, + ) .context("Transaction::try_from()")? 
.try_into() .map_err(|err| anyhow::format_err!("try_into::(): {err}")) @@ -149,11 +155,14 @@ pub fn decode_set_chain_id_event( .unwrap_or_else(|_| panic!("Version is not supported, packed version: {full_version_id}")); Ok(( protocol_version, - Transaction::try_from(abi::Transaction::L1 { - tx: tx.into(), - eth_block: 0, - factory_deps: vec![], - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: tx.into(), + eth_block: 0, + factory_deps: vec![], + }, + true, + ) .unwrap() .try_into() .unwrap(), @@ -298,6 +307,9 @@ impl ProtocolVersion { default_aa: upgrade .default_account_code_hash .unwrap_or(self.base_system_contracts_hashes.default_aa), + evm_emulator: upgrade + .evm_emulator_code_hash + .or(self.base_system_contracts_hashes.evm_emulator), }, tx: upgrade.tx, } diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs index a30a57bffa51..9ef037dc29b2 100644 --- a/core/lib/types/src/storage/mod.rs +++ b/core/lib/types/src/storage/mod.rs @@ -5,7 +5,7 @@ pub use log::*; use serde::{Deserialize, Serialize}; use zksync_basic_types::{web3::keccak256, L2ChainId}; pub use zksync_system_constants::*; -use zksync_utils::address_to_h256; +use zksync_utils::{address_to_h256, u256_to_h256}; use crate::{AccountTreeId, Address, H160, H256, U256}; @@ -78,6 +78,10 @@ pub fn get_code_key(account: &Address) -> StorageKey { StorageKey::new(account_code_storage, address_to_h256(account)) } +pub fn get_evm_code_hash_key(account: &Address) -> StorageKey { + get_deployer_key(get_address_mapping_key(account, u256_to_h256(1.into()))) +} + pub fn get_known_code_key(hash: &H256) -> StorageKey { let known_codes_storage = AccountTreeId::new(KNOWN_CODES_STORAGE_ADDRESS); StorageKey::new(known_codes_storage, *hash) @@ -88,6 +92,11 @@ pub fn get_system_context_key(key: H256) -> StorageKey { StorageKey::new(system_context, key) } +pub fn get_deployer_key(key: H256) -> StorageKey { + let deployer_contract = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); + StorageKey::new(deployer_contract, key) +} + pub fn get_is_account_key(account: &Address) -> StorageKey { let deployer = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index a28c45b8feae..4329680991c8 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -1,11 +1,11 @@ use std::path::PathBuf; -use once_cell::sync::Lazy; use zksync_basic_types::{AccountTreeId, Address, U256}; use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage, SystemContractsRepo}; use zksync_system_constants::{ BOOTLOADER_UTILITIES_ADDRESS, CODE_ORACLE_ADDRESS, COMPRESSOR_ADDRESS, CREATE2_FACTORY_ADDRESS, - EVENT_WRITER_ADDRESS, P256VERIFY_PRECOMPILE_ADDRESS, PUBDATA_CHUNK_PUBLISHER_ADDRESS, + EVENT_WRITER_ADDRESS, EVM_GAS_MANAGER_ADDRESS, P256VERIFY_PRECOMPILE_ADDRESS, + PUBDATA_CHUNK_PUBLISHER_ADDRESS, }; use crate::{ @@ -25,7 +25,7 @@ use crate::{ pub const TX_NONCE_INCREMENT: U256 = U256([1, 0, 0, 0]); // 1 pub const DEPLOYMENT_NONCE_INCREMENT: U256 = U256([0, 0, 1, 0]); // 2^128 -static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 25] = [ +static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 26] = [ ( "", "AccountCodeStorage", @@ -147,6 +147,12 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 25] = [ COMPLEX_UPGRADER_ADDRESS, ContractLanguage::Sol, ), + ( + "", + "EvmGasManager", + EVM_GAS_MANAGER_ADDRESS, + ContractLanguage::Sol, + ), // For now, only 
zero address and the bootloader address have empty bytecode at the init // In the future, we might want to set all of the system contracts this way. ("", "EmptyContract", Address::zero(), ContractLanguage::Sol), @@ -170,29 +176,40 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 25] = [ ), ]; -static SYSTEM_CONTRACTS: Lazy> = Lazy::new(|| { +/// Gets default set of system contracts, based on Cargo workspace location. +pub fn get_system_smart_contracts(use_evm_emulator: bool) -> Vec { SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) - .collect::>() -}); - -/// Gets default set of system contracts, based on Cargo workspace location. -pub fn get_system_smart_contracts() -> Vec { - SYSTEM_CONTRACTS.clone() + .collect() } /// Loads system contracts from a given directory. -pub fn get_system_smart_contracts_from_dir(path: PathBuf) -> Vec { +pub fn get_system_smart_contracts_from_dir( + path: PathBuf, + use_evm_emulator: bool, +) -> Vec { let repo = SystemContractsRepo { root: path }; SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) .collect::>() } diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 5f26b1d6a6a5..a8713f301ba6 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -809,6 +809,7 @@ impl TransactionRequest { impl L2Tx { pub(crate) fn from_request_unverified( mut value: TransactionRequest, + allow_no_target: bool, ) -> Result { let fee = value.get_fee_data_checked()?; let nonce = value.get_nonce_checked()?; @@ -817,8 +818,7 @@ impl L2Tx { let meta = value.eip712_meta.take().unwrap_or_default(); validate_factory_deps(&meta.factory_deps)?; - // TODO: Remove this check when evm equivalence gets enabled - if value.to.is_none() { + if value.to.is_none() && !allow_no_target { return Err(SerializationTransactionError::ToAddressIsNull); } @@ -848,11 +848,18 @@ impl L2Tx { Ok(tx) } + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. 
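As an aside on the `get_system_smart_contracts` change above: the new `use_evm_emulator` flag is what decides whether the `EvmGasManager` entry is part of the returned set. A minimal sketch of a caller, assuming only the signature shown in this diff (the `count_genesis_contracts` helper is illustrative, not part of the change):

use zksync_types::system_contracts::get_system_smart_contracts;

// Illustrative helper: when bootstrapping a genesis or in-memory state, the caller
// now chooses whether the EvmGasManager system contract is included.
fn count_genesis_contracts(use_evm_emulator: bool) -> usize {
    get_system_smart_contracts(use_evm_emulator).len()
}

Passing `false` reproduces the previous hard-coded contract list; passing `true` adds the `EvmGasManager` entry.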
pub fn from_request( - value: TransactionRequest, + request: TransactionRequest, max_tx_size: usize, + allow_no_target: bool, ) -> Result { - let tx = Self::from_request_unverified(value)?; + let tx = Self::from_request_unverified(request, allow_no_target)?; tx.check_encoded_size(max_tx_size)?; Ok(tx) } @@ -916,11 +923,19 @@ impl From for TransactionRequest { } } -impl TryFrom for L1Tx { - type Error = SerializationTransactionError; - fn try_from(tx: CallRequest) -> Result { +impl L1Tx { + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. + pub fn from_request( + request: CallRequest, + allow_no_target: bool, + ) -> Result { // L1 transactions have no limitations on the transaction size. - let tx: L2Tx = L2Tx::from_request(tx.into(), MAX_ENCODED_TX_SIZE)?; + let tx: L2Tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE, allow_no_target)?; // Note, that while the user has theoretically provided the fee for ETH on L1, // the payment to the operator as well as refunds happen on L2 and so all the ETH @@ -1316,7 +1331,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert!(execute_tx1.is_ok()); let tx2 = TransactionRequest { @@ -1327,7 +1342,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::TooBigNonce @@ -1344,7 +1359,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert_eq!( execute_tx1.unwrap_err(), SerializationTransactionError::MaxFeePerGasNotU64 @@ -1358,7 +1373,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::MaxPriorityFeePerGasNotU64 @@ -1376,7 +1391,7 @@ mod tests { }; let execute_tx3: Result = - L2Tx::from_request(tx3, usize::MAX); + L2Tx::from_request(tx3, usize::MAX, true); assert_eq!( execute_tx3.unwrap_err(), SerializationTransactionError::MaxFeePerPubdataByteNotU64 @@ -1432,7 +1447,7 @@ mod tests { let request = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap(); assert_matches!( - L2Tx::from_request(request.0, random_tx_max_size), + L2Tx::from_request(request.0, random_tx_max_size, true), Err(SerializationTransactionError::OversizedData(_, _)) ) } @@ -1458,7 +1473,7 @@ mod tests { }; let try_to_l2_tx: Result = - L2Tx::from_request(call_request.into(), random_tx_max_size); + L2Tx::from_request(call_request.into(), random_tx_max_size, true); assert_matches!( try_to_l2_tx, @@ -1483,15 +1498,20 @@ mod tests { access_list: None, eip712_meta: None, }; - let l2_tx = L2Tx::from_request(call_request_with_nonce.clone().into(), MAX_ENCODED_TX_SIZE) - .unwrap(); + let l2_tx = L2Tx::from_request( + call_request_with_nonce.clone().into(), + MAX_ENCODED_TX_SIZE, + true, + ) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(123u32)); let mut call_request_without_nonce = call_request_with_nonce; call_request_without_nonce.nonce = None; let l2_tx = - L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE).unwrap(); + 
L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE, true) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(0u32)); } diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index c133261bc232..0edece9e46b4 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -1,6 +1,7 @@ use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use zksync_utils::ZeroPrefixHexSerde; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_utils::{bytecode::hash_bytecode, ZeroPrefixHexSerde}; use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; @@ -89,8 +90,7 @@ impl Execute { &self.calldata } - /// Prepares calldata to invoke deployer contract. - /// This method encodes parameters for the `create` method. + /// Prepares calldata to invoke deployer contract. This method encodes parameters for the `create` method. pub fn encode_deploy_params_create( salt: H256, contract_hash: H256, @@ -116,4 +116,24 @@ impl Execute { FUNCTION_SIGNATURE.iter().copied().chain(params).collect() } + + /// Creates an instance for deploying the specified bytecode without additional dependencies. If necessary, + /// additional deps can be added to `Self.factory_deps` after this call. + pub fn for_deploy( + salt: H256, + contract_bytecode: Vec, + constructor_input: &[ethabi::Token], + ) -> Self { + let bytecode_hash = hash_bytecode(&contract_bytecode); + Self { + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), + calldata: Self::encode_deploy_params_create( + salt, + bytecode_hash, + ethabi::encode(constructor_input), + ), + value: 0.into(), + factory_deps: vec![contract_bytecode], + } + } } diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs index 8ba77305ad7d..c820ea794fe3 100644 --- a/core/lib/vm_executor/src/oneshot/block.rs +++ b/core/lib/vm_executor/src/oneshot/block.rs @@ -133,26 +133,33 @@ impl BlockInfo { let protocol_version = l2_block_header .protocol_version .unwrap_or(ProtocolVersionId::last_potentially_undefined()); - + // We cannot use the EVM emulator mentioned in the block as is because of batch vs playground settings etc. + // Instead, we just check whether EVM emulation in general is enabled for a block, and store this binary flag for further use. + let use_evm_emulator = l2_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some(); Ok(ResolvedBlockInfo { state_l2_block_number, state_l2_block_hash: l2_block_header.hash, vm_l1_batch_number, l1_batch_timestamp, protocol_version, + use_evm_emulator, is_pending: self.is_pending_l2_block(), }) } } /// Resolved [`BlockInfo`] containing additional data from VM state. 
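The `Execute::for_deploy` constructor introduced above complements the new `allow_no_target` flag: regular deployments still target the contract deployer, while a missing `to` field is only accepted for EVM deployment transactions. A usage sketch assuming only the signature shown in this diff (the salt, the constructor argument, and the `deploy_payload` helper are illustrative):

use zksync_types::{ethabi::Token, Execute, H256};

// Builds deployer calldata plus factory deps for `bytecode` with a single uint256
// constructor argument; callers can push extra factory deps onto the result if needed.
fn deploy_payload(bytecode: Vec<u8>) -> Execute {
    Execute::for_deploy(H256::zero(), bytecode, &[Token::Uint(42u64.into())])
}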
-#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ResolvedBlockInfo { state_l2_block_number: L2BlockNumber, state_l2_block_hash: H256, vm_l1_batch_number: L1BatchNumber, l1_batch_timestamp: u64, protocol_version: ProtocolVersionId, + use_evm_emulator: bool, is_pending: bool, } @@ -161,6 +168,14 @@ impl ResolvedBlockInfo { pub fn state_l2_block_number(&self) -> L2BlockNumber { self.state_l2_block_number } + + pub fn protocol_version(&self) -> ProtocolVersionId { + self.protocol_version + } + + pub fn use_evm_emulator(&self) -> bool { + self.use_evm_emulator + } } impl OneshotEnvParameters { @@ -213,7 +228,10 @@ impl OneshotEnvParameters { version: resolved_block_info.protocol_version, base_system_smart_contracts: self .base_system_contracts - .get_by_protocol_version(resolved_block_info.protocol_version) + .get_by_protocol_version( + resolved_block_info.protocol_version, + resolved_block_info.use_evm_emulator, + ) .clone(), bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, execution_mode, diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index 3b3a65fe30ba..0e1fb9b2762f 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -26,8 +26,12 @@ pub(super) struct MultiVMBaseSystemContracts { impl MultiVMBaseSystemContracts { /// Gets contracts for a certain version. - pub fn get_by_protocol_version(&self, version: ProtocolVersionId) -> &BaseSystemContracts { - match version { + pub fn get_by_protocol_version( + &self, + version: ProtocolVersionId, + use_evm_emulator: bool, + ) -> BaseSystemContracts { + let base = match version { ProtocolVersionId::Version0 | ProtocolVersionId::Version1 | ProtocolVersionId::Version2 @@ -54,6 +58,14 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version24 | ProtocolVersionId::Version25 => { &self.vm_1_5_0_increased_memory } + }; + let base = base.clone(); + + if version.is_post_1_5_0() && use_evm_emulator { + // EVM emulator is not versioned now; the latest version is always checked out + base.with_latest_evm_emulator() + } else { + base } } diff --git a/core/lib/vm_executor/src/oneshot/mock.rs b/core/lib/vm_executor/src/oneshot/mock.rs index 8f3a12603c1a..a7363c633c6c 100644 --- a/core/lib/vm_executor/src/oneshot/mock.rs +++ b/core/lib/vm_executor/src/oneshot/mock.rs @@ -68,6 +68,7 @@ impl MockOneshotExecutor { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }, ) diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs index a2369820a5b4..fa0e530c1909 100644 --- a/core/lib/vm_executor/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -309,7 +309,11 @@ impl L1BatchParamsProvider { let contract_hashes = first_l2_block_in_batch.header.base_system_contracts_hashes; let base_system_contracts = storage .factory_deps_dal() - .get_base_system_contracts(contract_hashes.bootloader, contract_hashes.default_aa) + .get_base_system_contracts( + contract_hashes.bootloader, + contract_hashes.default_aa, + contract_hashes.evm_emulator, + ) .await .context("failed getting base system contracts")?; diff --git a/core/lib/vm_interface/src/storage/in_memory.rs b/core/lib/vm_interface/src/storage/in_memory.rs index 6a8b56433455..d83f675cd54e 100644 --- a/core/lib/vm_interface/src/storage/in_memory.rs +++ b/core/lib/vm_interface/src/storage/in_memory.rs @@ -36,7 +36,7 @@ impl InMemoryStorage { Self::with_custom_system_contracts_and_chain_id( 
chain_id, bytecode_hasher, - get_system_smart_contracts(), + get_system_smart_contracts(false), ) } diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 3e53aad85f10..018ea075db51 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, @@ -118,6 +120,10 @@ pub struct VmExecutionResultAndLogs { pub logs: VmExecutionLogs, pub statistics: VmExecutionStatistics, pub refunds: Refunds, + /// Bytecodes decommitted during VM execution. `None` if not computed by the VM. + // FIXME: currently, this is only filled up by `vm_latest`; probably makes sense to narrow down + // to *dynamic* factory deps, so that `HashMap::new()` is a valid value for VMs not supporting EVM emulation. + pub new_known_factory_deps: Option>>, } #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs index 27241c2c0fae..8f7c1d4fb0d6 100644 --- a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs +++ b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs @@ -26,6 +26,7 @@ impl FinishedL1Batch { logs: VmExecutionLogs::default(), statistics: VmExecutionStatistics::default(), refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: vec![], diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index 095547076d42..f8e3851c8321 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -109,7 +109,8 @@ pub struct VmExecutionStatistics { pub circuit_statistic: CircuitStatistic, } -/// Oracle metrics of the VM. +/// Oracle metrics reported by legacy VMs. 
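The `new_known_factory_deps` field added above carries bytecodes decommitted during execution, and is `None` for VMs that do not track them. A hedged sketch of how a consumer might normalize it, assuming the field maps bytecode hashes (`H256`) to raw bytecodes and that the result type is re-exported at the crate root (the `decommitted_bytecodes` helper is illustrative):

use std::collections::HashMap;

use zksync_types::H256;
use zksync_vm_interface::VmExecutionResultAndLogs;

// Treats `None` (a VM that does not report decommits) the same as "nothing new".
fn decommitted_bytecodes(result: &VmExecutionResultAndLogs) -> HashMap<H256, Vec<u8>> {
    result.new_known_factory_deps.clone().unwrap_or_default()
}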
+#[derive(Debug, Default)] pub struct VmMemoryMetrics { pub event_sink_inner: usize, pub event_sink_history: usize, diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs index 5dc2351dcf7a..288c6445494d 100644 --- a/core/lib/vm_interface/src/utils/dump.rs +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -7,7 +7,7 @@ use crate::{ storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView}, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmTrackingContracts, + VmTrackingContracts, }; fn create_storage_snapshot( @@ -177,10 +177,6 @@ impl VmInterface for DumpingVm { .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.inner.record_vm_memory_metrics() - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.inner.finish_batch() } diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs index 2819e54e9a7b..92eb65a810f7 100644 --- a/core/lib/vm_interface/src/utils/shadow.rs +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -12,7 +12,7 @@ use crate::{ storage::{ReadStorage, StoragePtr, StorageView}, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, VmTrackingContracts, + VmInterfaceHistoryEnabled, VmTrackingContracts, }; /// Handler for VM divergences. @@ -202,7 +202,8 @@ where tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { - let tx_hash = tx.hash(); + let tx_repr = format!("{tx:?}"); // includes little data, so is OK to call proactively + let (main_bytecodes_result, main_tx_result) = self.main.inspect_transaction_with_bytecode_compression( main_tracer, @@ -224,7 +225,7 @@ where errors.check_results_match(&main_tx_result, &shadow_result.1); if let Err(err) = errors.into_result() { let ctx = format!( - "inspecting transaction {tx_hash:?}, with_compression={with_compression:?}" + "inspecting transaction {tx_repr}, with_compression={with_compression:?}" ); self.report(err.context(ctx)); } @@ -232,10 +233,6 @@ where (main_bytecodes_result, main_tx_result) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.main.record_vm_memory_metrics() - } - fn finish_batch(&mut self) -> FinishedL1Batch { let main_batch = self.main.finish_batch(); if let Some(shadow) = self.shadow.get_mut() { @@ -341,10 +338,25 @@ impl DivergenceErrors { &shadow_result.statistics.circuit_statistic, ); self.check_match( - "gas_remaining", + "statistics.pubdata_published", + &main_result.statistics.pubdata_published, + &shadow_result.statistics.pubdata_published, + ); + self.check_match( + "statistics.gas_remaining", &main_result.statistics.gas_remaining, &shadow_result.statistics.gas_remaining, ); + self.check_match( + "statistics.gas_used", + &main_result.statistics.gas_used, + &shadow_result.statistics.gas_used, + ); + self.check_match( + "statistics.computational_gas_used", + &main_result.statistics.computational_gas_used, + &shadow_result.statistics.computational_gas_used, + ); } fn check_match(&mut self, context: &str, main: &T, shadow: &T) { diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index 90ae76be8057..37e33a92b509 100644 
--- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -15,7 +15,7 @@ use zksync_types::{Transaction, H256}; use crate::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmMemoryMetrics, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, }; pub trait VmInterface { @@ -44,9 +44,6 @@ pub trait VmInterface { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs); - /// Record VM memory metrics. - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics; - /// Execute batch till the end and return the result, with final execution state /// and bootloader memory. fn finish_batch(&mut self) -> FinishedL1Batch; diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index d974f2e9aa1d..14ac37e59368 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -175,7 +175,7 @@ impl SandboxExecutor { let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); let resolve_started_at = Instant::now(); let resolve_time = resolve_started_at.elapsed(); - let resolved_block_info = block_args.inner.resolve(&mut connection).await?; + let resolved_block_info = &block_args.resolved; // We don't want to emit too many logs. if resolve_time > Duration::from_millis(10) { tracing::debug!("Resolved block numbers (took {resolve_time:?})"); @@ -185,7 +185,7 @@ impl SandboxExecutor { SandboxAction::Execution { fee_input, tx } => { self.options .eth_call - .to_execute_env(&mut connection, &resolved_block_info, *fee_input, tx) + .to_execute_env(&mut connection, resolved_block_info, *fee_input, tx) .await? } &SandboxAction::Call { @@ -197,7 +197,7 @@ impl SandboxExecutor { .eth_call .to_call_env( &mut connection, - &resolved_block_info, + resolved_block_info, fee_input, enforced_base_fee, ) @@ -210,7 +210,7 @@ impl SandboxExecutor { } => { self.options .estimate_gas - .to_env(&mut connection, &resolved_block_info, fee_input, base_fee) + .to_env(&mut connection, resolved_block_info, fee_input, base_fee) .await? } }; diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index 36f10b8e9b08..b560d161ab52 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -10,7 +10,7 @@ use zksync_multivm::utils::get_eth_call_gas_limit; use zksync_types::{ api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, ProtocolVersionId, U256, }; -use zksync_vm_executor::oneshot::BlockInfo; +use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; use self::vm_metrics::SandboxStage; pub(super) use self::{ @@ -285,21 +285,32 @@ pub enum BlockArgsError { } /// Information about a block provided to VM. 
-#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub(crate) struct BlockArgs { inner: BlockInfo, + resolved: ResolvedBlockInfo, block_id: api::BlockId, } impl BlockArgs { pub async fn pending(connection: &mut Connection<'_, Core>) -> anyhow::Result { let inner = BlockInfo::pending(connection).await?; + let resolved = inner.resolve(connection).await?; Ok(Self { inner, + resolved, block_id: api::BlockId::Number(api::BlockNumber::Pending), }) } + pub fn protocol_version(&self) -> ProtocolVersionId { + self.resolved.protocol_version() + } + + pub fn use_evm_emulator(&self) -> bool { + self.resolved.use_evm_emulator() + } + /// Loads block information from DB. pub async fn new( connection: &mut Connection<'_, Core>, @@ -326,8 +337,10 @@ impl BlockArgs { return Err(BlockArgsError::Missing); }; + let inner = BlockInfo::for_existing_block(connection, block_number).await?; Ok(Self { - inner: BlockInfo::for_existing_block(connection, block_number).await?, + inner, + resolved: inner.resolve(connection).await?, block_id, }) } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index c4c1ee314dbb..75788d480581 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -7,10 +7,11 @@ use test_casing::test_casing; use zksync_dal::ConnectionPool; use zksync_multivm::{interface::ExecutionResult, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_node_test_utils::{create_l1_batch, create_l2_block, prepare_recovery_snapshot}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api::state_override::{OverrideAccount, StateOverride}, + fee::Fee, fee_model::BatchFeeInput, K256PrivateKey, ProtocolVersionId, Transaction, U256, }; @@ -92,17 +93,6 @@ async fn creating_block_args_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); - assert_eq!( - pending_block_args.block_id, - api::BlockId::Number(api::BlockNumber::Pending) - ); - assert_eq!( - pending_block_args.resolved_block_number(), - snapshot_recovery.l2_block_number + 1 - ); - assert!(pending_block_args.is_pending()); - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) .await .unwrap(); @@ -121,6 +111,35 @@ async fn creating_block_args_after_snapshot_recovery() { .unwrap_err(); assert_matches!(err, BlockArgsError::Missing); + // Ensure there is a batch in the storage. 
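Since `BlockArgs` now caches the resolved block info, code inside the API server can combine the two new accessors without extra database queries. A minimal in-crate sketch, assuming only the accessors shown above (the `block_supports_evm_emulation` helper is illustrative):

// `BlockArgs` is crate-private, so a helper like this would live in the sandbox module.
fn block_supports_evm_emulation(block_args: &BlockArgs) -> bool {
    block_args.protocol_version().is_post_1_5_0() && block_args.use_evm_emulator()
}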
+ let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); + storage + .blocks_dal() + .insert_l2_block(&l2_block) + .await + .unwrap(); + storage + .blocks_dal() + .insert_mock_l1_batch(&create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1)) + .await + .unwrap(); + storage + .blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(snapshot_recovery.l1_batch_number + 1) + .await + .unwrap(); + + let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); + assert_eq!( + pending_block_args.block_id, + api::BlockId::Number(api::BlockNumber::Pending) + ); + assert_eq!( + pending_block_args.resolved_block_number(), + snapshot_recovery.l2_block_number + 2 + ); + assert!(pending_block_args.is_pending()); + let pruned_blocks = [ api::BlockNumber::Earliest, 0.into(), @@ -146,13 +165,6 @@ async fn creating_block_args_after_snapshot_recovery() { assert_matches!(err, BlockArgsError::Missing); } - let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); - storage - .blocks_dal() - .insert_l2_block(&l2_block) - .await - .unwrap(); - let latest_block_args = BlockArgs::new(&mut storage, latest_block, &start_info) .await .unwrap(); @@ -210,11 +222,16 @@ async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args let fee_input = BatchFeeInput::l1_pegged(55, 555); let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = Transaction::from(K256PrivateKey::random().create_transfer( + let tx = K256PrivateKey::random().create_transfer_with_fee( 0.into(), - base_fee, - gas_per_pubdata, - )); + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); + let tx = Transaction::from(tx); let (limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = limiter.acquire().await.unwrap(); @@ -253,7 +270,15 @@ async fn validating_transaction(set_balance: bool) { let fee_input = BatchFeeInput::l1_pegged(55, 555); let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = K256PrivateKey::random().create_transfer(0.into(), base_fee, gas_per_pubdata); + let tx = K256PrivateKey::random().create_transfer_with_fee( + 0.into(), + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); let (limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = limiter.acquire().await.unwrap(); diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 5ee9cfb8ef12..8dc7915385a1 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -1,53 +1,44 @@ //! Test utils shared among multiple modules. 
-use std::iter; +use std::{collections::HashMap, iter}; use zk_evm_1_5_0::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_contracts::{ - get_loadnext_contract, load_contract, read_bytecode, + eth_contract, get_loadnext_contract, load_contract, read_bytecode, test_contracts::LoadnextContractExecutionParams, }; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; +use zksync_node_fee_model::BatchFeeModelInputProvider; +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ - ethabi::Token, fee::Fee, l2::L2Tx, transaction_request::PaymasterParams, Address, - K256PrivateKey, L2ChainId, Nonce, H256, U256, + api::state_override::{Bytecode, OverrideAccount, OverrideState, StateOverride}, + ethabi, + ethabi::Token, + fee::Fee, + fee_model::FeeParams, + get_code_key, get_known_code_key, + l2::L2Tx, + transaction_request::{CallRequest, PaymasterParams}, + utils::storage_key_for_eth_balance, + AccountTreeId, Address, K256PrivateKey, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, + StorageKey, StorageLog, H256, U256, }; - -pub(crate) const LOAD_TEST_ADDRESS: Address = Address::repeat_byte(1); +use zksync_utils::{address_to_u256, u256_to_h256}; const EXPENSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; -pub(crate) const EXPENSIVE_CONTRACT_ADDRESS: Address = Address::repeat_byte(2); - const PRECOMPILES_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json"; -pub(crate) const PRECOMPILES_CONTRACT_ADDRESS: Address = Address::repeat_byte(3); - const COUNTER_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json"; -pub(crate) const COUNTER_CONTRACT_ADDRESS: Address = Address::repeat_byte(4); - const INFINITE_LOOP_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/infinite/infinite.sol/InfiniteLoop.json"; -pub(crate) const INFINITE_LOOP_CONTRACT_ADDRESS: Address = Address::repeat_byte(5); - -pub(crate) fn read_expensive_contract_bytecode() -> Vec { - read_bytecode(EXPENSIVE_CONTRACT_PATH) -} - -pub(crate) fn read_precompiles_contract_bytecode() -> Vec { - read_bytecode(PRECOMPILES_CONTRACT_PATH) -} - -pub(crate) fn read_counter_contract_bytecode() -> Vec { - read_bytecode(COUNTER_CONTRACT_PATH) -} - -pub(crate) fn read_infinite_loop_contract_bytecode() -> Vec { - read_bytecode(INFINITE_LOOP_CONTRACT_PATH) -} +const MULTICALL3_CONTRACT_PATH: &str = + "contracts/l2-contracts/artifacts-zk/contracts/dev-contracts/Multicall3.sol/Multicall3.json"; /// Inflates the provided bytecode by appending the specified amount of NOP instructions at the end. 
-pub(crate) fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { +fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { bytecode.extend( iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) .take(nop_count) @@ -56,25 +47,270 @@ pub(crate) fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { } fn default_fee() -> Fee { + let fee_input = ::default_batch_fee_input_scaled( + FeeParams::sensible_v1_default(), + 1.0, + 1.0, + ); + let (max_fee_per_gas, gas_per_pubdata_limit) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::default().into()); Fee { - gas_limit: 200_000.into(), - max_fee_per_gas: 55.into(), + gas_limit: 10_000_000.into(), + max_fee_per_gas: max_fee_per_gas.into(), max_priority_fee_per_gas: 0_u64.into(), - gas_per_pubdata_limit: 555.into(), + gas_per_pubdata_limit: gas_per_pubdata_limit.into(), } } +#[derive(Debug, Default)] +pub(crate) struct StateBuilder { + inner: HashMap, +} + +impl StateBuilder { + pub(crate) const LOAD_TEST_ADDRESS: Address = Address::repeat_byte(1); + pub(crate) const EXPENSIVE_CONTRACT_ADDRESS: Address = Address::repeat_byte(2); + pub(crate) const PRECOMPILES_CONTRACT_ADDRESS: Address = Address::repeat_byte(3); + const COUNTER_CONTRACT_ADDRESS: Address = Address::repeat_byte(4); + const INFINITE_LOOP_CONTRACT_ADDRESS: Address = Address::repeat_byte(5); + const MULTICALL3_ADDRESS: Address = Address::repeat_byte(6); + + pub fn with_contract(mut self, address: Address, bytecode: Vec) -> Self { + self.inner.insert( + address, + OverrideAccount { + code: Some(Bytecode::new(bytecode).unwrap()), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn inflate_bytecode(mut self, address: Address, nop_count: usize) -> Self { + let account_override = self.inner.get_mut(&address).expect("no contract"); + let bytecode = account_override.code.take().expect("no code override"); + let mut bytecode = bytecode.into_bytes(); + inflate_bytecode(&mut bytecode, nop_count); + account_override.code = Some(Bytecode::new(bytecode).unwrap()); + self + } + + pub fn with_load_test_contract(mut self) -> Self { + // Set the array length in the load test contract to 100, so that reads don't fail. 
+ let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); + self.inner.insert( + Self::LOAD_TEST_ADDRESS, + OverrideAccount { + code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), + state: Some(OverrideState::State(state)), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn with_balance(mut self, address: Address, balance: U256) -> Self { + self.inner.entry(address).or_default().balance = Some(balance); + self + } + + pub fn with_expensive_contract(self) -> Self { + self.with_contract( + Self::EXPENSIVE_CONTRACT_ADDRESS, + read_bytecode(EXPENSIVE_CONTRACT_PATH), + ) + } + + pub fn with_precompiles_contract(self) -> Self { + self.with_contract( + Self::PRECOMPILES_CONTRACT_ADDRESS, + read_bytecode(PRECOMPILES_CONTRACT_PATH), + ) + } + + pub fn with_counter_contract(self, initial_value: u64) -> Self { + let mut this = self.with_contract( + Self::COUNTER_CONTRACT_ADDRESS, + read_bytecode(COUNTER_CONTRACT_PATH), + ); + if initial_value != 0 { + let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(initial_value))]); + this.inner + .get_mut(&Self::COUNTER_CONTRACT_ADDRESS) + .unwrap() + .state = Some(OverrideState::State(state)); + } + this + } + + pub fn with_infinite_loop_contract(self) -> Self { + self.with_contract( + Self::INFINITE_LOOP_CONTRACT_ADDRESS, + read_bytecode(INFINITE_LOOP_CONTRACT_PATH), + ) + } + + pub fn with_multicall3_contract(self) -> Self { + self.with_contract( + Self::MULTICALL3_ADDRESS, + read_bytecode(MULTICALL3_CONTRACT_PATH), + ) + } + + pub fn build(self) -> StateOverride { + StateOverride::new(self.inner) + } + + /// Applies these state overrides to Postgres storage, which is assumed to be empty (other than genesis data). + pub async fn apply(self, connection: &mut Connection<'_, Core>) { + let mut storage_logs = vec![]; + let mut factory_deps = HashMap::new(); + for (address, account) in self.inner { + if let Some(balance) = account.balance { + let balance_key = storage_key_for_eth_balance(&address); + storage_logs.push(StorageLog::new_write_log( + balance_key, + u256_to_h256(balance), + )); + } + if let Some(code) = account.code { + let code_hash = code.hash(); + storage_logs.extend([ + StorageLog::new_write_log(get_code_key(&address), code_hash), + StorageLog::new_write_log( + get_known_code_key(&code_hash), + H256::from_low_u64_be(1), + ), + ]); + factory_deps.insert(code_hash, code.into_bytes()); + } + if let Some(state) = account.state { + let state_slots = match state { + OverrideState::State(slots) | OverrideState::StateDiff(slots) => slots, + }; + let state_logs = state_slots.into_iter().map(|(key, value)| { + let key = StorageKey::new(AccountTreeId::new(address), key); + StorageLog::new_write_log(key, value) + }); + storage_logs.extend(state_logs); + } + } + + connection + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &storage_logs) + .await + .unwrap(); + connection + .factory_deps_dal() + .insert_factory_deps(L2BlockNumber(0), &factory_deps) + .await + .unwrap(); + } +} + +#[derive(Debug)] +pub(crate) struct Call3Value { + target: Address, + allow_failure: bool, + value: U256, + calldata: Vec, +} + +impl Call3Value { + pub fn allow_failure(mut self) -> Self { + self.allow_failure = true; + self + } + + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::Address(self.target), + Token::Bool(self.allow_failure), + Token::Uint(self.value), + Token::Bytes(self.calldata.clone()), + ]) + } +} + +impl From for Call3Value { + fn from(req: CallRequest) -> Self { + Self { + target: 
req.to.unwrap(), + allow_failure: false, + value: req.value.unwrap_or_default(), + calldata: req.data.unwrap_or_default().0, + } + } +} + +impl From for Call3Value { + fn from(tx: L2Tx) -> Self { + Self { + target: tx.recipient_account().unwrap(), + allow_failure: false, + value: tx.execute.value, + calldata: tx.execute.calldata, + } + } +} + +#[derive(Debug)] +pub(crate) struct Call3Result { + pub success: bool, + pub return_data: Vec, +} + +impl Call3Result { + pub fn parse(raw: &[u8]) -> Vec { + let mut tokens = load_contract(MULTICALL3_CONTRACT_PATH) + .function("aggregate3Value") + .expect("no `aggregate3Value` function") + .decode_output(raw) + .expect("failed decoding `aggregate3Value` output"); + assert_eq!(tokens.len(), 1, "Invalid output length"); + let Token::Array(results) = tokens.pop().unwrap() else { + panic!("Invalid token type, expected an array"); + }; + results.into_iter().map(Self::parse_single).collect() + } + + fn parse_single(token: Token) -> Self { + let Token::Tuple(mut tokens) = token else { + panic!("Invalid token type, expected a tuple"); + }; + assert_eq!(tokens.len(), 2); + let return_data = tokens.pop().unwrap().into_bytes().expect("expected bytes"); + let success = tokens.pop().unwrap().into_bool().expect("expected bool"); + Self { + success, + return_data, + } + } + + pub fn as_u256(&self) -> U256 { + decode_u256_output(&self.return_data) + } +} + +pub(crate) fn decode_u256_output(raw_output: &[u8]) -> U256 { + let mut tokens = ethabi::decode_whole(&[ethabi::ParamType::Uint(256)], raw_output) + .expect("unexpected return data"); + assert_eq!(tokens.len(), 1); + tokens.pop().unwrap().into_uint().unwrap() +} + pub(crate) trait TestAccount { - fn create_transfer(&self, value: U256, fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { + fn create_transfer(&self, value: U256) -> L2Tx { let fee = Fee { gas_limit: 200_000.into(), - max_fee_per_gas: fee_per_gas.into(), - max_priority_fee_per_gas: 0_u64.into(), - gas_per_pubdata_limit: gas_per_pubdata.into(), + ..default_fee() }; self.create_transfer_with_fee(value, fee) } + fn query_base_token_balance(&self) -> CallRequest; + fn create_transfer_with_fee(&self, value: U256, fee: Fee) -> L2Tx; fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx; @@ -85,9 +321,13 @@ pub(crate) trait TestAccount { fn create_code_oracle_tx(&self, bytecode_hash: H256, expected_keccak_hash: H256) -> L2Tx; - fn create_reverting_counter_tx(&self) -> L2Tx; + fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx; + + fn query_counter_value(&self) -> CallRequest; fn create_infinite_loop_tx(&self) -> L2Tx; + + fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest; } impl TestAccount for K256PrivateKey { @@ -106,9 +346,23 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn query_base_token_balance(&self) -> CallRequest { + let data = eth_contract() + .function("balanceOf") + .expect("No `balanceOf` function in contract") + .encode_input(&[Token::Uint(address_to_u256(&self.address()))]) + .expect("failed encoding `balanceOf` function"); + CallRequest { + from: Some(self.address()), + to: Some(L2_BASE_TOKEN_ADDRESS), + data: Some(data.into()), + ..CallRequest::default() + } + } + fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx { L2Tx::new_signed( - Some(LOAD_TEST_ADDRESS), + Some(StateBuilder::LOAD_TEST_ADDRESS), params.to_bytes(), Nonce(0), default_fee(), @@ -132,7 +386,7 @@ impl TestAccount for K256PrivateKey { 
.encode_input(&[Token::Uint(write_count.into())]) .expect("failed encoding `expensive` function"); L2Tx::new_signed( - Some(EXPENSIVE_CONTRACT_ADDRESS), + Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -152,7 +406,7 @@ impl TestAccount for K256PrivateKey { .encode_input(&[]) .expect("failed encoding `cleanUp` input"); L2Tx::new_signed( - Some(EXPENSIVE_CONTRACT_ADDRESS), + Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -175,7 +429,7 @@ impl TestAccount for K256PrivateKey { ]) .expect("failed encoding `callCodeOracle` input"); L2Tx::new_signed( - Some(PRECOMPILES_CONTRACT_ADDRESS), + Some(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -188,14 +442,14 @@ impl TestAccount for K256PrivateKey { .unwrap() } - fn create_reverting_counter_tx(&self) -> L2Tx { + fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx { let calldata = load_contract(COUNTER_CONTRACT_PATH) .function("incrementWithRevert") .expect("no `incrementWithRevert` function") - .encode_input(&[Token::Uint(1.into()), Token::Bool(true)]) + .encode_input(&[Token::Uint(increment), Token::Bool(revert)]) .expect("failed encoding `incrementWithRevert` input"); L2Tx::new_signed( - Some(COUNTER_CONTRACT_ADDRESS), + Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -208,6 +462,20 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn query_counter_value(&self) -> CallRequest { + let calldata = load_contract(COUNTER_CONTRACT_PATH) + .function("get") + .expect("no `get` function") + .encode_input(&[]) + .expect("failed encoding `get` input"); + CallRequest { + from: Some(self.address()), + to: Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), + data: Some(calldata.into()), + ..CallRequest::default() + } + } + fn create_infinite_loop_tx(&self) -> L2Tx { let calldata = load_contract(INFINITE_LOOP_CONTRACT_PATH) .function("infiniteLoop") @@ -215,7 +483,7 @@ impl TestAccount for K256PrivateKey { .encode_input(&[]) .expect("failed encoding `infiniteLoop` input"); L2Tx::new_signed( - Some(INFINITE_LOOP_CONTRACT_ADDRESS), + Some(StateBuilder::INFINITE_LOOP_CONTRACT_ADDRESS), calldata, Nonce(0), default_fee(), @@ -227,4 +495,20 @@ impl TestAccount for K256PrivateKey { ) .unwrap() } + + fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest { + let call_tokens = calls.iter().map(Call3Value::to_token).collect(); + let calldata = load_contract(MULTICALL3_CONTRACT_PATH) + .function("aggregate3Value") + .expect("no `aggregate3Value` function") + .encode_input(&[Token::Array(call_tokens)]) + .expect("failed encoding `aggregate3Value` input"); + CallRequest { + from: Some(self.address()), + to: Some(StateBuilder::MULTICALL3_ADDRESS), + value: Some(value), + data: Some(calldata.into()), + ..CallRequest::default() + } + } } diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs index f5e42875a3d6..44e568ce4183 100644 --- a/core/node/api_server/src/tx_sender/gas_estimation.rs +++ b/core/node/api_server/src/tx_sender/gas_estimation.rs @@ -44,13 +44,14 @@ impl TxSender { pub async fn get_txs_fee_in_wei( &self, tx: Transaction, + block_args: BlockArgs, estimated_fee_scale_factor: f64, acceptable_overestimation: u64, state_override: Option, kind: BinarySearchKind, ) -> Result { let estimation_started_at = Instant::now(); - let mut estimator = GasEstimator::new(self, tx, state_override).await?; + let mut estimator = 
GasEstimator::new(self, tx, block_args, state_override).await?;
         estimator.adjust_transaction_fee();
 
         let initial_estimate = estimator.initialize().await?;
@@ -309,16 +310,10 @@ impl<'a> GasEstimator<'a> {
     pub(super) async fn new(
         sender: &'a TxSender,
         mut transaction: Transaction,
+        block_args: BlockArgs,
         state_override: Option<StateOverride>,
     ) -> Result<Self, SubmitTxError> {
-        let mut connection = sender.acquire_replica_connection().await?;
-        let block_args = BlockArgs::pending(&mut connection).await?;
-        let protocol_version = connection
-            .blocks_dal()
-            .pending_protocol_version()
-            .await
-            .context("failed getting pending protocol version")?;
-        drop(connection);
+        let protocol_version = block_args.protocol_version();
 
         let max_gas_limit = get_max_batch_gas_limit(protocol_version.into());
         let fee_input = adjust_pubdata_price_for_tx(
diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs
index ad8e38ef3cc2..2dbc0d5a0dd6 100644
--- a/core/node/api_server/src/tx_sender/mod.rs
+++ b/core/node/api_server/src/tx_sender/mod.rs
@@ -280,13 +280,11 @@ impl TxSender {
     pub async fn submit_tx(
         &self,
         tx: L2Tx,
+        block_args: BlockArgs,
     ) -> Result<(L2TxSubmissionResult, VmExecutionResultAndLogs), SubmitTxError> {
         let tx_hash = tx.hash();
 
         let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::Validate);
-        let mut connection = self.acquire_replica_connection().await?;
-        let protocol_version = connection.blocks_dal().pending_protocol_version().await?;
-        drop(connection);
-        self.validate_tx(&tx, protocol_version).await?;
+        self.validate_tx(&tx, block_args.protocol_version()).await?;
         stage_latency.observe();
 
         let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DryRun);
@@ -305,9 +303,7 @@ impl TxSender {
             tx: tx.clone(),
         };
         let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?;
-        let mut connection = self.acquire_replica_connection().await?;
-        let block_args = BlockArgs::pending(&mut connection).await?;
-
+        let connection = self.acquire_replica_connection().await?;
         let execution_output = self
             .0
             .executor
diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs
deleted file mode 100644
index 36c95fa5db0b..000000000000
--- a/core/node/api_server/src/tx_sender/tests.rs
+++ /dev/null
@@ -1,805 +0,0 @@
-//! Tests for the transaction sender.
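// A minimal sketch of the call-site pattern implied by the signature changes above: the caller
// resolves `BlockArgs` once (here for the pending block, mirroring the `BlockArgs::pending` usage
// in the new tests) and threads it into `submit_tx`, instead of each method opening its own
// replica connection. The pool, sender and transaction are assumed to come from the caller.
async fn _submit_with_block_args_sketch(
    pool: &ConnectionPool<Core>,
    tx_sender: &TxSender,
    tx: L2Tx,
) {
    let mut storage = pool.connection().await.unwrap();
    let block_args = BlockArgs::pending(&mut storage).await.unwrap();
    drop(storage);
    // The same `block_args` now also drives validation (protocol version) and the dry run.
    tx_sender.submit_tx(tx, block_args).await.unwrap();
}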
- -use std::{collections::HashMap, time::Duration}; - -use assert_matches::assert_matches; -use test_casing::{test_casing, Product, TestCases}; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_multivm::interface::ExecutionResult; -use zksync_node_fee_model::MockBatchFeeParamsProvider; -use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; -use zksync_system_constants::CODE_ORACLE_ADDRESS; -use zksync_types::{ - api, - api::state_override::{Bytecode, OverrideAccount, OverrideState}, - get_nonce_key, - web3::keccak256, - K256PrivateKey, L1BatchNumber, L2BlockNumber, StorageLog, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use zksync_vm_executor::oneshot::MockOneshotExecutor; - -use super::{gas_estimation::GasEstimator, *}; -use crate::{ - execution_sandbox::BlockStartInfo, - testonly::{ - inflate_bytecode, read_counter_contract_bytecode, read_expensive_contract_bytecode, - read_infinite_loop_contract_bytecode, read_precompiles_contract_bytecode, TestAccount, - COUNTER_CONTRACT_ADDRESS, EXPENSIVE_CONTRACT_ADDRESS, INFINITE_LOOP_CONTRACT_ADDRESS, - LOAD_TEST_ADDRESS, PRECOMPILES_CONTRACT_ADDRESS, - }, - web3::testonly::create_test_tx_sender, -}; - -/// Initial pivot multiplier empirically sufficient for most tx types. -const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0; - -#[tokio::test] -async fn getting_nonce_for_account() { - let l2_chain_id = L2ChainId::default(); - let test_address = Address::repeat_byte(1); - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) - .await - .unwrap(); - // Manually insert a nonce for the address. - let nonce_key = get_nonce_key(&test_address); - let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(123)); - storage - .storage_logs_dal() - .append_storage_logs(L2BlockNumber(0), &[nonce_log]) - .await - .unwrap(); - - let tx_executor = MockOneshotExecutor::default(); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(123)); - - // Insert another L2 block with a new nonce log. 
- storage - .blocks_dal() - .insert_l2_block(&create_l2_block(1)) - .await - .unwrap(); - let nonce_log = StorageLog { - value: H256::from_low_u64_be(321), - ..nonce_log - }; - storage - .storage_logs_dal() - .insert_storage_logs(L2BlockNumber(1), &[nonce_log]) - .await - .unwrap(); - - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(321)); - let missing_address = Address::repeat_byte(0xff); - let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); - assert_eq!(nonce, Nonce(0)); -} - -#[tokio::test] -async fn getting_nonce_for_account_after_snapshot_recovery() { - const SNAPSHOT_L2_BLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42); - - let pool = ConnectionPool::::test_pool().await; - let mut storage = pool.connection().await.unwrap(); - let test_address = Address::repeat_byte(1); - let other_address = Address::repeat_byte(2); - let nonce_logs = [ - StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)), - StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)), - ]; - prepare_recovery_snapshot( - &mut storage, - L1BatchNumber(23), - SNAPSHOT_L2_BLOCK_NUMBER, - &nonce_logs, - ) - .await; - - let l2_chain_id = L2ChainId::default(); - let tx_executor = MockOneshotExecutor::default(); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - storage - .blocks_dal() - .insert_l2_block(&create_l2_block(SNAPSHOT_L2_BLOCK_NUMBER.0 + 1)) - .await - .unwrap(); - let new_nonce_logs = vec![StorageLog::new_write_log( - get_nonce_key(&test_address), - H256::from_low_u64_be(321), - )]; - storage - .storage_logs_dal() - .insert_storage_logs(SNAPSHOT_L2_BLOCK_NUMBER + 1, &new_nonce_logs) - .await - .unwrap(); - - let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); - assert_eq!(nonce, Nonce(321)); - let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap(); - assert_eq!(nonce, Nonce(25)); - let missing_address = Address::repeat_byte(0xff); - let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); - assert_eq!(nonce, Nonce(0)); -} - -#[tokio::test] -async fn submitting_tx_requires_one_connection() { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) - .await - .unwrap(); - - let l2_chain_id = L2ChainId::default(); - let fee_input = MockBatchFeeParamsProvider::default() - .get_batch_fee_input_scaled(1.0, 1.0) - .await - .unwrap(); - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = create_l2_transaction(base_fee, gas_per_pubdata); - let tx_hash = tx.hash(); - - // Manually set sufficient balance for the tx initiator. 
- let balance_key = storage_key_for_eth_balance(&tx.initiator_account()); - let storage_log = StorageLog::new_write_log(balance_key, u256_to_h256(U256::one() << 64)); - storage - .storage_logs_dal() - .append_storage_logs(L2BlockNumber(0), &[storage_log]) - .await - .unwrap(); - drop(storage); - - let mut tx_executor = MockOneshotExecutor::default(); - tx_executor.set_tx_responses(move |received_tx, _| { - assert_eq!(received_tx.hash(), tx_hash); - ExecutionResult::Success { output: vec![] } - }); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - let submission_result = tx_sender.submit_tx(tx).await.unwrap(); - assert_matches!(submission_result.0, L2TxSubmissionResult::Added); - - let mut storage = pool.connection().await.unwrap(); - storage - .transactions_web3_dal() - .get_transaction_by_hash(tx_hash, l2_chain_id) - .await - .unwrap() - .expect("transaction is not persisted"); -} - -#[tokio::test] -async fn eth_call_requires_single_connection() { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - let genesis_params = GenesisParams::mock(); - insert_genesis_batch(&mut storage, &genesis_params) - .await - .unwrap(); - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) - .await - .unwrap(); - let block_id = api::BlockId::Number(api::BlockNumber::Latest); - let block_args = BlockArgs::new(&mut storage, block_id, &start_info) - .await - .unwrap(); - drop(storage); - - let tx = create_l2_transaction(10, 100); - let tx_hash = tx.hash(); - - let mut tx_executor = MockOneshotExecutor::default(); - tx_executor.set_call_responses(move |received_tx, _| { - assert_eq!(received_tx.hash(), tx_hash); - ExecutionResult::Success { - output: b"success!".to_vec(), - } - }); - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, _) = create_test_tx_sender( - pool.clone(), - genesis_params.config().l2_chain_id, - tx_executor, - ) - .await; - let call_overrides = CallOverrides { - enforced_base_fee: None, - }; - let output = tx_sender - .eth_call(block_args, call_overrides, tx, None) - .await - .unwrap(); - assert_eq!(output, b"success!"); -} - -async fn create_real_tx_sender() -> TxSender { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - let genesis_params = GenesisParams::mock(); - insert_genesis_batch(&mut storage, &genesis_params) - .await - .unwrap(); - drop(storage); - - let genesis_config = genesis_params.config(); - let executor_options = SandboxExecutorOptions::new( - genesis_config.l2_chain_id, - AccountTreeId::new(genesis_config.fee_account), - u32::MAX, - ) - .await - .unwrap(); - - let pg_caches = PostgresStorageCaches::new(1, 1); - let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); - create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) - .await - .0 -} - -#[tokio::test] -async fn initial_gas_estimation_is_somewhat_accurate() { - let tx_sender = create_real_tx_sender().await; - - let alice = K256PrivateKey::random(); - let transfer_value = U256::from(1_000_000_000); - let account_overrides = OverrideAccount { - balance: Some(transfer_value * 2), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); - // fee params don't matter; we adjust via `adjust_transaction_fee()` - let tx = 
alice.create_transfer(transfer_value, 55, 555); - - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - let initial_estimate = estimator.initialize().await.unwrap(); - assert!(initial_estimate.gas_charged_for_pubdata > 0); - assert!(initial_estimate.operator_overhead > 0); - let total_gas_charged = initial_estimate.total_gas_charged.unwrap(); - assert!( - total_gas_charged - > initial_estimate.gas_charged_for_pubdata + initial_estimate.operator_overhead, - "{initial_estimate:?}" - ); - - // Check that a transaction fails if supplied with the lower bound. - let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() - + initial_estimate.operator_overhead; - assert!(lower_bound < total_gas_charged, "{initial_estimate:?}"); - let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); - - // A slightly larger limit should work. - let initial_pivot = total_gas_charged * 64 / 63; - let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); -} - -const LOAD_TEST_CASES: TestCases = test_casing::cases! {[ - LoadnextContractExecutionParams::default(), - // No storage modification - LoadnextContractExecutionParams { - writes: 0, - events: 0, - ..LoadnextContractExecutionParams::default() - }, - // Moderately deep recursion (very deep recursion is tested separately) - LoadnextContractExecutionParams { - recursive_calls: 10, - ..LoadnextContractExecutionParams::default() - }, - // No deploys - LoadnextContractExecutionParams { - deploys: 0, - ..LoadnextContractExecutionParams::default() - }, - // Lots of deploys - LoadnextContractExecutionParams { - deploys: 10, - ..LoadnextContractExecutionParams::default() - }, -]}; - -#[test_casing(5, LOAD_TEST_CASES)] -#[tokio::test] -async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractExecutionParams) { - let alice = K256PrivateKey::random(); - // Set the array length in the load test contract to 100, so that reads don't fail. - let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(tx_params); - - test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; -} - -#[test_casing(2, [false, true])] -#[tokio::test] -async fn initial_estimate_for_deep_recursion(with_reads: bool) { - let alice = K256PrivateKey::random(); - let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - - // Reads are chosen because they represent the worst case. Reads don't influence the amount of pubdata; - // i.e., they don't make it easier to execute a transaction because of additional gas reserved for pubdata. 
- // OTOH, reads still increase the amount of computational gas used on each nested call. - // - // Initial pivot multipliers below are the smallest ones with 0.1 precision. `DEFAULT_MULTIPLIER` works for smaller - // recursion depths because the transaction emits enough pubdata to cover gas deductions due to the 63/64 rule. - let depths_and_multipliers: &[_] = if with_reads { - &[(25, DEFAULT_MULTIPLIER), (50, 1.2), (75, 1.4), (100, 1.7)] - } else { - &[ - (50, DEFAULT_MULTIPLIER), - (75, 1.2), - (100, 1.4), - (125, 1.7), - (150, 2.1), - ] - }; - for &(recursion_depth, multiplier) in depths_and_multipliers { - println!("Testing recursion depth {recursion_depth}"); - let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { - recursive_calls: recursion_depth, - reads: if with_reads { 10 } else { 0 }, - ..LoadnextContractExecutionParams::empty() - }); - test_initial_estimate(state_override.clone(), tx, multiplier).await; - } -} - -#[tokio::test] -async fn initial_estimate_for_deep_recursion_with_large_bytecode() { - let alice = K256PrivateKey::random(); - let mut contract_bytecode = get_loadnext_contract().bytecode; - inflate_bytecode(&mut contract_bytecode, 50_000); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { - recursive_calls: 100, - ..LoadnextContractExecutionParams::empty() - }); - - test_initial_estimate(state_override, tx, 1.35).await; -} - -/// Tests the lower bound and initial pivot extracted from the initial estimate (one with effectively infinite gas amount). -/// Returns the VM result for a VM run with the initial pivot. -async fn test_initial_estimate( - state_override: StateOverride, - tx: L2Tx, - initial_pivot_multiplier: f64, -) -> VmExecutionResultAndLogs { - let tx_sender = create_real_tx_sender().await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - let initial_estimate = estimator.initialize().await.unwrap(); - - let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() - + initial_estimate.operator_overhead; - let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); - - // A slightly larger limit should work. - let initial_pivot = - (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64; - let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); - vm_result -} - -async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { - let tx_sender = create_real_tx_sender().await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - estimator.initialize().await.unwrap_err() -} - -/// Estimates both transactions with initial writes and cleanup. 
-#[test_casing(4, [10, 50, 200, 1_000])] -#[tokio::test] -async fn initial_estimate_for_expensive_contract(write_count: usize) { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_expensive_contract_bytecode(); - let mut contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides.clone(), - )])); - let tx = alice.create_expensive_tx(write_count); - - let vm_result = test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; - - let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| { - (*log.log.key.address() == EXPENSIVE_CONTRACT_ADDRESS) - .then_some((*log.log.key.key(), log.log.value)) - }); - let contract_logs: HashMap<_, _> = contract_logs.collect(); - assert!(contract_logs.len() >= write_count, "{contract_logs:?}"); - contract_overrides.state = Some(OverrideState::StateDiff(contract_logs)); - - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides, - )])); - let tx = alice.create_expensive_cleanup_tx(); - - test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; -} - -#[tokio::test] -async fn initial_estimate_for_code_oracle_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_precompiles_contract_bytecode(); - let contract_bytecode_hash = hash_bytecode(&contract_bytecode); - let contract_keccak_hash = H256(keccak256(&contract_bytecode)); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - // Add another contract that is never executed, but has a large bytecode. - let huge_contact_address = Address::repeat_byte(23); - let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); - let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); - let huge_contract_overrides = OverrideAccount { - code: Some(Bytecode::new(huge_contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([ - (PRECOMPILES_CONTRACT_ADDRESS, contract_overrides), - (huge_contact_address, huge_contract_overrides), - ])); - - // Test contracts that are already decommitted when requested from the precompiles test contract. 
- let genesis_params = GenesisParams::mock(); - let code_oracle_bytecode = genesis_params - .system_contracts() - .iter() - .find_map(|contract| { - (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode) - }) - .expect("no code oracle"); - let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode); - let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode)); - - let warm_bytecode_hashes = [ - (code_oracle_bytecode_hash, code_oracle_keccak_hash), - (contract_bytecode_hash, contract_keccak_hash), - ]; - let mut decomitter_stats = 0.0; - for (hash, keccak_hash) in warm_bytecode_hashes { - println!("Testing bytecode: {hash:?}"); - let tx = alice.create_code_oracle_tx(hash, keccak_hash); - let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; - let stats = &vm_result.statistics.circuit_statistic; - decomitter_stats = stats.code_decommitter.max(decomitter_stats); - } - assert!(decomitter_stats > 0.0); - - println!("Testing large bytecode"); - let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); - let vm_result = test_initial_estimate(state_override, tx, 1.05).await; - // Sanity check: the transaction should spend significantly more on decommitment compared to previous ones - let new_decomitter_stats = vm_result.statistics.circuit_statistic.code_decommitter; - assert!( - new_decomitter_stats > decomitter_stats * 1.5, - "old={decomitter_stats}, new={new_decomitter_stats}" - ); -} - -#[tokio::test] -async fn initial_estimate_with_large_free_bytecode() { - let alice = K256PrivateKey::random(); - let mut contract_bytecode = read_precompiles_contract_bytecode(); - inflate_bytecode(&mut contract_bytecode, 50_000); - let contract_bytecode_hash = hash_bytecode(&contract_bytecode); - let contract_keccak_hash = H256(keccak256(&contract_bytecode)); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([( - PRECOMPILES_CONTRACT_ADDRESS, - contract_overrides, - )])); - // Ask the test contract to decommit itself. This should refund the decommit costs, but it will be charged at first. 
- let tx = alice.create_code_oracle_tx(contract_bytecode_hash, contract_keccak_hash); - test_initial_estimate(state_override, tx, 1.05).await; -} - -#[tokio::test] -async fn revert_during_initial_estimate() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_counter_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - COUNTER_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_reverting_counter_tx(); - let err = test_initial_estimate_error(state_override, tx).await; - let SubmitTxError::ExecutionReverted(err, _) = err else { - panic!("Unexpected error: {err:?}"); - }; - assert_eq!(err, "This method always reverts"); -} - -#[tokio::test] -async fn out_of_gas_during_initial_estimate() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_infinite_loop_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - INFINITE_LOOP_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_infinite_loop_tx(); - let err = test_initial_estimate_error(state_override, tx).await; - // Unfortunately, we don't provide human-readable out-of-gas errors at the time - assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); -} - -#[tokio::test] -async fn insufficient_funds_error_for_transfer() { - let tx_sender = create_real_tx_sender().await; - - let alice = K256PrivateKey::random(); - let transfer_value = 1_000_000_000.into(); - // fee params don't matter; they should be overwritten by the estimation logic - let tx = alice.create_transfer(transfer_value, 55, 555); - let fee_scale_factor = 1.0; - // Without overrides, the transaction should fail because of insufficient balance. 
- let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - 1_000, - None, - BinarySearchKind::Full, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::InsufficientFundsForTransfer); -} - -async fn test_estimating_gas( - state_override: StateOverride, - tx: L2Tx, - acceptable_overestimation: u64, -) { - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let fee = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - BinarySearchKind::Full, - ) - .await - .unwrap(); - // Sanity-check gas limit - let gas_limit_after_full_search = u64::try_from(fee.gas_limit).unwrap(); - assert!( - (10_000..10_000_000).contains(&gas_limit_after_full_search), - "{fee:?}" - ); - - let fee = tx_sender - .get_txs_fee_in_wei( - tx.into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - BinarySearchKind::Optimized, - ) - .await - .unwrap(); - let gas_limit_after_optimized_search = u64::try_from(fee.gas_limit).unwrap(); - - let diff = gas_limit_after_full_search.abs_diff(gas_limit_after_optimized_search); - assert!( - diff <= acceptable_overestimation, - "full={gas_limit_after_full_search}, optimized={gas_limit_after_optimized_search}" - ); -} - -#[test_casing(3, [0, 100, 1_000])] -#[tokio::test] -async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { - let alice = K256PrivateKey::random(); - let transfer_value = 1_000_000_000.into(); - let account_overrides = OverrideAccount { - balance: Some(transfer_value * 2), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); - // fee params don't matter; they should be overwritten by the estimation logic - let tx = alice.create_transfer(transfer_value, 55, 555); - - test_estimating_gas(state_override, tx, acceptable_overestimation).await; -} - -#[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] -#[tokio::test] -async fn estimating_gas_for_load_test_tx( - tx_params: LoadnextContractExecutionParams, - acceptable_overestimation: u64, -) { - let alice = K256PrivateKey::random(); - // Set the array length in the load test contract to 100, so that reads don't fail. 
- let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(tx_params); - - test_estimating_gas(state_override, tx, acceptable_overestimation).await; -} - -#[test_casing(4, [10, 50, 100, 200])] -#[tokio::test] -async fn estimating_gas_for_expensive_txs(write_count: usize) { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_expensive_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides.clone(), - )])); - let tx = alice.create_expensive_tx(write_count); - - test_estimating_gas(state_override, tx, 0).await; -} - -#[tokio::test] -async fn estimating_gas_for_code_oracle_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_precompiles_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - // Add another contract that is never executed, but has a large bytecode. - let huge_contact_address = Address::repeat_byte(23); - let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); - let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); - let huge_contract_overrides = OverrideAccount { - code: Some(Bytecode::new(huge_contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([ - (PRECOMPILES_CONTRACT_ADDRESS, contract_overrides), - (huge_contact_address, huge_contract_overrides), - ])); - let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); - - test_estimating_gas(state_override, tx, 0).await; -} - -#[tokio::test] -async fn estimating_gas_for_reverting_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_counter_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - COUNTER_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_reverting_counter_tx(); - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let acceptable_overestimation = 0; - for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { - let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - binary_search_kind, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::ExecutionReverted(..)); - } -} - -#[tokio::test] -async fn estimating_gas_for_infinite_loop_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_infinite_loop_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = 
StateOverride::new(HashMap::from([( - INFINITE_LOOP_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_infinite_loop_tx(); - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let acceptable_overestimation = 0; - for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { - let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - binary_search_kind, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); - } -} diff --git a/core/node/api_server/src/tx_sender/tests/call.rs b/core/node/api_server/src/tx_sender/tests/call.rs new file mode 100644 index 000000000000..e43f55b2b9af --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/call.rs @@ -0,0 +1,253 @@ +//! Tests for `eth_call`. + +use std::collections::HashMap; + +use assert_matches::assert_matches; +use zksync_multivm::interface::ExecutionResult; +use zksync_node_test_utils::create_l2_transaction; +use zksync_types::{ + api::state_override::OverrideAccount, transaction_request::CallRequest, K256PrivateKey, +}; + +use super::*; +use crate::testonly::{decode_u256_output, Call3Result, Call3Value, StateBuilder, TestAccount}; + +#[tokio::test] +async fn eth_call_requires_single_connection() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + let genesis_params = GenesisParams::mock(); + insert_genesis_batch(&mut storage, &genesis_params) + .await + .unwrap(); + let block_args = BlockArgs::pending(&mut storage).await.unwrap(); + drop(storage); + + let tx = create_l2_transaction(10, 100); + let tx_hash = tx.hash(); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_call_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { + output: b"success!".to_vec(), + } + }); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender( + pool.clone(), + genesis_params.config().l2_chain_id, + tx_executor, + ) + .await; + let call_overrides = CallOverrides { + enforced_base_fee: None, + }; + let output = tx_sender + .eth_call(block_args, call_overrides, tx, None) + .await + .unwrap(); + assert_eq!(output, b"success!"); +} + +async fn test_call( + tx_sender: &TxSender, + state_override: StateOverride, + mut call: CallRequest, +) -> Result, SubmitTxError> { + call.gas = call.gas.max(Some(10_000_000.into())); + let call = L2Tx::from_request(call.into(), usize::MAX, true).unwrap(); + + let mut storage = tx_sender + .0 + .replica_connection_pool + .connection() + .await + .unwrap(); + let block_args = BlockArgs::pending(&mut storage).await.unwrap(); + drop(storage); + let call_overrides = CallOverrides { + enforced_base_fee: None, + }; + + tx_sender + .eth_call(block_args, call_overrides, call, Some(state_override)) + .await +} + +#[tokio::test] +async fn eth_call_with_balance() { + let alice = K256PrivateKey::random(); + let initial_balance = 123_456_789.into(); + let account_overrides = OverrideAccount { + balance: Some(initial_balance), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let call = alice.query_base_token_balance(); + let output = 
test_call(&tx_sender, state_override, call).await.unwrap(); + assert_eq!(decode_u256_output(&output), initial_balance); +} + +#[tokio::test] +async fn eth_call_with_transfer() { + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + let initial_balance = transfer_value * 5 / 3; + let state_override = StateBuilder::default() + .with_multicall3_contract() + .with_balance(alice.address(), initial_balance) + .build(); + + let transfer = alice.create_transfer(transfer_value); + let multicall = alice.multicall_with_value( + transfer_value, + &[transfer.into(), alice.query_base_token_balance().into()], + ); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call(&tx_sender, state_override, multicall) + .await + .unwrap(); + let call_results = Call3Result::parse(&output); + assert_eq!(call_results.len(), 2); + assert!( + call_results[0].success && call_results[1].success, + "{call_results:?}" + ); + assert!(call_results[0].return_data.is_empty(), "{call_results:?}"); + + let balance = call_results[1].as_u256(); + // The bootloader doesn't compute gas refunds in the call mode, so the equality is exact + assert_eq!(balance, initial_balance - transfer_value); +} + +#[tokio::test] +async fn eth_call_with_counter() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(42).build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call( + &tx_sender, + state_override.clone(), + alice.query_counter_value(), + ) + .await + .unwrap(); + assert_eq!(decode_u256_output(&output), 42.into()); + + let tx_as_call = alice.create_counter_tx(3.into(), false).into(); + let output = test_call(&tx_sender, state_override.clone(), tx_as_call) + .await + .unwrap(); + assert_eq!(decode_u256_output(&output), 45.into()); + + let tx_as_call = alice.create_counter_tx(3.into(), true).into(); + let err = test_call(&tx_sender, state_override, tx_as_call) + .await + .unwrap_err(); + assert_matches!( + err, + SubmitTxError::ExecutionReverted(msg, _) if msg.contains("This method always reverts") + ); +} + +#[tokio::test] +async fn eth_call_with_counter_transactions() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_multicall3_contract() + .with_counter_contract(0) + .build(); + + let multicall = alice.multicall_with_value( + 0.into(), + &[ + alice.create_counter_tx(1.into(), false).into(), + Call3Value::from(alice.create_counter_tx(2.into(), true)).allow_failure(), + alice.query_counter_value().into(), + alice.create_counter_tx(3.into(), false).into(), + ], + ); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call(&tx_sender, state_override, multicall) + .await + .unwrap(); + let call_results = Call3Result::parse(&output); + + assert_eq!( + call_results + .iter() + .map(|result| result.success) + .collect::>(), + [true, false, true, true] + ); + let counter_values: Vec<_> = call_results + .iter() + .filter_map(|result| { + if !result.success { + return None; + } + Some(decode_u256_output(&result.return_data).as_u32()) + }) + .collect(); + assert_eq!(counter_values, [1, 1, 4]); +} + +#[tokio::test] +async fn eth_call_out_of_gas() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + 
.with_infinite_loop_contract() + .build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let tx_as_call = alice.create_infinite_loop_tx().into(); + let err = test_call(&tx_sender, state_override, tx_as_call) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(..)); +} + +#[tokio::test] +async fn eth_call_with_load_test_transactions() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + // Deploys (naturally) don't work for calls, hence a separate set of test cases. + let load_test_cases_for_call = [ + LoadnextContractExecutionParams { + deploys: 0, + ..LoadnextContractExecutionParams::default() + }, + LoadnextContractExecutionParams { + deploys: 0, + recursive_calls: 20, + ..LoadnextContractExecutionParams::default() + }, + LoadnextContractExecutionParams { + reads: 100, + writes: 100, + ..LoadnextContractExecutionParams::empty() + }, + ]; + + for tx_params in load_test_cases_for_call { + println!("Executing {tx_params:?}"); + let tx_as_call = alice.create_load_test_tx(tx_params).into(); + test_call(&tx_sender, state_override.clone(), tx_as_call) + .await + .unwrap(); + } +} diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs new file mode 100644 index 000000000000..3fd5fcb51881 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -0,0 +1,478 @@ +//! Tests for gas estimation (mostly with the real oneshot VM executor). + +use std::collections::HashMap; + +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; +use zksync_system_constants::CODE_ORACLE_ADDRESS; +use zksync_types::{ + api::state_override::{OverrideAccount, OverrideState}, + web3::keccak256, + K256PrivateKey, +}; +use zksync_utils::bytecode::hash_bytecode; + +use super::*; +use crate::{ + testonly::{StateBuilder, TestAccount}, + tx_sender::gas_estimation::GasEstimator, +}; + +/// Initial pivot multiplier empirically sufficient for most tx types. +const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0; + +#[tokio::test] +async fn initial_gas_estimation_is_somewhat_accurate() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let alice = K256PrivateKey::random(); + let transfer_value = U256::from(1_000_000_000); + let account_overrides = OverrideAccount { + balance: Some(transfer_value * 2), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + let tx = alice.create_transfer(transfer_value); + + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + assert!(initial_estimate.gas_charged_for_pubdata > 0); + assert!(initial_estimate.operator_overhead > 0); + let total_gas_charged = initial_estimate.total_gas_charged.unwrap(); + assert!( + total_gas_charged + > initial_estimate.gas_charged_for_pubdata + initial_estimate.operator_overhead, + "{initial_estimate:?}" + ); + + // Check that a transaction fails if supplied with the lower bound. 
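// Why 64/63 (the `DEFAULT_MULTIPLIER` above): under the 63/64 rule referenced throughout these
// tests, a frame can forward at most 63/64 of its remaining gas to a nested call. So if the
// unlimited run reports `total_gas_charged`, supplying roughly `total_gas_charged * 64 / 63` at the
// top level still leaves about `total_gas_charged` available one call frame deep:
//     supplied  = total_gas_charged * 64 / 63
//     forwarded = supplied * 63 / 64  // ≈ total_gas_charged, up to rounding
// Deeper recursion compounds the deduction, which is why the recursion cases below move to larger
// multipliers (1.2, 1.4, 1.7, 2.1). This reading is inferred from the constants rather than spelled
// out in the code.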
+ let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + + initial_estimate.operator_overhead; + assert!(lower_bound < total_gas_charged, "{initial_estimate:?}"); + let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + + // A slightly larger limit should work. + let initial_pivot = total_gas_charged * 64 / 63; + let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let alice = K256PrivateKey::random(); + // Set the array length in the load test contract to 100, so that reads don't fail. + let state_override = StateBuilder::default().with_load_test_contract().build(); + let tx = alice.create_load_test_tx(tx_params); + + test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn initial_estimate_for_deep_recursion(with_reads: bool) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + + // Reads are chosen because they represent the worst case. Reads don't influence the amount of pubdata; + // i.e., they don't make it easier to execute a transaction because of additional gas reserved for pubdata. + // OTOH, reads still increase the amount of computational gas used on each nested call. + // + // Initial pivot multipliers below are the smallest ones with 0.1 precision. `DEFAULT_MULTIPLIER` works for smaller + // recursion depths because the transaction emits enough pubdata to cover gas deductions due to the 63/64 rule. + let depths_and_multipliers: &[_] = if with_reads { + &[(25, DEFAULT_MULTIPLIER), (50, 1.2), (75, 1.4), (100, 1.7)] + } else { + &[ + (50, DEFAULT_MULTIPLIER), + (75, 1.2), + (100, 1.4), + (125, 1.7), + (150, 2.1), + ] + }; + for &(recursion_depth, multiplier) in depths_and_multipliers { + println!("Testing recursion depth {recursion_depth}"); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: recursion_depth, + reads: if with_reads { 10 } else { 0 }, + ..LoadnextContractExecutionParams::empty() + }); + test_initial_estimate(state_override.clone(), tx, multiplier).await; + } +} + +#[tokio::test] +async fn initial_estimate_for_deep_recursion_with_large_bytecode() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_load_test_contract() + .inflate_bytecode(StateBuilder::LOAD_TEST_ADDRESS, 50_000) + .build(); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: 100, + ..LoadnextContractExecutionParams::empty() + }); + + test_initial_estimate(state_override, tx, 1.35).await; +} + +/// Tests the lower bound and initial pivot extracted from the initial estimate (one with effectively infinite gas amount). +/// Returns the VM result for a VM run with the initial pivot. 
+async fn test_initial_estimate( + state_override: StateOverride, + tx: L2Tx, + initial_pivot_multiplier: f64, +) -> VmExecutionResultAndLogs { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + + let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + + initial_estimate.operator_overhead; + let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + + // A slightly larger limit should work. + let initial_pivot = + (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64; + let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + vm_result +} + +async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + estimator.initialize().await.unwrap_err() +} + +/// Estimates both transactions with initial writes and cleanup. +#[test_casing(4, [10, 50, 200, 1_000])] +#[tokio::test] +async fn initial_estimate_for_expensive_contract(write_count: usize) { + let alice = K256PrivateKey::random(); + let mut state_override = StateBuilder::default().with_expensive_contract().build(); + let tx = alice.create_expensive_tx(write_count); + + let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; + + let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| { + (*log.log.key.address() == StateBuilder::EXPENSIVE_CONTRACT_ADDRESS) + .then_some((*log.log.key.key(), log.log.value)) + }); + let contract_logs: HashMap<_, _> = contract_logs.collect(); + assert!(contract_logs.len() >= write_count, "{contract_logs:?}"); + + state_override + .get_mut(&StateBuilder::EXPENSIVE_CONTRACT_ADDRESS) + .unwrap() + .state = Some(OverrideState::StateDiff(contract_logs)); + let tx = alice.create_expensive_cleanup_tx(); + test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; +} + +#[tokio::test] +async fn initial_estimate_for_code_oracle_tx() { + let alice = K256PrivateKey::random(); + // Add another contract that is never executed, but has a large bytecode. 
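// The never-executed contract set up below appears to act as a decommitment-cost probe: the two
// warm bytecode hashes establish a baseline for the code decommitter circuit statistic, and the
// final transaction that decommits the huge bytecode must exceed that baseline by more than 1.5x
// (see the assertions at the end of this test).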
+ let huge_contact_address = Address::repeat_byte(23); + let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; + let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); + let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); + + let state_override = StateBuilder::default() + .with_precompiles_contract() + .with_contract(huge_contact_address, huge_contract_bytecode) + .build(); + + let contract_override = state_override + .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS) + .unwrap(); + let contract_bytecode = contract_override.code.as_ref().unwrap(); + let contract_bytecode_hash = contract_bytecode.hash(); + let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref())); + + // Test contracts that are already decommitted when requested from the precompiles test contract. + let genesis_params = GenesisParams::mock(); + let code_oracle_bytecode = genesis_params + .system_contracts() + .iter() + .find_map(|contract| { + (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode) + }) + .expect("no code oracle"); + let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode); + let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode)); + + let warm_bytecode_hashes = [ + (code_oracle_bytecode_hash, code_oracle_keccak_hash), + (contract_bytecode_hash, contract_keccak_hash), + ]; + let mut decomitter_stats = 0.0; + for (hash, keccak_hash) in warm_bytecode_hashes { + println!("Testing bytecode: {hash:?}"); + let tx = alice.create_code_oracle_tx(hash, keccak_hash); + let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; + let stats = &vm_result.statistics.circuit_statistic; + decomitter_stats = stats.code_decommitter.max(decomitter_stats); + } + assert!(decomitter_stats > 0.0); + + println!("Testing large bytecode"); + let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); + let vm_result = test_initial_estimate(state_override, tx, 1.05).await; + // Sanity check: the transaction should spend significantly more on decommitment compared to previous ones + let new_decomitter_stats = vm_result.statistics.circuit_statistic.code_decommitter; + assert!( + new_decomitter_stats > decomitter_stats * 1.5, + "old={decomitter_stats}, new={new_decomitter_stats}" + ); +} + +#[tokio::test] +async fn initial_estimate_with_large_free_bytecode() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_precompiles_contract() + .inflate_bytecode(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS, 50_000) + .build(); + let contract_override = state_override + .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS) + .unwrap(); + let contract_bytecode = contract_override.code.as_ref().unwrap(); + let contract_bytecode_hash = contract_bytecode.hash(); + let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref())); + + // Ask the test contract to decommit itself. This should refund the decommit costs, but it will be charged at first. 
+ let tx = alice.create_code_oracle_tx(contract_bytecode_hash, contract_keccak_hash); + test_initial_estimate(state_override, tx, 1.05).await; +} + +#[tokio::test] +async fn revert_during_initial_estimate() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + + let tx = alice.create_counter_tx(1.into(), true); + let err = test_initial_estimate_error(state_override, tx).await; + let SubmitTxError::ExecutionReverted(err, _) = err else { + panic!("Unexpected error: {err:?}"); + }; + assert_eq!(err, "This method always reverts"); +} + +#[tokio::test] +async fn out_of_gas_during_initial_estimate() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_infinite_loop_contract() + .build(); + + let tx = alice.create_infinite_loop_tx(); + let err = test_initial_estimate_error(state_override, tx).await; + // Unfortunately, we don't provide human-readable out-of-gas errors at the time + assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); +} + +#[tokio::test] +async fn insufficient_funds_error_for_transfer() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let alice = K256PrivateKey::random(); + let tx = alice.create_transfer(1_000_000_000.into()); + let fee_scale_factor = 1.0; + // Without overrides, the transaction should fail because of insufficient balance. + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + block_args, + fee_scale_factor, + 1_000, + None, + BinarySearchKind::Full, + ) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::InsufficientFundsForTransfer); +} + +async fn test_estimating_gas( + state_override: StateOverride, + tx: L2Tx, + acceptable_overestimation: u64, +) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let fee_scale_factor = 1.0; + let fee = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + block_args.clone(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + BinarySearchKind::Full, + ) + .await + .unwrap(); + // Sanity-check gas limit + let gas_limit_after_full_search = u64::try_from(fee.gas_limit).unwrap(); + assert!( + (10_000..10_000_000).contains(&gas_limit_after_full_search), + "{fee:?}" + ); + + let fee = tx_sender + .get_txs_fee_in_wei( + tx.into(), + block_args, + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + BinarySearchKind::Optimized, + ) + .await + .unwrap(); + let gas_limit_after_optimized_search = u64::try_from(fee.gas_limit).unwrap(); + + let diff = gas_limit_after_full_search.abs_diff(gas_limit_after_optimized_search); + assert!( + diff <= acceptable_overestimation, + "full={gas_limit_after_full_search}, optimized={gas_limit_after_optimized_search}" + ); +} + +#[test_casing(3, [0, 100, 1_000])] +#[tokio::test] +async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + let account_overrides = OverrideAccount { + balance: Some(transfer_value * 2), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + let tx = alice.create_transfer(transfer_value); + + 
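// A short gloss on `acceptable_overestimation`, inferred from the assertions in
// `test_estimating_gas` above: the full and optimized binary searches must agree to within that
// many gas units, so the 0 used by several tests demands an exact match, while 100 or 1_000
// tolerates a correspondingly coarser optimized result.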
test_estimating_gas(state_override, tx, acceptable_overestimation).await; +} + +#[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] +#[tokio::test] +async fn estimating_gas_for_load_test_tx( + tx_params: LoadnextContractExecutionParams, + acceptable_overestimation: u64, +) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + let tx = alice.create_load_test_tx(tx_params); + + test_estimating_gas(state_override, tx, acceptable_overestimation).await; +} + +#[test_casing(4, [10, 50, 100, 200])] +#[tokio::test] +async fn estimating_gas_for_expensive_txs(write_count: usize) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_expensive_contract().build(); + let tx = alice.create_expensive_tx(write_count); + + test_estimating_gas(state_override, tx, 0).await; +} + +#[tokio::test] +async fn estimating_gas_for_code_oracle_tx() { + let alice = K256PrivateKey::random(); + // Add another contract that is never executed, but has a large bytecode. + let huge_contact_address = Address::repeat_byte(23); + let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; + let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); + let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); + + let state_override = StateBuilder::default() + .with_precompiles_contract() + .with_contract(huge_contact_address, huge_contract_bytecode) + .build(); + let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); + + test_estimating_gas(state_override, tx, 0).await; +} + +#[tokio::test] +async fn estimating_gas_for_reverting_tx() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + + let tx = alice.create_counter_tx(1.into(), true); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let fee_scale_factor = 1.0; + let acceptable_overestimation = 0; + for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + block_args.clone(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + binary_search_kind, + ) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(..)); + } +} + +#[tokio::test] +async fn estimating_gas_for_infinite_loop_tx() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_infinite_loop_contract() + .build(); + + let tx = alice.create_infinite_loop_tx(); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + + let fee_scale_factor = 1.0; + let acceptable_overestimation = 0; + for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + block_args.clone(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + binary_search_kind, + ) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); + } +} diff --git a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs new file mode 100644 index 000000000000..cacd616202d2 --- 
/dev/null +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -0,0 +1,166 @@ +//! Tests for the transaction sender. + +use test_casing::TestCases; +use zksync_contracts::test_contracts::LoadnextContractExecutionParams; +use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; +use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_types::{get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; +use zksync_vm_executor::oneshot::MockOneshotExecutor; + +use super::*; +use crate::web3::testonly::create_test_tx_sender; + +mod call; +mod gas_estimation; +mod send_tx; + +const LOAD_TEST_CASES: TestCases = test_casing::cases! {[ + LoadnextContractExecutionParams::default(), + // No storage modification + LoadnextContractExecutionParams { + writes: 0, + events: 0, + ..LoadnextContractExecutionParams::default() + }, + // Moderately deep recursion (very deep recursion is tested separately) + LoadnextContractExecutionParams { + recursive_calls: 10, + ..LoadnextContractExecutionParams::default() + }, + // No deploys + LoadnextContractExecutionParams { + deploys: 0, + ..LoadnextContractExecutionParams::default() + }, + // Lots of deploys + LoadnextContractExecutionParams { + deploys: 10, + ..LoadnextContractExecutionParams::default() + }, +]}; + +#[tokio::test] +async fn getting_nonce_for_account() { + let l2_chain_id = L2ChainId::default(); + let test_address = Address::repeat_byte(1); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + // Manually insert a nonce for the address. + let nonce_key = get_nonce_key(&test_address); + let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(123)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[nonce_log]) + .await + .unwrap(); + + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(123)); + + // Insert another L2 block with a new nonce log. 
+ storage + .blocks_dal() + .insert_l2_block(&create_l2_block(1)) + .await + .unwrap(); + let nonce_log = StorageLog { + value: H256::from_low_u64_be(321), + ..nonce_log + }; + storage + .storage_logs_dal() + .insert_storage_logs(L2BlockNumber(1), &[nonce_log]) + .await + .unwrap(); + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(321)); + let missing_address = Address::repeat_byte(0xff); + let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); + assert_eq!(nonce, Nonce(0)); +} + +#[tokio::test] +async fn getting_nonce_for_account_after_snapshot_recovery() { + const SNAPSHOT_L2_BLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42); + + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + let test_address = Address::repeat_byte(1); + let other_address = Address::repeat_byte(2); + let nonce_logs = [ + StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)), + StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)), + ]; + prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(23), + SNAPSHOT_L2_BLOCK_NUMBER, + &nonce_logs, + ) + .await; + + let l2_chain_id = L2ChainId::default(); + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + storage + .blocks_dal() + .insert_l2_block(&create_l2_block(SNAPSHOT_L2_BLOCK_NUMBER.0 + 1)) + .await + .unwrap(); + let new_nonce_logs = vec![StorageLog::new_write_log( + get_nonce_key(&test_address), + H256::from_low_u64_be(321), + )]; + storage + .storage_logs_dal() + .insert_storage_logs(SNAPSHOT_L2_BLOCK_NUMBER + 1, &new_nonce_logs) + .await + .unwrap(); + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(321)); + let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap(); + assert_eq!(nonce, Nonce(25)); + let missing_address = Address::repeat_byte(0xff); + let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); + assert_eq!(nonce, Nonce(0)); +} + +async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { + let mut storage = pool.connection().await.unwrap(); + let genesis_params = GenesisParams::mock(); + insert_genesis_batch(&mut storage, &genesis_params) + .await + .unwrap(); + drop(storage); + + let genesis_config = genesis_params.config(); + let executor_options = SandboxExecutorOptions::new( + genesis_config.l2_chain_id, + AccountTreeId::new(genesis_config.fee_account), + u32::MAX, + ) + .await + .unwrap(); + + let pg_caches = PostgresStorageCaches::new(1, 1); + let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); + create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) + .await + .0 +} + +async fn pending_block_args(tx_sender: &TxSender) -> BlockArgs { + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + BlockArgs::pending(&mut storage).await.unwrap() +} diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs new file mode 100644 index 000000000000..fdd63254cf07 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -0,0 +1,300 @@ +//! Tests for sending raw transactions. 
+ +use assert_matches::assert_matches; +use test_casing::test_casing; +use zksync_multivm::interface::ExecutionResult; +use zksync_node_fee_model::MockBatchFeeParamsProvider; +use zksync_node_test_utils::create_l2_transaction; +use zksync_types::K256PrivateKey; + +use super::*; +use crate::testonly::{StateBuilder, TestAccount}; + +#[tokio::test] +async fn submitting_tx_requires_one_connection() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + let tx_hash = tx.hash(); + + // Manually set sufficient balance for the tx initiator. + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_tx_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { output: vec![] } + }); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let block_args = pending_block_args(&tx_sender).await; + + let submission_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(submission_result.0, L2TxSubmissionResult::Added); + + let mut storage = pool.connection().await.unwrap(); + storage + .transactions_web3_dal() + .get_transaction_by_hash(tx_hash, l2_chain_id) + .await + .unwrap() + .expect("transaction is not persisted"); +} + +#[tokio::test] +async fn nonce_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + drop(storage); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let mut tx = create_l2_transaction(55, 555); + + tx_sender.validate_account_nonce(&tx).await.unwrap(); + // There should be some leeway with the nonce validation. 
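// The "leeway" mentioned above means the sender accepts nonces within a bounded window ahead of
// the account's current nonce and rejects everything else as too low / too high. A self-contained
// sketch of such a window check; `max_nonce_ahead` is a hypothetical name for the configured bound,
// not the actual field used by `validate_account_nonce`.

#[derive(Debug, PartialEq)]
enum NonceCheckError {
    TooLow { current: u32, got: u32 },
    TooHigh { max_allowed: u32, got: u32 },
}

fn check_nonce_window(current: u32, tx_nonce: u32, max_nonce_ahead: u32) -> Result<(), NonceCheckError> {
    let max_allowed = current.saturating_add(max_nonce_ahead);
    if tx_nonce < current {
        Err(NonceCheckError::TooLow { current, got: tx_nonce })
    } else if tx_nonce > max_allowed {
        Err(NonceCheckError::TooHigh { max_allowed, got: tx_nonce })
    } else {
        Ok(())
    }
}

// E.g. with `current == 42`, nonce 5 is rejected as too low and nonce 10_000 as too high,
// mirroring the assertions in `nonce_validation_errors`.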
+ tx.common_data.nonce = Nonce(1); + tx_sender.validate_account_nonce(&tx).await.unwrap(); + + tx.common_data.nonce = Nonce(10_000); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 0 + ); + + let mut storage = pool.connection().await.unwrap(); + let nonce_key = get_nonce_key(&tx.initiator_account()); + let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(42)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[nonce_log]) + .await + .unwrap(); + drop(storage); + + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 42 + ); + + tx.common_data.nonce = Nonce(5); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooLow(from, _, actual) if actual == 5 && from == 42 + ); +} + +#[tokio::test] +async fn fee_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + // Sanity check: validation should succeed with reasonable fee params. + tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap(); + + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = 100.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::IntrinsicGas); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = u64::MAX.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::GasLimitIsTooBig); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_fee_per_gas = 1.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxFeePerGasTooLow); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_priority_fee_per_gas = tx.common_data.fee.max_fee_per_gas * 2; + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxPriorityFeeGreaterThanMaxFee); + } +} + +#[tokio::test] +async fn sending_transfer() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + // Manually set sufficient balance for the tx initiator. 
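// For reference, the four failure modes exercised by `fee_validation_errors` above boil down to
// simple comparisons. A condensed sketch with hypothetical bounds (`intrinsic_gas`, `max_gas_limit`,
// `min_gas_price`); the real checks use protocol constants and the current batch fee input rather
// than plain integers.

#[derive(Debug, PartialEq)]
enum FeeCheckError {
    IntrinsicGas,
    GasLimitIsTooBig,
    MaxFeePerGasTooLow,
    MaxPriorityFeeGreaterThanMaxFee,
}

struct FeeFields {
    gas_limit: u128,
    max_fee_per_gas: u128,
    max_priority_fee_per_gas: u128,
}

fn check_fee(
    fee: &FeeFields,
    intrinsic_gas: u128,
    max_gas_limit: u128,
    min_gas_price: u128,
) -> Result<(), FeeCheckError> {
    if fee.gas_limit < intrinsic_gas {
        return Err(FeeCheckError::IntrinsicGas);
    }
    if fee.gas_limit > max_gas_limit {
        return Err(FeeCheckError::GasLimitIsTooBig);
    }
    if fee.max_fee_per_gas < min_gas_price {
        return Err(FeeCheckError::MaxFeePerGasTooLow);
    }
    if fee.max_priority_fee_per_gas > fee.max_fee_per_gas {
        return Err(FeeCheckError::MaxPriorityFeeGreaterThanMaxFee);
    }
    Ok(())
}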
+ let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let transfer = alice.create_transfer(1_000_000_000.into()); + let (sub_result, vm_result) = tx_sender.submit_tx(transfer, block_args).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_transfer_with_insufficient_balance() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let transfer = alice.create_transfer(transfer_value); + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NotEnoughBalanceForFeeValue(balance, _, value) if balance.is_zero() + && value == transfer_value + ); +} + +#[tokio::test] +async fn sending_transfer_with_incorrect_signature() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut transfer = alice.create_transfer(transfer_value); + transfer.execute.value = transfer_value / 2; // This should invalidate tx signature + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); + assert_matches!(err, SubmitTxError::ValidationFailed(_)); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_load_test_contract() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_load_test_tx(tx_params); + let (sub_result, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_reverting_transaction() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_counter_contract(0) + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_counter_tx(1.into(), true); + let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } if output.to_string().contains("This method always reverts") + ); +} + 
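// `sending_reverting_transaction` above matches on the human-readable revert reason. Solidity-style
// reverts carry that string as an `Error(string)` payload: the 4-byte selector `0x08c379a0`
// followed by ABI encoding (offset word, length word, then the UTF-8 bytes). A minimal decoder for
// this common case; the node's revert handling covers more formats, so treat this as a sketch.

/// Decodes a Solidity `Error(string)` revert payload; returns `None` for any other format.
fn decode_revert_reason(data: &[u8]) -> Option<String> {
    const ERROR_SELECTOR: [u8; 4] = [0x08, 0xc3, 0x79, 0xa0];
    let rest = data.strip_prefix(ERROR_SELECTOR.as_slice())?;
    // Word 0: offset of the string within `rest`; at that offset: the string length, then its bytes.
    let offset = be_word_as_usize(rest.get(0..32)?)?;
    let len = be_word_as_usize(rest.get(offset..offset.checked_add(32)?)?)?;
    let bytes = rest.get(offset + 32..(offset + 32).checked_add(len)?)?;
    String::from_utf8(bytes.to_vec()).ok()
}

fn be_word_as_usize(word: &[u8]) -> Option<usize> {
    // Only accept values that fit into a `u64` (the 24 high bytes must be zero).
    if word.len() != 32 || word[..24].iter().any(|&b| b != 0) {
        return None;
    }
    usize::try_from(u64::from_be_bytes(word[24..32].try_into().ok()?)).ok()
}

// Applied to the revert output asserted above, such a decoder would yield "This method always
// reverts", assuming the counter contract uses the standard `Error(string)` encoding.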
+#[tokio::test] +async fn sending_transaction_out_of_gas() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_infinite_loop_contract() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_infinite_loop_tx(); + let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(vm_result.result, ExecutionResult::Revert { .. }); +} diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index f83eb37ad962..31c8f15bb1ea 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -55,7 +55,7 @@ impl ZksNamespaceServer for ZksNamespace { } async fn get_bridge_contracts(&self) -> RpcResult { - Ok(self.get_bridge_contracts_impl()) + Ok(self.get_bridge_contracts_impl().await) } async fn l1_chain_id(&self) -> RpcResult { diff --git a/core/node/api_server/src/web3/mod.rs b/core/node/api_server/src/web3/mod.rs index bad1b493a5fd..620e9185078e 100644 --- a/core/node/api_server/src/web3/mod.rs +++ b/core/node/api_server/src/web3/mod.rs @@ -47,6 +47,7 @@ use self::{ use crate::{ execution_sandbox::{BlockStartInfo, VmConcurrencyBarrier}, tx_sender::TxSender, + web3::state::BridgeAddressesHandle, }; pub mod backend_jsonrpsee; @@ -143,7 +144,6 @@ struct OptionalApiParams { #[derive(Debug)] pub struct ApiServer { pool: ConnectionPool, - updaters_pool: ConnectionPool, health_updater: Arc, config: InternalApiConfig, transport: ApiTransport, @@ -153,18 +153,21 @@ pub struct ApiServer { namespaces: Vec, method_tracer: Arc, optional: OptionalApiParams, + bridge_addresses_handle: BridgeAddressesHandle, + sealed_l2_block_handle: SealedL2BlockNumber, } #[derive(Debug)] pub struct ApiBuilder { pool: ConnectionPool, - updaters_pool: ConnectionPool, config: InternalApiConfig, polling_interval: Duration, pruning_info_refresh_interval: Duration, // Mandatory params that must be set using builder methods. transport: Option, tx_sender: Option, + bridge_addresses_handle: Option, + sealed_l2_block_handle: Option, // Optional params that may or may not be set using builder methods. We treat `namespaces` // specially because we want to output a warning if they are not set. namespaces: Option>, @@ -178,13 +181,14 @@ impl ApiBuilder { pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { Self { - updaters_pool: pool.clone(), pool, config, polling_interval: Self::DEFAULT_POLLING_INTERVAL, pruning_info_refresh_interval: Self::DEFAULT_PRUNING_INFO_REFRESH_INTERVAL, transport: None, tx_sender: None, + bridge_addresses_handle: None, + sealed_l2_block_handle: None, namespaces: None, method_tracer: Arc::new(MethodTracer::default()), optional: OptionalApiParams::default(), @@ -201,15 +205,6 @@ impl ApiBuilder { self } - /// Configures a dedicated DB pool to be used for updating different information, - /// such as last mined block number or account nonces. This pool is used to execute - /// in a background task. If not called, the main pool will be used. 
If the API server is under high load, - /// it may make sense to supply a single-connection pool to reduce pool contention with the API methods. - pub fn with_updaters_pool(mut self, pool: ConnectionPool) -> Self { - self.updaters_pool = pool; - self - } - pub fn with_tx_sender(mut self, tx_sender: TxSender) -> Self { self.tx_sender = Some(tx_sender); self @@ -285,6 +280,22 @@ impl ApiBuilder { self } + pub fn with_sealed_l2_block_handle( + mut self, + sealed_l2_block_handle: SealedL2BlockNumber, + ) -> Self { + self.sealed_l2_block_handle = Some(sealed_l2_block_handle); + self + } + + pub fn with_bridge_addresses_handle( + mut self, + bridge_addresses_handle: BridgeAddressesHandle, + ) -> Self { + self.bridge_addresses_handle = Some(bridge_addresses_handle); + self + } + // Intended for tests only. #[doc(hidden)] fn with_pub_sub_events(mut self, sender: mpsc::UnboundedSender) -> Self { @@ -312,7 +323,6 @@ impl ApiBuilder { Ok(ApiServer { pool: self.pool, health_updater: Arc::new(health_updater), - updaters_pool: self.updaters_pool, config: self.config, transport, tx_sender: self.tx_sender.context("Transaction sender not set")?, @@ -326,6 +336,12 @@ impl ApiBuilder { }), method_tracer: self.method_tracer, optional: self.optional, + sealed_l2_block_handle: self + .sealed_l2_block_handle + .context("Sealed l2 block handle not set")?, + bridge_addresses_handle: self + .bridge_addresses_handle + .context("Bridge addresses handle not set")?, }) } } @@ -335,11 +351,8 @@ impl ApiServer { self.health_updater.subscribe() } - async fn build_rpc_state( - self, - last_sealed_l2_block: SealedL2BlockNumber, - ) -> anyhow::Result { - let mut storage = self.updaters_pool.connection_tagged("api").await?; + async fn build_rpc_state(self) -> anyhow::Result { + let mut storage = self.pool.connection_tagged("api").await?; let start_info = BlockStartInfo::new(&mut storage, self.pruning_info_refresh_interval).await?; drop(storage); @@ -363,7 +376,8 @@ impl ApiServer { api_config: self.config, start_info, mempool_cache: self.optional.mempool_cache, - last_sealed_l2_block, + last_sealed_l2_block: self.sealed_l2_block_handle, + bridge_addresses_handle: self.bridge_addresses_handle, tree_api: self.optional.tree_api, }) } @@ -371,11 +385,10 @@ impl ApiServer { async fn build_rpc_module( self, pub_sub: Option, - last_sealed_l2_block: SealedL2BlockNumber, ) -> anyhow::Result> { let namespaces = self.namespaces.clone(); let zksync_network_id = self.config.l2_chain_id; - let rpc_state = self.build_rpc_state(last_sealed_l2_block).await?; + let rpc_state = self.build_rpc_state().await?; // Collect all the methods into a single RPC module. let mut rpc = RpcModule::new(()); @@ -473,21 +486,9 @@ impl ApiServer { self, stop_receiver: watch::Receiver, ) -> anyhow::Result { - // Chosen to be significantly smaller than the interval between L2 blocks, but larger than - // the latency of getting the latest sealed L2 block number from Postgres. If the API server - // processes enough requests, information about the latest sealed L2 block will be updated - // by reporting block difference metrics, so the actual update lag would be much smaller than this value. 
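// The polling task deleted above is no longer needed: `SealedL2BlockNumber` is now a plain shared
// atomic that request handlers push newer block numbers into, and `fetch_max` keeps it monotonic
// without locking. A condensed illustration of that pattern (names are illustrative; the real
// handle lives in `web3::state` below):

use std::sync::{
    atomic::{AtomicU32, Ordering},
    Arc,
};

#[derive(Clone, Default)]
struct LatestBlockHandle(Arc<AtomicU32>);

impl LatestBlockHandle {
    /// Records `candidate` if it is newer and returns the latest known block number.
    fn update(&self, candidate: u32) -> u32 {
        let prev = self.0.fetch_max(candidate, Ordering::Relaxed);
        prev.max(candidate)
    }
}

// Updates may arrive out of order (e.g. from concurrent API requests reporting block diffs),
// but the stored value only ever moves forward.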
- const SEALED_L2_BLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25); - let transport = self.transport; + let mut tasks = vec![]; - let (last_sealed_l2_block, sealed_l2_block_update_task) = SealedL2BlockNumber::new( - self.updaters_pool.clone(), - SEALED_L2_BLOCK_UPDATE_INTERVAL, - stop_receiver.clone(), - ); - - let mut tasks = vec![tokio::spawn(sealed_l2_block_update_task)]; let pub_sub = if matches!(transport, ApiTransport::WebSocket(_)) && self.namespaces.contains(&Namespace::Pubsub) { @@ -510,12 +511,8 @@ impl ApiServer { // framework it'll no longer be needed. let health_check = self.health_updater.subscribe(); let (local_addr_sender, local_addr) = oneshot::channel(); - let server_task = tokio::spawn(self.run_jsonrpsee_server( - stop_receiver, - pub_sub, - last_sealed_l2_block, - local_addr_sender, - )); + let server_task = + tokio::spawn(self.run_jsonrpsee_server(stop_receiver, pub_sub, local_addr_sender)); tasks.push(server_task); Ok(ApiServerHandles { @@ -584,7 +581,6 @@ impl ApiServer { self, mut stop_receiver: watch::Receiver, pub_sub: Option, - last_sealed_l2_block: SealedL2BlockNumber, local_addr_sender: oneshot::Sender, ) -> anyhow::Result<()> { let transport = self.transport; @@ -640,7 +636,7 @@ impl ApiServer { tracing::info!("Enabled extended call tracing for {transport_str} API server; this might negatively affect performance"); } - let rpc = self.build_rpc_module(pub_sub, last_sealed_l2_block).await?; + let rpc = self.build_rpc_module(pub_sub).await?; let registered_method_names = Arc::new(rpc.method_names().collect::>()); tracing::debug!( "Built RPC module for {transport_str} server with {} methods: {registered_method_names:?}", diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 71560e4ddb8c..7e99808dbc77 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -259,7 +259,11 @@ impl DebugNamespace { }; let call_overrides = request.get_call_overrides()?; - let call = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; + let call = L2Tx::from_request( + request.into(), + MAX_ENCODED_TX_SIZE, + false, // Even with EVM emulation enabled, calls must specify `to` field + )?; let vm_permit = self .state diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index 290dda1d0bf8..a09a0cb92fc7 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -208,6 +208,10 @@ impl EnNamespace { genesis_commitment: Some(genesis_batch.metadata.commitment), bootloader_hash: Some(genesis_batch.header.base_system_contracts_hashes.bootloader), default_aa_hash: Some(genesis_batch.header.base_system_contracts_hashes.default_aa), + evm_emulator_hash: genesis_batch + .header + .base_system_contracts_hashes + .evm_emulator, l1_chain_id: self.state.api_config.l1_chain_id, sl_chain_id: Some(self.state.api_config.l1_chain_id.into()), l2_chain_id: self.state.api_config.l2_chain_id, diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index 1d60d839e4ee..44362dd098e0 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ b/core/node/api_server/src/web3/namespaces/eth.rs @@ -19,6 +19,7 @@ use zksync_web3_decl::{ }; use crate::{ + execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, 
metrics::API_METRICS, state::RpcState, TypedFilter}, @@ -77,7 +78,11 @@ impl EthNamespace { drop(connection); let call_overrides = request.get_call_overrides()?; - let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?; + let tx = L2Tx::from_request( + request.into(), + self.state.api_config.max_tx_size, + false, // Even with EVM emulation enabled, calls must specify `to` field + )?; // It is assumed that the previous checks has already enforced that the `max_fee_per_gas` is at most u64. let call_result: Vec = self @@ -108,10 +113,13 @@ impl EthNamespace { let is_eip712 = request_with_gas_per_pubdata_overridden .eip712_meta .is_some(); - + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); let mut tx: L2Tx = L2Tx::from_request( request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), )?; // The user may not include the proper transaction type during the estimation of @@ -137,6 +145,7 @@ impl EthNamespace { .tx_sender .get_txs_fee_in_wei( tx.into(), + block_args, scale_factor, acceptable_overestimation as u64, state_override, @@ -619,10 +628,15 @@ impl EthNamespace { } pub async fn send_raw_transaction_impl(&self, tx_bytes: Bytes) -> Result { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|_| hash).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index 61456095d67c..bcfd7daf3461 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, convert::TryInto}; +use std::collections::HashMap; use anyhow::Context as _; use zksync_dal::{Connection, Core, CoreDal, DalError}; @@ -30,6 +30,7 @@ use zksync_web3_decl::{ }; use crate::{ + execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, RpcState}, @@ -63,16 +64,21 @@ impl ZksNamespace { eip712_meta.gas_per_pubdata = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); } + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); let mut tx = L2Tx::from_request( request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), )?; // When we're estimating fee, we are trying to deduce values related to fee, so we should // not consider provided ones. 
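// Note on the new boolean passed to `L2Tx::from_request` above: it is `false` for plain calls
// (which must always have a `to` address) and `block_args.use_evm_emulator()` for estimates and
// submissions, where a missing `to` is only acceptable when EVM emulation can handle a
// deploy-style transaction. A hedged sketch of that gate, with hypothetical names:

/// Rejects requests without a target address unless deploy-style transactions are allowed.
fn check_target_address(to: Option<[u8; 20]>, allow_no_target: bool) -> Result<(), &'static str> {
    if to.is_none() && !allow_no_target {
        return Err("toAddressIsNull");
    }
    Ok(())
}

// The `toAddressIsNull` message is the one asserted by the `*_fails_without_to_address` tests later
// in this changeset.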
tx.common_data.fee.max_priority_fee_per_gas = 0u64.into(); tx.common_data.fee.gas_per_pubdata_limit = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); - self.estimate_fee(tx.into(), state_override).await + self.estimate_fee(tx.into(), block_args, state_override) + .await } pub async fn estimate_l1_to_l2_gas_impl( @@ -89,17 +95,25 @@ impl ZksNamespace { } } - let tx: L1Tx = request_with_gas_per_pubdata_overridden - .try_into() - .map_err(Web3Error::SerializationError)?; - - let fee = self.estimate_fee(tx.into(), state_override).await?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let tx = L1Tx::from_request( + request_with_gas_per_pubdata_overridden, + block_args.use_evm_emulator(), + ) + .map_err(Web3Error::SerializationError)?; + + let fee = self + .estimate_fee(tx.into(), block_args, state_override) + .await?; Ok(fee.gas_limit) } async fn estimate_fee( &self, tx: Transaction, + block_args: BlockArgs, state_override: Option, ) -> Result { let scale_factor = self.state.api_config.estimate_gas_scale_factor; @@ -112,6 +126,7 @@ impl ZksNamespace { .tx_sender .get_txs_fee_in_wei( tx, + block_args, scale_factor, acceptable_overestimation as u64, state_override, @@ -132,8 +147,8 @@ impl ZksNamespace { self.state.api_config.l2_testnet_paymaster_addr } - pub fn get_bridge_contracts_impl(&self) -> BridgeAddresses { - self.state.api_config.bridge_addresses.clone() + pub async fn get_bridge_contracts_impl(&self) -> BridgeAddresses { + self.state.bridge_addresses_handle.read().await } pub fn l1_chain_id_impl(&self) -> U64 { @@ -583,10 +598,15 @@ impl ZksNamespace { &self, tx_bytes: Bytes, ) -> Result<(H256, VmExecutionResultAndLogs), Web3Error> { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|result| (hash, result.1)).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 8cbb75103cd9..18c206eaf584 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -4,13 +4,13 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::{Duration, Instant}, + time::Instant, }; use anyhow::Context as _; use futures::TryFutureExt; use lru::LruCache; -use tokio::sync::{watch, Mutex}; +use tokio::sync::{Mutex, RwLock}; use vise::GaugeGuard; use zksync_config::{ configs::{api::Web3JsonRpcConfig, ContractsConfig}, @@ -20,8 +20,9 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_metadata_calculator::api_server::TreeApiClient; use zksync_node_sync::SyncState; use zksync_types::{ - api, commitment::L1BatchCommitmentMode, l2::L2Tx, transaction_request::CallRequest, Address, - L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, H256, U256, U64, + api, api::BridgeAddresses, commitment::L1BatchCommitmentMode, l2::L2Tx, + transaction_request::CallRequest, Address, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, + H256, U256, 
U64, }; use zksync_web3_decl::{error::Web3Error, types::Filter}; @@ -173,51 +174,16 @@ impl InternalApiConfig { /// Thread-safe updatable information about the last sealed L2 block number. /// /// The information may be temporarily outdated and thus should only be used where this is OK -/// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`] -/// and on an interval specified when creating an instance. -#[derive(Debug, Clone)] -pub(crate) struct SealedL2BlockNumber(Arc); +/// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`]. +#[derive(Debug, Clone, Default)] +pub struct SealedL2BlockNumber(Arc); impl SealedL2BlockNumber { - /// Creates a handle to the last sealed L2 block number together with a task that will update - /// it on a schedule. - pub fn new( - connection_pool: ConnectionPool, - update_interval: Duration, - stop_receiver: watch::Receiver, - ) -> (Self, impl Future>) { - let this = Self(Arc::default()); - let number_updater = this.clone(); - - let update_task = async move { - loop { - if *stop_receiver.borrow() { - tracing::debug!("Stopping latest sealed L2 block updates"); - return Ok(()); - } - - let mut connection = connection_pool.connection_tagged("api").await.unwrap(); - let Some(last_sealed_l2_block) = - connection.blocks_dal().get_sealed_l2_block_number().await? - else { - tokio::time::sleep(update_interval).await; - continue; - }; - drop(connection); - - number_updater.update(last_sealed_l2_block); - tokio::time::sleep(update_interval).await; - } - }; - - (this, update_task) - } - /// Potentially updates the last sealed L2 block number by comparing it to the provided /// sealed L2 block number (not necessarily the last one). /// /// Returns the last sealed L2 block number after the update. - fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { + pub fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { let prev_value = self .0 .fetch_max(maybe_newer_l2_block_number.0, Ordering::Relaxed); @@ -231,7 +197,7 @@ impl SealedL2BlockNumber { /// Returns the difference between the latest L2 block number and the resolved L2 block number /// from `block_args`. - pub fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { + pub(crate) fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { // We compute the difference in any case, since it may update the stored value. let diff = self.diff(block_args.resolved_block_number()); @@ -243,6 +209,23 @@ impl SealedL2BlockNumber { } } +#[derive(Debug, Clone)] +pub struct BridgeAddressesHandle(Arc>); + +impl BridgeAddressesHandle { + pub fn new(bridge_addresses: BridgeAddresses) -> Self { + Self(Arc::new(RwLock::new(bridge_addresses))) + } + + pub async fn update(&self, bridge_addresses: BridgeAddresses) { + *self.0.write().await = bridge_addresses; + } + + pub async fn read(&self) -> BridgeAddresses { + self.0.read().await.clone() + } +} + /// Holder for the data required for the API to be functional. 
#[derive(Debug, Clone)] pub(crate) struct RpcState { @@ -258,15 +241,23 @@ pub(crate) struct RpcState { pub(super) start_info: BlockStartInfo, pub(super) mempool_cache: Option, pub(super) last_sealed_l2_block: SealedL2BlockNumber, + pub(super) bridge_addresses_handle: BridgeAddressesHandle, } impl RpcState { - pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> { + pub fn parse_transaction_bytes( + &self, + bytes: &[u8], + block_args: &BlockArgs, + ) -> Result<(L2Tx, H256), Web3Error> { let chain_id = self.api_config.l2_chain_id; let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id)?; - Ok(( - L2Tx::from_request(tx_request, self.api_config.max_tx_size)?, + L2Tx::from_request( + tx_request, + self.api_config.max_tx_size, + block_args.use_evm_emulator(), + )?, hash, )) } diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 93309fc09cf1..3b05e235c6d4 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -181,6 +181,8 @@ async fn spawn_server( let mut namespaces = Namespace::DEFAULT.to_vec(); namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = BridgeAddressesHandle::new(api_config.bridge_addresses.clone()); let server_builder = match transport { ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), @@ -202,6 +204,8 @@ async fn spawn_server( .with_pub_sub_events(pub_sub_events_sender) .with_method_tracer(method_tracer) .enable_api_namespaces(namespaces) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle) .build() .expect("Unable to build API server") .run(stop_receiver) diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index d8086c6c6add..e29ea246213b 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -257,12 +257,12 @@ struct SendRawTransactionTest { } impl SendRawTransactionTest { - fn transaction_bytes_and_hash() -> (Vec, H256) { + fn transaction_bytes_and_hash(include_to: bool) -> (Vec, H256) { let private_key = Self::private_key(); let tx_request = api::TransactionRequest { chain_id: Some(L2ChainId::default().as_u64()), from: Some(private_key.address()), - to: Some(Address::repeat_byte(2)), + to: include_to.then(|| Address::repeat_byte(2)), value: 123_456.into(), gas: (get_intrinsic_constants().l2_tx_intrinsic_gas * 2).into(), gas_price: StateKeeperConfig::for_tests().minimal_l2_gas_price.into(), @@ -313,7 +313,7 @@ impl HttpTest for SendRawTransactionTest { L2BlockNumber(1) }; tx_executor.set_tx_responses(move |tx, env| { - assert_eq!(tx.hash(), Self::transaction_bytes_and_hash().1); + assert_eq!(tx.hash(), Self::transaction_bytes_and_hash(true).1); assert_eq!(env.l1_batch.first_l2_block.number, pending_block.0); ExecutionResult::Success { output: vec![] } }); @@ -334,7 +334,7 @@ impl HttpTest for SendRawTransactionTest { .await?; } - let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(true); let send_result = client.send_raw_transaction(tx_bytes.into()).await?; assert_eq!(send_result, tx_hash); Ok(()) @@ -357,6 +357,50 @@ async fn send_raw_transaction_after_snapshot_recovery() { .await; } +fn assert_null_to_address_error(error: &ClientError) { + 
if let ClientError::Call(error) = error { + assert_eq!(error.code(), 3); + assert!(error.message().contains("toAddressIsNull"), "{error:?}"); + assert!(error.data().is_none(), "{error:?}"); + } else { + panic!("Unexpected error: {error:?}"); + } +} + +#[derive(Debug)] +struct SendRawTransactionWithoutToAddressTest; + +#[async_trait] +impl HttpTest for SendRawTransactionWithoutToAddressTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut storage = pool.connection().await?; + storage + .storage_logs_dal() + .append_storage_logs( + L2BlockNumber(0), + &[SendRawTransactionTest::balance_storage_log()], + ) + .await?; + + let (tx_bytes, _) = SendRawTransactionTest::transaction_bytes_and_hash(false); + let err = client + .send_raw_transaction(tx_bytes.into()) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[tokio::test] +async fn send_raw_transaction_fails_without_to_address() { + test_http_server(SendRawTransactionWithoutToAddressTest).await; +} + #[derive(Debug)] struct SendTransactionWithDetailedOutputTest; @@ -405,7 +449,7 @@ impl SendTransactionWithDetailedOutputTest { impl HttpTest for SendTransactionWithDetailedOutputTest { fn transaction_executor(&self) -> MockOneshotExecutor { let mut tx_executor = MockOneshotExecutor::default(); - let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(); + let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(true); let vm_execution_logs = VmExecutionLogs { storage_logs: self.storage_logs(), events: self.vm_events(), @@ -423,6 +467,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { logs: vm_execution_logs.clone(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }); tx_executor @@ -443,7 +488,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { ) .await?; - let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(true); let send_result = client .send_raw_transaction_with_detailed_output(tx_bytes.into()) .await?; @@ -835,3 +880,30 @@ async fn estimate_gas_with_state_override() { let inner = EstimateGasTest::new(false); test_http_server(EstimateGasWithStateOverrideTest { inner }).await; } + +#[derive(Debug)] +struct EstimateGasWithoutToAddessTest; + +#[async_trait] +impl HttpTest for EstimateGasWithoutToAddessTest { + async fn test( + &self, + client: &DynClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut l2_transaction = create_l2_transaction(10, 100); + l2_transaction.execute.contract_address = None; + l2_transaction.common_data.signature = vec![]; // Remove invalidated signature so that it doesn't trip estimation logic + let err = client + .estimate_gas(l2_transaction.clone().into(), None, None) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[tokio::test] +async fn estimate_gas_fails_without_to_address() { + test_http_server(EstimateGasWithoutToAddessTest).await; +} diff --git a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs index 220f100e5dcb..785c9c4dfd7b 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs @@ -90,6 +90,9 @@ impl BaseTokenRatioPersister { result: OperationResult::Success, }] 
.observe(start_time.elapsed()); + METRICS + .ratio + .set((ratio.numerator.get() as f64) / (ratio.denominator.get() as f64)); return Ok(ratio); } Err(err) => { diff --git a/core/node/base_token_adjuster/src/metrics.rs b/core/node/base_token_adjuster/src/metrics.rs index d84e4da0c0c7..17a48c1b5c32 100644 --- a/core/node/base_token_adjuster/src/metrics.rs +++ b/core/node/base_token_adjuster/src/metrics.rs @@ -18,6 +18,7 @@ pub(crate) struct OperationResultLabels { #[metrics(prefix = "base_token_adjuster")] pub(crate) struct BaseTokenAdjusterMetrics { pub l1_gas_used: Gauge, + pub ratio: Gauge, #[metrics(buckets = Buckets::LATENCIES)] pub external_price_api_latency: Family>, #[metrics(buckets = Buckets::LATENCIES)] diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index 6cb14cfda531..cf6971b041c6 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -176,6 +176,7 @@ impl CommitmentGenerator { rollup_root_hash: tree_data.hash, bootloader_code_hash: header.base_system_contracts_hashes.bootloader, default_aa_code_hash: header.base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: header.base_system_contracts_hashes.evm_emulator, protocol_version, }; let touched_slots = connection diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs index 7d277621b89f..2aed011d23cf 100644 --- a/core/node/consensus/src/storage/testonly.rs +++ b/core/node/consensus/src/storage/testonly.rs @@ -57,7 +57,7 @@ pub(crate) fn mock_genesis_params(protocol_version: ProtocolVersionId) -> Genesi GenesisParams::from_genesis_config( cfg, BaseSystemContracts::load_from_disk(), - get_system_smart_contracts(), + get_system_smart_contracts(false), ) .unwrap() } diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs b/core/node/eth_sender/src/eth_fees_oracle.rs index 2c87848dcc3e..ebd1568edb66 100644 --- a/core/node/eth_sender/src/eth_fees_oracle.rs +++ b/core/node/eth_sender/src/eth_fees_oracle.rs @@ -23,7 +23,7 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { fn calculate_fees( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result; } @@ -32,6 +32,7 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { pub(crate) struct GasAdjusterFeesOracle { pub gas_adjuster: Arc, pub max_acceptable_priority_fee_in_gwei: u64, + pub time_in_mempool_in_l1_blocks_cap: u32, } impl GasAdjusterFeesOracle { @@ -80,11 +81,16 @@ impl GasAdjusterFeesOracle { fn calculate_fees_no_blob_sidecar( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, ) -> Result { - // cap it at 6h to not allow nearly infinite values when a tx is stuck for a long time - let capped_time_in_mempool = min(time_in_mempool, 1800); - let mut base_fee_per_gas = self.gas_adjuster.get_base_fee(capped_time_in_mempool); + // we cap it to not allow nearly infinite values when a tx is stuck for a long time + let capped_time_in_mempool_in_l1_blocks = min( + time_in_mempool_in_l1_blocks, + self.time_in_mempool_in_l1_blocks_cap, + ); + let mut base_fee_per_gas = self + .gas_adjuster + .get_base_fee(capped_time_in_mempool_in_l1_blocks); self.assert_fee_is_not_zero(base_fee_per_gas, "base"); if let Some(previous_sent_tx) = previous_sent_tx { self.verify_base_fee_not_too_low_on_resend( @@ -162,14 +168,14 @@ impl EthFeesOracle for GasAdjusterFeesOracle { fn calculate_fees( 
&self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result { let has_blob_sidecar = operator_type == OperatorType::Blob; if has_blob_sidecar { self.calculate_fees_with_blob_sidecar(previous_sent_tx) } else { - self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool) + self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool_in_l1_blocks) } } } diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 6e9e71d74ea4..a08d16f456a9 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -144,19 +144,19 @@ impl EthTxAggregator { } pub(super) async fn get_multicall_data(&mut self) -> Result { - let calldata = self.generate_calldata_for_multicall(); + let (calldata, evm_emulator_hash_requested) = self.generate_calldata_for_multicall(); let args = CallFunctionArgs::new(&self.functions.aggregate3.name, calldata).for_contract( self.l1_multicall3_address, &self.functions.multicall_contract, ); let aggregate3_result: Token = args.call((*self.eth_client).as_ref()).await?; - self.parse_multicall_data(aggregate3_result) + self.parse_multicall_data(aggregate3_result, evm_emulator_hash_requested) } // Multicall's aggregate function accepts 1 argument - arrays of different contract calls. // The role of the method below is to tokenize input for multicall, which is actually a vector of tokens. // Each token describes a specific contract call. - pub(super) fn generate_calldata_for_multicall(&self) -> Vec { + pub(super) fn generate_calldata_for_multicall(&self) -> (Vec, bool) { const ALLOW_FAILURE: bool = false; // First zksync contract call @@ -215,14 +215,31 @@ impl EthTxAggregator { calldata: get_protocol_version_input, }; - // Convert structs into tokens and return vector with them - vec![ + let mut token_vec = vec![ get_bootloader_hash_call.into_token(), get_default_aa_hash_call.into_token(), get_verifier_params_call.into_token(), get_verifier_call.into_token(), get_protocol_version_call.into_token(), - ] + ]; + + let mut evm_emulator_hash_requested = false; + let get_l2_evm_emulator_hash_input = self + .functions + .get_evm_emulator_bytecode_hash + .as_ref() + .and_then(|f| f.encode_input(&[]).ok()); + if let Some(input) = get_l2_evm_emulator_hash_input { + let call = Multicall3Call { + target: self.state_transition_chain_contract, + allow_failure: ALLOW_FAILURE, + calldata: input, + }; + token_vec.insert(2, call.into_token()); + evm_emulator_hash_requested = true; + } + + (token_vec, evm_emulator_hash_requested) } // The role of the method below is to de-tokenize multicall call's result, which is actually a token. 
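// `generate_calldata_for_multicall` and `parse_multicall_data` must agree on the order and count of
// sub-calls: the EVM-emulator hash getter, when available, is inserted at index 2, so the parser
// expects 6 results instead of 5 and reads the extra hash right after the bootloader and default-AA
// hashes. A small sketch of expressing that shared layout in one place (illustrative only, not the
// actual types used by the aggregator):

#[derive(Clone, Copy, Debug, PartialEq)]
enum MulticallSlot {
    BootloaderHash,
    DefaultAaHash,
    EvmEmulatorHash,
    VerifierParams,
    Verifier,
    ProtocolVersion,
}

/// Single source of truth for the sub-call order used by both the encoder and the parser.
fn multicall_layout(with_evm_emulator: bool) -> Vec<MulticallSlot> {
    let mut slots = vec![
        MulticallSlot::BootloaderHash,
        MulticallSlot::DefaultAaHash,
        MulticallSlot::VerifierParams,
        MulticallSlot::Verifier,
        MulticallSlot::ProtocolVersion,
    ];
    if with_evm_emulator {
        slots.insert(2, MulticallSlot::EvmEmulatorHash);
    }
    slots
}

// The parser can then check `call_results.len() == multicall_layout(flag).len()` and zip results
// with slots instead of hard-coding "5 or 6".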
@@ -230,6 +247,7 @@ impl EthTxAggregator { pub(super) fn parse_multicall_data( &self, token: Token, + evm_emulator_hash_requested: bool, ) -> Result { let parse_error = |tokens: &[Token]| { Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( @@ -238,8 +256,9 @@ impl EthTxAggregator { }; if let Token::Array(call_results) = token { - // 5 calls are aggregated in multicall - if call_results.len() != 5 { + let number_of_calls = if evm_emulator_hash_requested { 6 } else { 5 }; + // 5 or 6 calls are aggregated in multicall + if call_results.len() != number_of_calls { return parse_error(&call_results); } let mut call_results_iterator = call_results.into_iter(); @@ -268,12 +287,31 @@ impl EthTxAggregator { ))); } let default_aa = H256::from_slice(&multicall3_default_aa); + + let evm_emulator = if evm_emulator_hash_requested { + let multicall3_evm_emulator = + Multicall3Result::from_token(call_results_iterator.next().unwrap())? + .return_data; + if multicall3_evm_emulator.len() != 32 { + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( + "multicall3 EVM emulator hash data is not of the len of 32: {:?}", + multicall3_evm_emulator + ), + ))); + } + Some(H256::from_slice(&multicall3_evm_emulator)) + } else { + None + }; + let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader, default_aa, + evm_emulator, }; - call_results_iterator.next().unwrap(); + call_results_iterator.next().unwrap(); // FIXME: why is this value requested? let multicall3_verifier_address = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 0d78ab71c62d..7de91a3b7736 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -48,6 +48,7 @@ impl EthTxManager { let fees_oracle = GasAdjusterFeesOracle { gas_adjuster, max_acceptable_priority_fee_in_gwei: config.max_acceptable_priority_fee_in_gwei, + time_in_mempool_in_l1_blocks_cap: config.time_in_mempool_in_l1_blocks_cap, }; let l1_interface = Box::new(RealL1Interface { ethereum_gateway, @@ -111,7 +112,7 @@ impl EthTxManager { &mut self, storage: &mut Connection<'_, Core>, tx: &EthTx, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, current_block: L1BlockNumber, ) -> Result { let previous_sent_tx = storage @@ -127,7 +128,7 @@ impl EthTxManager { pubdata_price: _, } = self.fees_oracle.calculate_fees( &previous_sent_tx, - time_in_mempool, + time_in_mempool_in_l1_blocks, self.operator_type(tx), )?; @@ -601,13 +602,18 @@ impl EthTxManager { .await? { // New gas price depends on the time this tx spent in mempool. - let time_in_mempool = l1_block_numbers.latest.0 - sent_at_block; + let time_in_mempool_in_l1_blocks = l1_block_numbers.latest.0 - sent_at_block; // We don't want to return early in case resend does not succeed - // the error is logged anyway, but early returns will prevent // sending new operations. 
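// `time_in_mempool_in_l1_blocks` above is simply `latest - sent_at_block`, and the fees oracle caps
// it (via `time_in_mempool_in_l1_blocks_cap`, previously a hard-coded 1800) before asking the gas
// adjuster for a base fee, so a transaction stuck for a very long time does not drive the bid
// toward infinity. A toy escalation curve showing why the cap matters; the 1% per-block multiplier
// is an assumption, not the `GasAdjuster` formula.

/// Toy fee escalation: each L1 block spent in the mempool bumps the bid by 1%,
/// but never beyond `cap` blocks' worth of bumps.
fn escalated_base_fee(current_base_fee: f64, time_in_mempool_in_l1_blocks: u32, cap: u32) -> f64 {
    let blocks = time_in_mempool_in_l1_blocks.min(cap);
    current_base_fee * 1.01_f64.powi(blocks as i32)
}

// With `cap = 1800`, a tx resent after 10_000 L1 blocks is priced the same as one resent after
// 1800, keeping the replacement bid bounded.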
let _ = self - .send_eth_tx(storage, &tx, time_in_mempool, l1_block_numbers.latest) + .send_eth_tx( + storage, + &tx, + time_in_mempool_in_l1_blocks, + l1_block_numbers.latest, + ) .await?; } Ok(()) diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 9be1384daae2..86a8c477f9fe 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -23,6 +23,8 @@ use crate::{ Aggregator, EthTxAggregator, EthTxManager, }; +pub(super) const STATE_TRANSITION_CONTRACT_ADDRESS: Address = Address::repeat_byte(0xa0); + // Alias to conveniently call static methods of `ETHSender`. type MockEthTxManager = EthTxManager; @@ -172,7 +174,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -192,7 +194,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); l2_gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -212,7 +214,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway_blobs.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -261,7 +263,7 @@ impl EthSenderTester { // ZKsync contract address Address::random(), contracts_config.l1_multicall3_addr, - Address::random(), + STATE_TRANSITION_CONTRACT_ADDRESS, Default::default(), custom_commit_sender_addr, SettlementMode::SettlesToL1, diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index e03532458f18..9e844a8b8537 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -1,7 +1,9 @@ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_l1_contract_interface::i_executor::methods::ExecuteBatches; +use zksync_l1_contract_interface::{ + i_executor::methods::ExecuteBatches, multicall3::Multicall3Call, Tokenizable, +}; use zksync_node_test_utils::create_l1_batch; use zksync_types::{ aggregated_operations::AggregatedActionType, @@ -9,16 +11,19 @@ use zksync_types::{ commitment::{ L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata, }, + ethabi, ethabi::Token, helpers::unix_timestamp_ms, + web3, web3::contract::Error, - ProtocolVersionId, H256, + Address, ProtocolVersionId, H256, }; use crate::{ abstract_l1_interface::OperatorType, aggregated_operations::AggregatedOperation, - tester::{EthSenderTester, TestL1Batch}, + tester::{EthSenderTester, TestL1Batch, STATE_TRANSITION_CONTRACT_ADDRESS}, + zksync_functions::ZkSyncFunctions, EthSenderError, }; @@ -37,21 +42,59 @@ const COMMITMENT_MODES: [L1BatchCommitmentMode; 2] = [ L1BatchCommitmentMode::Validium, ]; -pub(crate) fn mock_multicall_response() -> Token { - Token::Array(vec![ - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), 
- Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 96])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 32])]), - Token::Tuple(vec![ - Token::Bool(true), - Token::Bytes( +pub(crate) fn mock_multicall_response(call: &web3::CallRequest) -> Token { + let functions = ZkSyncFunctions::default(); + let evm_emulator_getter_signature = functions + .get_evm_emulator_bytecode_hash + .as_ref() + .map(ethabi::Function::short_signature); + let bootloader_signature = functions.get_l2_bootloader_bytecode_hash.short_signature(); + let default_aa_signature = functions + .get_l2_default_account_bytecode_hash + .short_signature(); + let evm_emulator_getter_signature = evm_emulator_getter_signature.as_ref().map(|sig| &sig[..]); + + let calldata = &call.data.as_ref().expect("no calldata").0; + assert_eq!(calldata[..4], functions.aggregate3.short_signature()); + let mut tokens = functions + .aggregate3 + .decode_input(&calldata[4..]) + .expect("invalid multicall"); + assert_eq!(tokens.len(), 1); + let Token::Array(tokens) = tokens.pop().unwrap() else { + panic!("Unexpected input: {tokens:?}"); + }; + + let calls = tokens.into_iter().map(Multicall3Call::from_token); + let response = calls.map(|call| { + let call = call.unwrap(); + assert_eq!(call.target, STATE_TRANSITION_CONTRACT_ADDRESS); + let output = match &call.calldata[..4] { + selector if selector == bootloader_signature => { + vec![1u8; 32] + } + selector if selector == default_aa_signature => { + vec![2u8; 32] + } + selector if Some(selector) == evm_emulator_getter_signature => { + vec![3u8; 32] + } + selector if selector == functions.get_verifier_params.short_signature() => { + vec![4u8; 96] + } + selector if selector == functions.get_verifier.short_signature() => { + vec![5u8; 32] + } + selector if selector == functions.get_protocol_version.short_signature() => { H256::from_low_u64_be(ProtocolVersionId::default() as u64) .0 - .to_vec(), - ), - ]), - ]) + .to_vec() + } + _ => panic!("unexpected call: {call:?}"), + }; + Token::Tuple(vec![Token::Bool(true), Token::Bytes(output)]) + }); + Token::Array(response.collect()) } pub(crate) fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata { @@ -74,6 +117,7 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { zkporter_is_available: false, bootloader_code_hash: H256::default(), default_aa_code_hash: H256::default(), + evm_emulator_code_hash: None, protocol_version: Some(ProtocolVersionId::default()), }, aux_data_hash: H256::default(), @@ -656,22 +700,71 @@ async fn skipped_l1_batch_in_the_middle( Ok(()) } -#[test_casing(2, COMMITMENT_MODES)] +#[test_casing(2, [false, true])] #[test_log::test(tokio::test)] -async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { +async fn parsing_multicall_data(with_evm_emulator: bool) { let tester = EthSenderTester::new( ConnectionPool::::test_pool().await, vec![100; 100], false, true, - commitment_mode, + L1BatchCommitmentMode::Rollup, ) .await; - assert!(tester + let mut mock_response = vec![ + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 96])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![5u8; 32])]), + Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes( + H256::from_low_u64_be(ProtocolVersionId::latest() as u64) + .0 + .to_vec(), + ), + ]), + ]; + if with_evm_emulator { + mock_response.insert( + 2, + 
Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 32])]), + ); + } + let mock_response = Token::Array(mock_response); + + let parsed = tester .aggregator - .parse_multicall_data(mock_multicall_response()) - .is_ok()); + .parse_multicall_data(mock_response, with_evm_emulator) + .unwrap(); + assert_eq!( + parsed.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + parsed.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + let expected_evm_emulator_hash = with_evm_emulator.then(|| H256::repeat_byte(3)); + assert_eq!( + parsed.base_system_contracts_hashes.evm_emulator, + expected_evm_emulator_hash + ); + assert_eq!(parsed.verifier_address, Address::repeat_byte(5)); + assert_eq!(parsed.protocol_version_id, ProtocolVersionId::latest()); +} + +#[test_log::test(tokio::test)] +async fn parsing_multicall_data_errors() { + let tester = EthSenderTester::new( + ConnectionPool::::test_pool().await, + vec![100; 100], + false, + true, + L1BatchCommitmentMode::Rollup, + ) + .await; let original_wrong_form_data = vec![ // should contain 5 tuples @@ -722,7 +815,7 @@ async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { assert_matches!( tester .aggregator - .parse_multicall_data(wrong_data_instance.clone()), + .parse_multicall_data(wrong_data_instance.clone(), true), Err(EthSenderError::Parse(Error::InvalidOutputType(_))) ); } @@ -739,6 +832,17 @@ async fn get_multicall_data(commitment_mode: L1BatchCommitmentMode) { commitment_mode, ) .await; - let multicall_data = tester.aggregator.get_multicall_data().await; - assert!(multicall_data.is_ok()); + + let data = tester.aggregator.get_multicall_data().await.unwrap(); + assert_eq!( + data.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + data.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + assert_eq!(data.base_system_contracts_hashes.evm_emulator, None); + assert_eq!(data.verifier_address, Address::repeat_byte(5)); + assert_eq!(data.protocol_version_id, ProtocolVersionId::latest()); } diff --git a/core/node/eth_sender/src/zksync_functions.rs b/core/node/eth_sender/src/zksync_functions.rs index 8f13f0e63ae8..85508c71c03d 100644 --- a/core/node/eth_sender/src/zksync_functions.rs +++ b/core/node/eth_sender/src/zksync_functions.rs @@ -12,6 +12,7 @@ pub(super) struct ZkSyncFunctions { pub(super) get_l2_bootloader_bytecode_hash: Function, pub(super) get_l2_default_account_bytecode_hash: Function, pub(super) get_verifier: Function, + pub(super) get_evm_emulator_bytecode_hash: Option, pub(super) get_verifier_params: Function, pub(super) get_protocol_version: Function, @@ -59,6 +60,8 @@ impl Default for ZkSyncFunctions { get_function(&zksync_contract, "getL2BootloaderBytecodeHash"); let get_l2_default_account_bytecode_hash = get_function(&zksync_contract, "getL2DefaultAccountBytecodeHash"); + let get_evm_emulator_bytecode_hash = + get_optional_function(&zksync_contract, "getL2EvmSimulatorBytecodeHash"); let get_verifier = get_function(&zksync_contract, "getVerifier"); let get_verifier_params = get_function(&zksync_contract, "getVerifierParams"); let get_protocol_version = get_function(&zksync_contract, "getProtocolVersion"); @@ -74,6 +77,7 @@ impl Default for ZkSyncFunctions { post_shared_bridge_execute, get_l2_bootloader_bytecode_hash, get_l2_default_account_bytecode_hash, + get_evm_emulator_bytecode_hash, get_verifier, get_verifier_params, get_protocol_version, diff --git a/core/node/eth_watch/src/client.rs 
b/core/node/eth_watch/src/client.rs index 54376bae82eb..ac5fc86c6e9f 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -46,7 +46,8 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { pub const RETRY_LIMIT: usize = 5; const TOO_MANY_RESULTS_INFURA: &str = "query returned more than"; const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; -const TOO_MANY_RESULTS_RETH: &str = "query exceeds max block range"; +const TOO_MANY_RESULTS_RETH: &str = "length limit exceeded"; +const TOO_BIG_RANGE_RETH: &str = "query exceeds max block range"; const TOO_MANY_RESULTS_CHAINSTACK: &str = "range limit exceeded"; /// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). @@ -149,6 +150,7 @@ impl EthHttpQueryClient { if err_message.contains(TOO_MANY_RESULTS_INFURA) || err_message.contains(TOO_MANY_RESULTS_ALCHEMY) || err_message.contains(TOO_MANY_RESULTS_RETH) + || err_message.contains(TOO_BIG_RANGE_RETH) || err_message.contains(TOO_MANY_RESULTS_CHAINSTACK) { // get the numeric block ids diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index feb9eff35b5c..d9faf7b664e6 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -245,8 +245,11 @@ fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. - let tx = - Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()).unwrap(); + let tx = Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap(); tx.try_into().unwrap() } @@ -272,10 +275,13 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. - Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()) - .unwrap() - .try_into() - .unwrap() + Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap() + .try_into() + .unwrap() } async fn create_test_watcher( diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index e6842b92fdba..27cdc7f5d5e0 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -317,14 +317,14 @@ impl TxParamsProvider for GasAdjuster { // smooth out base_fee increases in general. // In other words, in order to pay less fees, we are ready to wait longer. // But the longer we wait, the more we are ready to pay. - fn get_base_fee(&self, time_in_mempool: u32) -> u64 { + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64 { let a = self.config.pricing_formula_parameter_a; let b = self.config.pricing_formula_parameter_b; // Currently we use an exponential formula. 
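+        // For illustration: with `a = 1.5` (the `pricing_formula_parameter_a` value set in
+        // `etc/env/base/eth_sender.toml`) and, say, `b = 1.001`, a transaction that has waited
+        // 10 L1 blocks in the mempool is repriced with `scale_factor = 1.5 * 1.001^10 ≈ 1.515`,
+        // i.e. the bid over the median base fee compounds the longer the transaction waits.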
// The alternative is a linear one: - // `let scale_factor = a + b * time_in_mempool as f64;` - let scale_factor = a * b.powf(time_in_mempool as f64); + // `let scale_factor = a + b * time_in_mempool_in_l1_blocks as f64;` + let scale_factor = a * b.powf(time_in_mempool_in_l1_blocks as f64); let median = self.base_fee_statistics.median(); METRICS.median_base_fee_per_gas.set(median); let new_fee = median as f64 * scale_factor; diff --git a/core/node/fee_model/src/l1_gas_price/mod.rs b/core/node/fee_model/src/l1_gas_price/mod.rs index 2a5d63089ca1..e23bccf27ee1 100644 --- a/core/node/fee_model/src/l1_gas_price/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/mod.rs @@ -16,7 +16,7 @@ mod main_node_fetcher; /// This trait, as a bound, should only be used in components that actually sign and send transactions. pub trait TxParamsProvider: fmt::Debug + 'static + Send + Sync { /// Returns the recommended `max_fee_per_gas` value (EIP1559). - fn get_base_fee(&self, time_in_mempool: u32) -> u64; + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64; /// Returns the recommended `max_priority_fee_per_gas` value (EIP1559). fn get_priority_fee(&self) -> u64; diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 1f30d314bb06..5c17add2e987 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -104,6 +104,7 @@ impl GenesisParams { default_aa: config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: config.evm_emulator_hash, }; if base_system_contracts_hashes != base_system_contracts.hashes() { return Err(GenesisError::BaseSystemContractsHashes(Box::new( @@ -124,15 +125,18 @@ impl GenesisParams { } pub fn load_genesis_params(config: GenesisConfig) -> Result { - let base_system_contracts = BaseSystemContracts::load_from_disk(); - let system_contracts = get_system_smart_contracts(); + let mut base_system_contracts = BaseSystemContracts::load_from_disk(); + if config.evm_emulator_hash.is_some() { + base_system_contracts = base_system_contracts.with_latest_evm_emulator(); + } + let system_contracts = get_system_smart_contracts(config.evm_emulator_hash.is_some()); Self::from_genesis_config(config, base_system_contracts, system_contracts) } pub fn mock() -> Self { Self { base_system_contracts: BaseSystemContracts::load_from_disk(), - system_contracts: get_system_smart_contracts(), + system_contracts: get_system_smart_contracts(false), config: mock_genesis_config(), } } @@ -172,6 +176,7 @@ pub fn mock_genesis_config() -> GenesisConfig { genesis_commitment: Some(H256::default()), bootloader_hash: Some(base_system_contracts_hashes.bootloader), default_aa_hash: Some(base_system_contracts_hashes.default_aa), + evm_emulator_hash: base_system_contracts_hashes.evm_emulator, l1_chain_id: L1ChainId(9), sl_chain_id: None, l2_chain_id: L2ChainId::default(), @@ -235,6 +240,7 @@ pub async fn insert_genesis_batch( .config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: genesis_params.config.evm_emulator_hash, }; let commitment_input = CommitmentInput::for_genesis_batch( genesis_root_hash, diff --git a/core/node/genesis/src/utils.rs b/core/node/genesis/src/utils.rs index a6c9513dbde8..6042513537cd 100644 --- a/core/node/genesis/src/utils.rs +++ b/core/node/genesis/src/utils.rs @@ -130,7 +130,8 @@ pub(super) async fn insert_base_system_contracts_to_factory_deps( contracts: &BaseSystemContracts, ) -> Result<(), GenesisError> { let factory_deps = [&contracts.bootloader, 
&contracts.default_aa] - .iter() + .into_iter() + .chain(contracts.evm_emulator.as_ref()) .map(|c| (c.hash, be_words_to_bytes(&c.code))) .collect(); diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs new file mode 100644 index 000000000000..4ba8098c8399 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs @@ -0,0 +1,48 @@ +use std::time::Duration; + +use zksync_node_api_server::web3::state::BridgeAddressesHandle; +use zksync_web3_decl::{ + client::{DynClient, L2}, + namespaces::ZksNamespaceClient, +}; + +use crate::{StopReceiver, Task, TaskId}; + +#[derive(Debug)] +pub struct BridgeAddressesUpdaterTask { + pub bridge_address_updater: BridgeAddressesHandle, + pub main_node_client: Box>, + pub update_interval: Option, +} + +#[async_trait::async_trait] +impl Task for BridgeAddressesUpdaterTask { + fn id(&self) -> TaskId { + "bridge_addresses_updater_task".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + const DEFAULT_INTERVAL: Duration = Duration::from_secs(30); + + let update_interval = self.update_interval.unwrap_or(DEFAULT_INTERVAL); + while !*stop_receiver.0.borrow_and_update() { + match self.main_node_client.get_bridge_contracts().await { + Ok(bridge_addresses) => { + self.bridge_address_updater.update(bridge_addresses).await; + } + Err(err) => { + tracing::error!("Failed to query `get_bridge_contracts`, error: {err:?}"); + } + } + + if tokio::time::timeout(update_interval, stop_receiver.0.changed()) + .await + .is_ok() + { + break; + } + } + + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs similarity index 81% rename from core/node/node_framework/src/implementations/layers/web3_api/server.rs rename to core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs index 0a39ae747c71..390d321647cf 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs @@ -3,15 +3,24 @@ use std::{num::NonZeroU32, time::Duration}; use tokio::{sync::oneshot, task::JoinHandle}; use zksync_circuit_breaker::replication_lag::ReplicationLagChecker; use zksync_config::configs::api::MaxResponseSize; -use zksync_node_api_server::web3::{state::InternalApiConfig, ApiBuilder, ApiServer, Namespace}; +use zksync_node_api_server::web3::{ + state::{BridgeAddressesHandle, InternalApiConfig, SealedL2BlockNumber}, + ApiBuilder, ApiServer, Namespace, +}; use crate::{ - implementations::resources::{ - circuit_breakers::CircuitBreakersResource, - healthcheck::AppHealthCheckResource, - pools::{PoolResource, ReplicaPool}, - sync_state::SyncStateResource, - web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, + implementations::{ + layers::web3_api::server::{ + bridge_addresses::BridgeAddressesUpdaterTask, sealed_l2_block::SealedL2BlockUpdaterTask, + }, + resources::{ + circuit_breakers::CircuitBreakersResource, + healthcheck::AppHealthCheckResource, + main_node_client::MainNodeClientResource, + pools::{PoolResource, ReplicaPool}, + sync_state::SyncStateResource, + web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, + }, }, service::StopReceiver, task::{Task, TaskId}, @@ -19,6 +28,9 @@ use crate::{ 
FromContext, IntoContext, }; +mod bridge_addresses; +mod sealed_l2_block; + /// Set of optional variables that can be altered to modify the behavior of API builder. #[derive(Debug, Default)] pub struct Web3ServerOptionalConfig { @@ -33,6 +45,8 @@ pub struct Web3ServerOptionalConfig { pub replication_lag_limit: Option, // Used by the external node. pub pruning_info_refresh_interval: Option, + // Used by the external node. + pub bridge_addresses_refresh_interval: Option, pub polling_interval: Option, } @@ -61,6 +75,10 @@ impl Web3ServerOptionalConfig { if let Some(polling_interval) = self.polling_interval { api_builder = api_builder.with_polling_interval(polling_interval); } + if let Some(pruning_info_refresh_interval) = self.pruning_info_refresh_interval { + api_builder = + api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval); + } api_builder = api_builder.with_extended_tracing(self.with_extended_tracing); api_builder } @@ -109,6 +127,7 @@ pub struct Input { pub circuit_breakers: CircuitBreakersResource, #[context(default)] pub app_health: AppHealthCheckResource, + pub main_node_client: Option, } #[derive(Debug, IntoContext)] @@ -118,6 +137,10 @@ pub struct Output { pub web3_api_task: Web3ApiTask, #[context(task)] pub garbage_collector_task: ApiTaskGarbageCollector, + #[context(task)] + pub sealed_l2_block_updater_task: SealedL2BlockUpdaterTask, + #[context(task)] + pub bridge_addresses_updater_task: Option, } impl Web3ServerLayer { @@ -163,20 +186,39 @@ impl WiringLayer for Web3ServerLayer { async fn wire(self, input: Self::Input) -> Result { // Get required resources. let replica_resource_pool = input.replica_pool; - let updaters_pool = replica_resource_pool.get_custom(2).await?; + let updaters_pool = replica_resource_pool.get_custom(1).await?; let replica_pool = replica_resource_pool.get().await?; let TxSenderResource(tx_sender) = input.tx_sender; let MempoolCacheResource(mempool_cache) = input.mempool_cache; let sync_state = input.sync_state.map(|state| state.0); let tree_api_client = input.tree_api_client.map(|client| client.0); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = + BridgeAddressesHandle::new(self.internal_api_config.bridge_addresses.clone()); + + let sealed_l2_block_updater_task = SealedL2BlockUpdaterTask { + number_updater: sealed_l2_block_handle.clone(), + pool: updaters_pool, + }; + // Bridge addresses updater task must be started for ENs and only for ENs. + let bridge_addresses_updater_task = + input + .main_node_client + .map(|main_node_client| BridgeAddressesUpdaterTask { + bridge_address_updater: bridge_addresses_handle.clone(), + main_node_client: main_node_client.0, + update_interval: self.optional_config.bridge_addresses_refresh_interval, + }); + // Build server. 
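+        // The handles built above are shared with the updater tasks wired earlier, so request
+        // handlers can read the latest sealed L2 block number and bridge addresses from memory
+        // instead of querying Postgres or the main node on every call.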
let mut api_builder = ApiBuilder::jsonrpsee_backend(self.internal_api_config, replica_pool.clone()) - .with_updaters_pool(updaters_pool) .with_tx_sender(tx_sender) .with_mempool_cache(mempool_cache) - .with_extended_tracing(self.optional_config.with_extended_tracing); + .with_extended_tracing(self.optional_config.with_extended_tracing) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle); if let Some(client) = tree_api_client { api_builder = api_builder.with_tree_api(client); } @@ -191,14 +233,9 @@ impl WiringLayer for Web3ServerLayer { if let Some(sync_state) = sync_state { api_builder = api_builder.with_sync_state(sync_state); } - if let Some(pruning_info_refresh_interval) = - self.optional_config.pruning_info_refresh_interval - { - api_builder = - api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval); - } let replication_lag_limit = self.optional_config.replication_lag_limit; api_builder = self.optional_config.apply(api_builder); + let server = api_builder.build()?; // Insert healthcheck. @@ -230,6 +267,8 @@ impl WiringLayer for Web3ServerLayer { Ok(Output { web3_api_task, garbage_collector_task, + sealed_l2_block_updater_task, + bridge_addresses_updater_task, }) } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs new file mode 100644 index 000000000000..02552e212cd6 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs @@ -0,0 +1,50 @@ +use std::time::Duration; + +use zksync_dal::{Core, CoreDal}; +use zksync_db_connection::connection_pool::ConnectionPool; +use zksync_node_api_server::web3::state::SealedL2BlockNumber; + +use crate::{StopReceiver, Task, TaskId}; + +#[derive(Debug)] +pub struct SealedL2BlockUpdaterTask { + pub number_updater: SealedL2BlockNumber, + pub pool: ConnectionPool, +} + +#[async_trait::async_trait] +impl Task for SealedL2BlockUpdaterTask { + fn id(&self) -> TaskId { + "api_sealed_l2_block_updater_task".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // Chosen to be significantly smaller than the interval between L2 blocks, but larger than + // the latency of getting the latest sealed L2 block number from Postgres. If the API server + // processes enough requests, information about the latest sealed L2 block will be updated + // by reporting block difference metrics, so the actual update lag would be much smaller than this value. + const UPDATE_INTERVAL: Duration = Duration::from_millis(25); + + while !*stop_receiver.0.borrow_and_update() { + let mut connection = self.pool.connection_tagged("api").await.unwrap(); + let Some(last_sealed_l2_block) = + connection.blocks_dal().get_sealed_l2_block_number().await? 
+ else { + tokio::time::sleep(UPDATE_INTERVAL).await; + continue; + }; + drop(connection); + + self.number_updater.update(last_sealed_l2_block); + + if tokio::time::timeout(UPDATE_INTERVAL, stop_receiver.0.changed()) + .await + .is_ok() + { + break; + } + } + + Ok(()) + } +} diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 6075ff048bfd..7687595740a8 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -345,6 +345,7 @@ impl StateKeeperIO for ExternalIO { let default_account_code_hash = protocol_version .default_account_code_hash() .context("Missing default account code hash")?; + let evm_emulator_code_hash = protocol_version.evm_emulator_code_hash(); let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); self.pool .connection_tagged("sync_layer") @@ -362,6 +363,7 @@ impl StateKeeperIO for ExternalIO { BaseSystemContractsHashes { bootloader: bootloader_code_hash, default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, }, l2_system_upgrade_tx_hash, ) @@ -375,9 +377,22 @@ impl StateKeeperIO for ExternalIO { .get_base_system_contract(default_account_code_hash, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch default AA code for {protocol_version:?}"))?; + let evm_emulator = if let Some(hash) = evm_emulator_code_hash { + Some( + self.get_base_system_contract(hash, cursor.next_l2_block) + .await + .with_context(|| { + format!("cannot fetch EVM emulator code for {protocol_version:?}") + })?, + ) + } else { + None + }; + Ok(BaseSystemContracts { bootloader, default_aa, + evm_emulator, }) } diff --git a/core/node/node_sync/src/genesis.rs b/core/node/node_sync/src/genesis.rs index ccc26b417e98..0ff8d0d448c0 100644 --- a/core/node/node_sync/src/genesis.rs +++ b/core/node/node_sync/src/genesis.rs @@ -38,6 +38,7 @@ async fn create_genesis_params( let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader: config.bootloader_hash.context("Genesis is not finished")?, default_aa: config.default_aa_hash.context("Genesis is not finished")?, + evm_emulator: config.evm_emulator_hash, }; if zksync_chain_id != config.l2_chain_id { @@ -47,10 +48,11 @@ async fn create_genesis_params( // Load the list of addresses that are known to contain system contracts at any point in time. // Not every of these addresses is guaranteed to be present in the genesis state, but we'll iterate through // them and try to fetch the contract bytecode for each of them. - let system_contract_addresses: Vec<_> = get_system_smart_contracts() - .into_iter() - .map(|contract| *contract.account_id.address()) - .collect(); + let system_contract_addresses: Vec<_> = + get_system_smart_contracts(config.evm_emulator_hash.is_some()) + .into_iter() + .map(|contract| *contract.account_id.address()) + .collect(); // These have to be *initial* base contract hashes of main node // (those that were used during genesis), not necessarily the current ones. @@ -103,6 +105,18 @@ async fn fetch_base_system_contracts( .fetch_system_contract_by_hash(contract_hashes.default_aa) .await? .context("default AA bytecode is missing on main node")?; + let evm_emulator = if let Some(hash) = contract_hashes.evm_emulator { + let bytes = client + .fetch_system_contract_by_hash(hash) + .await? 
+ .context("EVM Simulator bytecode is missing on main node")?; + Some(SystemContractCode { + code: zksync_utils::bytes_to_be_words(bytes), + hash, + }) + } else { + None + }; Ok(BaseSystemContracts { bootloader: SystemContractCode { code: zksync_utils::bytes_to_be_words(bootloader_bytecode), @@ -112,5 +126,6 @@ async fn fetch_base_system_contracts( code: zksync_utils::bytes_to_be_words(default_aa_bytecode), hash: contract_hashes.default_aa, }, + evm_emulator, }) } diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index d9a98c2bce36..3f5791cdf24c 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -304,6 +304,7 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo timestamp: snapshot.l2_block_timestamp + 1, bootloader_code_hash: Some(H256::repeat_byte(1)), default_account_code_hash: Some(H256::repeat_byte(1)), + evm_emulator_code_hash: Some(H256::repeat_byte(1)), ..api::ProtocolVersion::default() }; client.insert_protocol_version(next_protocol_version.clone()); @@ -345,6 +346,13 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo next_protocol_version.default_account_code_hash.unwrap() ); + assert_eq!( + persisted_protocol_version + .base_system_contracts_hashes + .evm_emulator, + next_protocol_version.evm_emulator_code_hash + ); + let l2_block = storage .blocks_dal() .get_l2_block_header(snapshot.l2_block_number + 1) diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 86cc53234486..8220aef5da0b 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -62,6 +62,7 @@ async fn request_tee_proof_inputs() { code: vec![U256([1; 4])], hash: H256([1; 32]), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, diff --git a/core/node/state_keeper/src/executor/mod.rs b/core/node/state_keeper/src/executor/mod.rs index 2fa5c3b9c128..903dae2f1cad 100644 --- a/core/node/state_keeper/src/executor/mod.rs +++ b/core/node/state_keeper/src/executor/mod.rs @@ -40,7 +40,7 @@ impl TxExecutionResult { _ => Self::Success { tx_metrics: Box::new(ExecutionMetricsForCriteria::new(Some(tx), &res.tx_result)), gas_remaining: res.tx_result.statistics.gas_remaining, - tx_result: res.tx_result, + tx_result: res.tx_result.clone(), compressed_bytecodes: res.compressed_bytecodes, call_tracer_result: res.call_traces, }, diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 7a1871dbfea9..79072f23aed9 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -259,7 +259,7 @@ impl Tester { patch: 0.into(), }, &BASE_SYSTEM_CONTRACTS, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), Default::default(), ) .await diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 24b1ffca631c..97340d6496ab 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -347,7 +347,7 @@ impl StateKeeperOutputHandler for TreeWritesPersistence { #[cfg(test)] mod tests { - use std::collections::HashSet; + use std::collections::{HashMap, HashSet}; use assert_matches::assert_matches; use futures::FutureExt; @@ -462,6 +462,7 @@ mod tests { tx, tx_result, vec![], + HashMap::new(), BlockGasCount::default(), 
VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 7ea01e6af1e8..e2a90f30691b 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{collections::HashMap, time::Duration}; use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; @@ -249,6 +249,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -267,6 +268,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -356,6 +358,7 @@ async fn processing_events_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); } @@ -457,6 +460,7 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom tx.into(), create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 2dc45a5eaaa0..02170283e94b 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -156,7 +156,7 @@ impl Tester { patch: 0.into(), }, &self.base_system_contracts, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), L1VerifierConfig::default(), ) .await diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index d36ceec7d70c..22f24573070b 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -498,8 +498,9 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -624,8 +625,9 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -704,8 +706,9 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, vec![], diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index e3fe849e8025..962cc807318b 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -277,6 +277,8 @@ impl L2BlockMaxPayloadSizeSealer { #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_utils::time::seconds_since_epoch; use super::*; @@ -287,6 +289,7 @@ mod tests { tx, create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index edcf3ccc4f5c..d1e82c44bd6f 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -33,6 +33,7 @@ pub(crate) 
fn successful_exec() -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index ffca8dff8643..cb282f3b7d6d 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -264,6 +264,7 @@ pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { }, statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], @@ -278,6 +279,7 @@ pub(crate) fn rejected_exec(reason: Halt) -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index 80de0f0beff9..9e971541b204 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -138,6 +138,7 @@ pub(super) fn create_execution_result( circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } diff --git a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs index aa2e22cac483..2979ebbd8c26 100644 --- a/core/node/state_keeper/src/updates/l1_batch_updates.rs +++ b/core/node/state_keeper/src/updates/l1_batch_updates.rs @@ -49,6 +49,8 @@ impl L1BatchUpdates { #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_multivm::vm_latest::TransactionVmExt; use zksync_types::{L2BlockNumber, ProtocolVersionId, H256}; @@ -76,6 +78,7 @@ mod tests { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs index d8673088dc32..27995b384abe 100644 --- a/core/node/state_keeper/src/updates/l2_block_updates.rs +++ b/core/node/state_keeper/src/updates/l2_block_updates.rs @@ -1,17 +1,14 @@ use std::collections::HashMap; -use once_cell::sync::Lazy; use zksync_multivm::{ interface::{ Call, CompressedBytecodeInfo, ExecutionResult, L2BlockEnv, TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionMetrics, VmExecutionResultAndLogs, }, - vm_latest::TransactionVmExt, + vm_latest::{utils::extract_bytecodes_marked_as_known, TransactionVmExt}, }; -use zksync_system_constants::KNOWN_CODES_STORAGE_ADDRESS; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, - ethabi, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, L2BlockNumber, ProtocolVersionId, StorageLogWithPreviousValue, Transaction, H256, }; @@ -19,27 +16,6 @@ use zksync_utils::bytecode::hash_bytecode; use crate::metrics::KEEPER_METRICS; -/// Extracts all bytecodes marked as known on the system contracts. 
-fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { - static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { - ethabi::long_signature( - "MarkedAsKnown", - &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], - ) - }); - - all_generated_events - .iter() - .filter(|event| { - // Filter events from the deployer contract that match the expected signature. - event.address == KNOWN_CODES_STORAGE_ADDRESS - && event.indexed_topics.len() == 3 - && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE - }) - .map(|event| event.indexed_topics[1]) - .collect() -} - #[derive(Debug, Clone, PartialEq)] pub struct L2BlockUpdates { pub executed_transactions: Vec, @@ -104,6 +80,7 @@ impl L2BlockUpdates { self.block_execution_metrics += execution_metrics; } + #[allow(clippy::too_many_arguments)] pub(crate) fn extend_from_executed_transaction( &mut self, tx: Transaction, @@ -111,6 +88,7 @@ impl L2BlockUpdates { tx_l1_gas_this_tx: BlockGasCount, execution_metrics: VmExecutionMetrics, compressed_bytecodes: Vec, + new_known_factory_deps: HashMap>, call_traces: Vec, ) { let saved_factory_deps = @@ -145,12 +123,15 @@ impl L2BlockUpdates { // Get transaction factory deps let factory_deps = &tx.execute.factory_deps; - let tx_factory_deps: HashMap<_, _> = factory_deps + let mut tx_factory_deps: HashMap<_, _> = factory_deps .iter() - .map(|bytecode| (hash_bytecode(bytecode), bytecode)) + .map(|bytecode| (hash_bytecode(bytecode), bytecode.clone())) .collect(); + // Ensure that *dynamic* factory deps (ones that may be created when executing EVM contracts) + // are added into the lookup map as well. + tx_factory_deps.extend(new_known_factory_deps); - // Save all bytecodes that were marked as known on the bootloader + // Save all bytecodes that were marked as known in the bootloader let known_bytecodes = saved_factory_deps.into_iter().map(|bytecode_hash| { let bytecode = tx_factory_deps.get(&bytecode_hash).unwrap_or_else(|| { panic!( @@ -230,6 +211,7 @@ mod tests { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 2fad56a99299..0cebc5d8b471 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use zksync_contracts::BaseSystemContractsHashes; use zksync_multivm::{ interface::{ @@ -8,7 +10,7 @@ use zksync_multivm::{ }; use zksync_types::{ block::BlockGasCount, fee_model::BatchFeeInput, Address, L1BatchNumber, L2BlockNumber, - ProtocolVersionId, Transaction, + ProtocolVersionId, Transaction, H256, }; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates}; @@ -104,11 +106,13 @@ impl UpdatesManager { self.protocol_version } + #[allow(clippy::too_many_arguments)] pub fn extend_from_executed_transaction( &mut self, tx: Transaction, tx_execution_result: VmExecutionResultAndLogs, compressed_bytecodes: Vec, + new_known_factory_deps: HashMap>, tx_l1_gas_this_tx: BlockGasCount, execution_metrics: VmExecutionMetrics, call_traces: Vec, @@ -124,6 +128,7 @@ impl UpdatesManager { tx_l1_gas_this_tx, execution_metrics, compressed_bytecodes, + new_known_factory_deps, call_traces, ); latency.observe(); @@ -233,6 +238,7 @@ mod tests { tx, create_execution_result([]), vec![], + HashMap::new(), new_block_gas_count(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/test_utils/src/lib.rs 
b/core/node/test_utils/src/lib.rs index b9984b782111..9eb53994eee5 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -56,6 +56,7 @@ pub fn create_l1_batch(number: u32) -> L1BatchHeader { BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), + evm_emulator: None, }, ProtocolVersionId::latest(), ); @@ -88,6 +89,7 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: BaseSystemContractsHashes::default().bootloader, default_aa_code_hash: BaseSystemContractsHashes::default().default_aa, + evm_emulator_code_hash: BaseSystemContractsHashes::default().evm_emulator, protocol_version: Some(ProtocolVersionId::latest()), }, aux_data_hash: H256::zero(), @@ -217,6 +219,7 @@ impl Snapshot { l2_block, factory_deps: [&contracts.bootloader, &contracts.default_aa] .into_iter() + .chain(contracts.evm_emulator.as_ref()) .map(|c| (c.hash, zksync_utils::be_words_to_bytes(&c.code))) .collect(), storage_logs, diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 6c2933635b4c..dc94752d9886 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -209,6 +209,7 @@ async fn get_updates_manager_witness_input_data( ) -> anyhow::Result { let initial_heap_content = output.batch.final_bootloader_memory.clone().unwrap(); // might be just empty let default_aa = system_env.base_system_smart_contracts.hashes().default_aa; + let evm_emulator = system_env.base_system_smart_contracts.hashes().evm_emulator; let bootloader = system_env.base_system_smart_contracts.hashes().bootloader; let bootloader_code_bytes = connection .factory_deps_dal() @@ -240,6 +241,22 @@ async fn get_updates_manager_witness_input_data( used_bytecodes.insert(account_code_hash, account_bytecode); } + let evm_emulator_code_hash = if let Some(evm_emulator) = evm_emulator { + let evm_emulator_code_hash = h256_to_u256(evm_emulator); + if used_contract_hashes.contains(&evm_emulator_code_hash) { + let evm_emulator_bytecode = connection + .factory_deps_dal() + .get_sealed_factory_dep(evm_emulator) + .await? 
+ .ok_or_else(|| anyhow!("EVM Simulator bytecode should exist"))?; + let evm_emulator_bytecode = bytes_to_chunks(&evm_emulator_bytecode); + used_bytecodes.insert(evm_emulator_code_hash, evm_emulator_bytecode); + } + Some(evm_emulator_code_hash) + } else { + None + }; + let storage_refunds = output.batch.final_execution_state.storage_refunds.clone(); let pubdata_costs = output.batch.final_execution_state.pubdata_costs.clone(); let witness_block_state = WitnessStorageState { @@ -254,6 +271,7 @@ async fn get_updates_manager_witness_input_data( protocol_version: system_env.version, bootloader_code, default_account_code_hash: account_code_hash, + evm_emulator_code_hash, storage_refunds, pubdata_costs, witness_block_state, diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 53bef106a8f4..575fd59be042 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -322,6 +322,7 @@ async fn store_l1_batches( .iter() .map(|contract| hash_bytecode(&contract.bytecode)) .chain([genesis_params.base_system_contracts().hashes().default_aa]) + .chain(genesis_params.base_system_contracts().hashes().evm_emulator) .map(h256_to_u256) .collect(); diff --git a/core/node/vm_runner/src/tests/output_handler.rs b/core/node/vm_runner/src/tests/output_handler.rs index 1bf30effdbe5..f57814ea4491 100644 --- a/core/node/vm_runner/src/tests/output_handler.rs +++ b/core/node/vm_runner/src/tests/output_handler.rs @@ -66,6 +66,7 @@ impl OutputHandlerTester { code: vec![], hash: Default::default(), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index d0c97abab729..39a366945263 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -2,7 +2,7 @@ use ethabi::Token; use zksync_contracts::{ deployer_contract, load_contract, test_contracts::LoadnextContractExecutionParams, }; -use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; +use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_system_constants::{ CONTRACT_DEPLOYER_ADDRESS, DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, @@ -154,7 +154,7 @@ impl Account { let max_fee_per_gas = U256::from(0u32); let gas_limit = U256::from(20_000_000); let factory_deps = execute.factory_deps; - abi::Transaction::L1 { + let tx = abi::Transaction::L1 { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&self.address), @@ -186,9 +186,8 @@ impl Account { .into(), factory_deps, eth_block: 0, - } - .try_into() - .unwrap() + }; + Transaction::from_abi(tx, false).unwrap() } pub fn get_test_contract_transaction( @@ -255,8 +254,8 @@ impl Account { PrivateKeySigner::new(self.private_key.clone()) } - pub async fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec { + pub fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec { let pk_signer = self.get_pk_signer(); - pk_signer.sign_transaction(tx).await.unwrap() + pk_signer.sign_transaction(tx) } } diff --git a/docker/Makefile b/docker/Makefile index 444a94ce2214..c469587c8ffd 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -56,7 +56,7 @@ check-tools: check-nodejs check-yarn check-rust check-sqlx-cli check-docker chec # Check that contracts are checkout properly check-contracts: @if [ ! 
-d ../contracts/l1-contracts/lib/forge-std/foundry.toml ] || [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ - echo "l1-contracts git submodule is missing. Please re-download repo with `git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git`"; \ + echo "l1-contracts git submodule is missing. Please re-download repo with 'git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git'"; \ exit 1; \ fi @@ -93,9 +93,12 @@ build-witness-generator: check-tools prepare-keys $(DOCKER_BUILD_CMD) --file witness-generator/Dockerfile --load \ --tag witness-generator:$(PROTOCOL_VERSION) $(CONTEXT) +build-external-node: check-tools prepare-contracts + $(DOCKER_BUILD_CMD) --file external-node/Dockerfile --load \ + --tag external-node:$(PROTOCOL_VERSION) $(CONTEXT) # Build all containers -build-all: build-contract-verifier build-server-v2 build-witness-generator build-circuit-prover-gpu cleanup +build-all: build-contract-verifier build-server-v2 build-witness-generator build-circuit-prover-gpu build-external-node cleanup # Clean generated images clean-all: @@ -104,3 +107,4 @@ clean-all: docker rmi server-v2:$(PROTOCOL_VERSION) >/dev/null 2>&1 docker rmi prover:$(PROTOCOL_VERSION) >/dev/null 2>&1 docker rmi witness-generator:$(PROTOCOL_VERSION) >/dev/null 2>&1 + docker rmi external-node:$(PROTOCOL_VERSION) >/dev/null 2>&1 diff --git a/docs/guides/build-docker.md b/docs/guides/build-docker.md index a9e8f5d3e76c..5dd9cff022b9 100644 --- a/docs/guides/build-docker.md +++ b/docs/guides/build-docker.md @@ -25,6 +25,7 @@ contract-verifier:2.0 server-v2:2.0 prover:2.0 witness-generator:2.0 +external-node:2.0 ``` Alternatively, you may build only needed components - available targets are @@ -34,6 +35,7 @@ make -C ./docker build-contract-verifier make -C ./docker build-server-v2 make -C ./docker build-circuit-prover-gpu make -C ./docker build-witness-generator +make -C ./docker build-external-node ``` ## Building updated images diff --git a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol new file mode 100644 index 000000000000..5f4de59681fd --- /dev/null +++ b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +/** + * Mock `KnownCodeStorage` counterpart producing `MarkedAsKnown` events and having `publishEVMBytecode` method + * added for EVM emulation, calls to which should be traced by the host. + */ +contract MockKnownCodeStorage { + event MarkedAsKnown(bytes32 indexed bytecodeHash, bool indexed sendBytecodeToL1); + + function markFactoryDeps(bool _shouldSendToL1, bytes32[] calldata _hashes) external { + unchecked { + uint256 hashesLen = _hashes.length; + for (uint256 i = 0; i < hashesLen; ++i) { + _markBytecodeAsPublished(_hashes[i], _shouldSendToL1); + } + } + } + + function markBytecodeAsPublished(bytes32 _bytecodeHash) external { + _markBytecodeAsPublished(_bytecodeHash, false); + } + + function _markBytecodeAsPublished(bytes32 _bytecodeHash, bool _shouldSendToL1) internal { + if (getMarker(_bytecodeHash) == 0) { + assembly { + sstore(_bytecodeHash, 1) + } + emit MarkedAsKnown(_bytecodeHash, _shouldSendToL1); + } + } + + bytes32 evmBytecodeHash; // For tests, it's OK to potentially collide with the marker slot for hash `bytes32(0)` + + /// Sets the EVM bytecode hash to be used in the next `publishEVMBytecode` call. 
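+    /// In these tests this is expected to be called by `MockContractDeployer.create` right before `publishEVMBytecode`.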
+ function setEVMBytecodeHash(bytes32 _bytecodeHash) external { + evmBytecodeHash = _bytecodeHash; + } + + function publishEVMBytecode(bytes calldata _bytecode) external { + bytes32 hash = evmBytecodeHash; + require(hash != bytes32(0), "EVM bytecode hash not set"); + + if (getMarker(evmBytecodeHash) == 0) { + assembly { + sstore(hash, 1) + } + } + emit MarkedAsKnown(hash, getMarker(hash) == 0); + evmBytecodeHash = bytes32(0); + } + + function getMarker(bytes32 _hash) public view returns (uint256 marker) { + assembly { + marker := sload(_hash) + } + } +} + +/** + * Mock `ContractDeployer` counterpart focusing on EVM bytecode deployment (via `create`; this isn't how real EVM bytecode deployment works, + * but it's good enough for low-level tests). + */ +contract MockContractDeployer { + enum AccountAbstractionVersion { + None, + Version1 + } + + address constant CODE_ORACLE_ADDR = address(0x8012); + MockKnownCodeStorage constant KNOWN_CODE_STORAGE_CONTRACT = MockKnownCodeStorage(address(0x8004)); + + /// The returned value is obviously incorrect in the general case, but works well enough when called by the bootloader. + function extendedAccountVersion(address _address) public view returns (AccountAbstractionVersion) { + return AccountAbstractionVersion.Version1; + } + + /// Replaces real deployment with publishing a surrogate EVM "bytecode". + /// @param _salt bytecode hash + /// @param _bytecodeHash ignored, since it's not possible to set arbitrarily + /// @param _input bytecode to publish + function create( + bytes32 _salt, + bytes32 _bytecodeHash, + bytes calldata _input + ) external payable returns (address) { + KNOWN_CODE_STORAGE_CONTRACT.setEVMBytecodeHash(_salt); + KNOWN_CODE_STORAGE_CONTRACT.publishEVMBytecode(_input); + return address(0); + } +} diff --git a/etc/env/base/eth_sender.toml b/etc/env/base/eth_sender.toml index 31fe626c87f2..ad5709551c45 100644 --- a/etc/env/base/eth_sender.toml +++ b/etc/env/base/eth_sender.toml @@ -55,8 +55,8 @@ default_priority_fee_per_gas = 1_000_000_000 max_base_fee_samples = 10_000 # These two are parameters of the base_fee_per_gas formula in GasAdjuster. # The possible formulas are: -# 1. base_fee_median * (A + B * time_in_mempool) -# 2. base_fee_median * A * B ^ time_in_mempool +# 1. base_fee_median * (A + B * time_in_mempool_in_l1_blocks) +# 2. base_fee_median * A * B ^ time_in_mempool_in_l1_blocks # Currently the second is used. 
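+# Concretely, at time_in_mempool_in_l1_blocks = 0 the exponential formula gives A * base_fee_median
+# (1.5x with the value below), and each additional L1 block spent in the mempool multiplies the
+# result by another factor of B.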
# To confirm, see core/bin/zksync_core/src/eth_sender/gas_adjuster/mod.rs pricing_formula_parameter_a = 1.5 diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 220a75944e02..b7d4ffebcf98 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -13,3 +13,5 @@ prover: recursion_scheduler_level_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 dummy_verifier: true l1_batch_commit_data_generator_mode: Rollup +# Uncomment to enable EVM emulation (requires to run genesis) +# evm_emulator_hash: 0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91 diff --git a/prover/Cargo.lock b/prover/Cargo.lock index ff5a52388672..1d584a473d96 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -46,6 +46,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -208,6 +209,18 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "async-broadcast" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cd0e2e25ea8e5f7e9df04578dc6cf5c83577fd09b1a46aaf5c85e1c33f2a7e" +dependencies = [ + "event-listener", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -275,9 +288,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-lc-rs" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a47f2fb521b70c11ce7369a6c5fa4bd6af7e5d62ec06303875bafe7c6ba245" +checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -287,9 +300,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.19.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2927c7af777b460b7ccd95f8b67acd7b4c04ec8896bf0c8e80ba30523cffc057" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" dependencies = [ "bindgen 0.69.4", "cc", @@ -355,6 +368,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom", + "instant", + "rand 0.8.5", +] + [[package]] name = "backtrace" version = "0.3.72" @@ -1331,8 +1355,18 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] @@ -1349,17 +1383,51 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 
1.0.85", + "quote 1.0.36", + "strsim 0.11.1", + "syn 2.0.66", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core 0.20.10", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "debug-map-sorted" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c7dfa83618734bf9fa07aadaa1166b634e9427bb9bc5a1c2332d04d73fb721" +dependencies = [ + "itertools 0.10.5", +] + [[package]] name = "debugid" version = "0.8.0" @@ -1503,6 +1571,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + [[package]] name = "ecdsa" version = "0.14.8" @@ -1784,6 +1858,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "2.1.0" @@ -1862,6 +1946,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "fluent-uri" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17c704e9dbe1ddd863da1e6ff3567795087b1eb201ce80d8fa81162e1516500d" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "flume" version = "0.11.0" @@ -2342,6 +2435,30 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "headers" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +dependencies = [ + "base64 0.21.7", + "bytes", + "headers-core", + "http 1.1.0", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http 1.1.0", +] + [[package]] name = "heck" version = "0.3.3" @@ -2535,6 +2652,26 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-http-proxy" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d06dbdfbacf34d996c6fb540a71a684a7aae9056c71951163af8a8a4c07b9a4" +dependencies = [ + "bytes", + "futures-util", + "headers", + "http 1.1.0", + "hyper 1.3.1", + "hyper-rustls", + "hyper-util", + "pin-project-lite", + "rustls-native-certs", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "hyper-rustls" version = "0.27.2" @@ -2547,6 +2684,7 @@ dependencies = [ "hyper-util", "log", "rustls", + "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", @@ -2724,6 +2862,15 @@ dependencies = [ "regex", ] +[[package]] +name = "instant" +version 
= "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -2836,6 +2983,44 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b1fb8864823fad91877e6caea0baca82e49e8db50f8e5c9f9a453e27d3330fc" +dependencies = [ + "jsonptr", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonpath-rust" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d8fe85bd70ff715f31ce8c739194b423d79811a19602115d611a3ec85d6200" +dependencies = [ + "lazy_static", + "once_cell", + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonptr" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c6e529149475ca0b2820835d3dce8fcc41c6b943ca608d32f35b449255e4627" +dependencies = [ + "fluent-uri", + "serde", + "serde_json", +] + [[package]] name = "jsonrpsee" version = "0.23.2" @@ -3020,6 +3205,19 @@ dependencies = [ "signature 2.2.0", ] +[[package]] +name = "k8s-openapi" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8847402328d8301354c94d605481f25a6bdc1ed65471fd96af8eca71141b13" +dependencies = [ + "base64 0.22.1", + "chrono", + "serde", + "serde-value", + "serde_json", +] + [[package]] name = "keccak" version = "0.1.5" @@ -3029,6 +3227,116 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "kube" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-derive", + "kube-runtime", +] + +[[package]] +name = "kube-client" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" +dependencies = [ + "base64 0.22.1", + "bytes", + "chrono", + "either", + "futures 0.3.30", + "home", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-http-proxy", + "hyper-rustls", + "hyper-timeout", + "hyper-util", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "rustls", + "rustls-pemfile 2.1.2", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" +dependencies = [ + "chrono", + "form_urlencoded", + "http 1.1.0", + "json-patch", + "k8s-openapi", + "schemars", + "serde", + "serde-value", + "serde_json", + "thiserror", +] + +[[package]] +name = "kube-derive" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" +dependencies = [ + "darling 0.20.10", + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_json", + "syn 2.0.66", +] + +[[package]] +name = "kube-runtime" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" +dependencies = [ + "ahash 0.8.11", + "async-broadcast", + "async-stream", + "async-trait", + "backoff", + "derivative", + "futures 0.3.30", + "hashbrown 0.14.5", + "json-patch", + "jsonptr", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -3828,9 +4136,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.13" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" dependencies = [ "memchr", "thiserror", @@ -3839,9 +4147,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.13" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" dependencies = [ "pest", "pest_generator", @@ -3849,9 +4157,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.13" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" dependencies = [ "pest", "pest_meta", @@ -3862,9 +4170,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.7.13" +version = "2.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" dependencies = [ "once_cell", "pest", @@ -4821,9 +5129,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ "aws-lc-rs", "log", @@ -4902,9 +5210,9 @@ checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ "aws-lc-rs", "ring", @@ -4954,6 +5262,30 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "schemars" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_derive_internals", + "syn 2.0.66", +] + [[package]] name = "scopeguard" version = 
"1.2.0" @@ -5018,6 +5350,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ + "serde", "zeroize", ] @@ -5201,13 +5534,25 @@ dependencies = [ "syn 2.0.66", ] +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -5252,7 +5597,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", @@ -6157,6 +6502,7 @@ dependencies = [ "futures-io", "futures-sink", "pin-project-lite", + "slab", "tokio", ] @@ -6260,6 +6606,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "base64 0.21.7", + "bitflags 2.6.0", + "bytes", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -6386,9 +6751,9 @@ checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "ucd-trie" -version = "0.1.7" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" [[package]] name = "uint" @@ -6520,9 +6885,9 @@ dependencies = [ [[package]] name = "url" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -7476,8 +7841,12 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -7703,7 +8072,8 @@ dependencies = [ "async-trait", "rlp", "thiserror", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -7785,6 +8155,7 @@ dependencies = [ "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", "circuit_sequencer_api 0.150.5", + "ethabi", "hex", "itertools 0.10.5", "once_cell", @@ -7923,6 +8294,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -7931,6 +8303,44 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_prover_autoscaler" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "axum", + "chrono", + "clap 
4.5.4", + "ctrlc", + "debug-map-sorted", + "futures 0.3.30", + "k8s-openapi", + "kube", + "once_cell", + "regex", + "reqwest 0.12.5", + "ring", + "rustls", + "serde", + "serde_json", + "structopt", + "strum", + "time", + "tokio", + "tracing", + "tracing-subscriber", + "url", + "vise", + "zksync_config", + "zksync_core_leftovers", + "zksync_protobuf_config", + "zksync_prover_job_monitor", + "zksync_types", + "zksync_utils", + "zksync_vlog", +] + [[package]] name = "zksync_prover_dal" version = "0.1.0" @@ -8228,8 +8638,8 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "enum_dispatch", "primitive-types", @@ -8240,8 +8650,8 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "primitive-types", ] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index e95bae3d4c16..742eee649de1 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -1,8 +1,5 @@ [workspace] -members = [ - "crates/bin/*", - "crates/lib/*", -] +members = ["crates/bin/*", "crates/lib/*"] resolver = "2" @@ -19,20 +16,23 @@ categories = ["cryptography"] [workspace.dependencies] # Common dependencies anyhow = "1.0" -axum = "0.7.5" async-trait = "0.1" +axum = "0.7.5" bincode = "1" chrono = "0.4.38" clap = "4.4.6" colored = "2.0" const-decoder = "0.3.0" ctrlc = "3.1" +debug-map-sorted = "0.1.1" dialoguer = "0.11" futures = "0.3" hex = "0.4" -itertools = "0.10.5" indicatif = "0.16" +itertools = "0.10.5" jemallocator = "0.5" +k8s-openapi = { version = "0.23.0", features = ["v1_30"] } +kube = { version = "0.95.0", features = ["runtime", "derive"] } local-ip-address = "0.5.0" log = "0.4.20" md5 = "0.7.0" @@ -42,6 +42,8 @@ queues = "1.1.0" rand = "0.8" regex = "1.10.4" reqwest = "0.12" +ring = "0.17.8" +rustls = { version = "0.23.12", features = ["ring"] } serde = "1.0" serde_derive = "1.0" serde_json = "1.0" @@ -50,11 +52,13 @@ sqlx = { version = "0.8.1", default-features = false } structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" +time = "0.3.36" tokio = "1" tokio-util = "0.7.11" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" +url = "2.5.2" vise = "0.2.0" # Proving dependencies @@ -84,6 +88,7 @@ zksync_eth_client = { path = "../core/lib/eth_client" } zksync_contracts = { path = "../core/lib/contracts" } zksync_core_leftovers = { path = "../core/lib/zksync_core_leftovers" } zksync_periodic_job = { path = "../core/lib/periodic_job" } +zksync_protobuf_config = { path = "../core/lib/protobuf_config" } # Prover workspace dependencies zksync_prover_dal = { path = "crates/lib/prover_dal" } @@ -91,6 +96,7 @@ zksync_prover_fri_types = { path = "crates/lib/prover_fri_types" } zksync_prover_fri_utils = { path = "crates/lib/prover_fri_utils" } zksync_prover_keystore = { path = "crates/lib/keystore" } zksync_vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" } +zksync_prover_job_monitor = 
{ path = "crates/bin/prover_job_monitor" } # for `perf` profiling [profile.perf] diff --git a/prover/crates/bin/prover_autoscaler/Cargo.toml b/prover/crates/bin/prover_autoscaler/Cargo.toml new file mode 100644 index 000000000000..9743b45593e7 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "zksync_prover_autoscaler" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +zksync_core_leftovers.workspace = true +zksync_vlog.workspace = true +zksync_utils.workspace = true +zksync_types.workspace = true +zksync_config = { workspace = true, features = ["observability_ext"] } +zksync_prover_job_monitor.workspace = true +zksync_protobuf_config.workspace = true + +debug-map-sorted.workspace = true +anyhow.workspace = true +async-trait.workspace = true +axum.workspace = true +chrono.workspace = true +clap = { workspace = true, features = ["derive"] } +ctrlc = { workspace = true, features = ["termination"] } +futures.workspace = true +k8s-openapi = { workspace = true, features = ["v1_30"] } +kube = { workspace = true, features = ["runtime", "derive"] } +once_cell.workspace = true +regex.workspace = true +reqwest = { workspace = true, features = ["json"] } +ring.workspace = true +rustls = { workspace = true, features = ["ring"] } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +structopt.workspace = true +strum.workspace = true +time.workspace = true +tokio = { workspace = true, features = ["time", "macros"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing.workspace = true +url.workspace = true +vise.workspace = true diff --git a/prover/crates/bin/prover_autoscaler/src/agent.rs b/prover/crates/bin/prover_autoscaler/src/agent.rs new file mode 100644 index 000000000000..3269a43815c9 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/agent.rs @@ -0,0 +1,130 @@ +use std::net::SocketAddr; + +use anyhow::Context as _; +use axum::{ + extract::State, + response::IntoResponse, + routing::{get, post}, + Json, Router, +}; +use futures::future; +use reqwest::StatusCode; +use serde::{Deserialize, Serialize}; +use tokio::sync::watch; + +use crate::{ + cluster_types::Cluster, + k8s::{Scaler, Watcher}, +}; + +struct AppError(anyhow::Error); + +impl IntoResponse for AppError { + fn into_response(self) -> axum::response::Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Something went wrong: {}", self.0), + ) + .into_response() + } +} + +pub async fn run_server( + port: u16, + watcher: Watcher, + scaler: Scaler, + mut stop_receiver: watch::Receiver, +) -> anyhow::Result<()> { + let bind_address = SocketAddr::from(([0, 0, 0, 0], port)); + tracing::debug!("Starting Autoscaler agent on {bind_address}"); + let app = create_agent_router(watcher, scaler); + + let listener = tokio::net::TcpListener::bind(bind_address) + .await + .with_context(|| format!("Failed binding Autoscaler agent to {bind_address}"))?; + axum::serve(listener, app) + .with_graceful_shutdown(async move { + if stop_receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for Autoscaler agent was dropped without sending a signal" + ); + } + tracing::info!("Stop signal received, Autoscaler agent is shutting down"); + }) + .await + .context("Autoscaler agent failed")?; + tracing::info!("Autoscaler agent shut down"); + 
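// Illustrative usage, assuming the agent listens on `agent_config.http_port` (8081 below is
// only an example value, matching the curl sample further down):
//
//     curl http://<agent-host>:8081/healthz        # liveness probe, returns "Ok"
//     curl http://<agent-host>:8081/cluster | jq . # JSON snapshot of the watched Cluster
//
// POST /scale is shown with a full curl example in the doc comment on `scale` below.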
Ok(()) +} + +fn create_agent_router(watcher: Watcher, scaler: Scaler) -> Router { + let app = App { watcher, scaler }; + Router::new() + .route("/healthz", get(health)) + .route("/cluster", get(get_cluster)) + .route("/scale", post(scale)) + .with_state(app) +} + +// TODO: Use +// https://github.com/matter-labs/zksync-era/blob/9821a20018c367ce246dba656daab5c2e7757973/core/node/api_server/src/healthcheck.rs#L53 +// instead. +async fn health() -> &'static str { + "Ok\n" +} + +#[derive(Clone)] +struct App { + watcher: Watcher, + scaler: Scaler, +} + +async fn get_cluster(State(app): State<App>) -> Result<Json<Cluster>, AppError> { + let cluster = app.watcher.cluster.lock().await.clone(); + Ok(Json(cluster)) +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ScaleDeploymentRequest { + pub namespace: String, + pub name: String, + pub size: i32, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ScaleRequest { + pub deployments: Vec<ScaleDeploymentRequest>, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScaleResponse { + pub scale_result: Vec<String>, +} + +/// To test or force scale in a particular cluster, use: +/// $ curl -X POST -H "Content-Type: application/json" --data '{"deployments": [{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-f", "size":0},{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-c", "size":0}]}' <ip>:8081/scale +async fn scale( + State(app): State<App>, + Json(payload): Json<ScaleRequest>, +) -> Result<Json<ScaleResponse>, AppError> { + let handles: Vec<_> = payload + .deployments + .into_iter() + .map(|d| { + let s = app.scaler.clone(); + tokio::spawn(async move { + match s.scale(&d.namespace, &d.name, d.size).await { + Ok(()) => "".to_string(), + Err(err) => err.to_string(), + } + }) + }) + .collect(); + + let scale_result = future::join_all(handles) + .await + .into_iter() + .map(Result::unwrap) + .collect(); + Ok(Json(ScaleResponse { scale_result })) +} diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs new file mode 100644 index 000000000000..b074e0774c97 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -0,0 +1,66 @@ +use std::collections::{BTreeMap, HashMap}; + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize, Serializer}; +use strum::{Display, EnumString}; + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Pod { + // pub name: String, // TODO: Consider if it's needed. + pub owner: String, + pub status: String, + pub changed: DateTime<Utc>, +} +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Deployment { + // pub name: String, // TODO: Consider if it's needed.
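// Descriptive note: `running`/`desired` mirror the Kubernetes Deployment status fields
// `available_replicas`/`replicas`, as filled in by `k8s/watcher.rs`; e.g. running: 2,
// desired: 3 while a scale-up is still rolling out.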
+ pub running: i32, + pub desired: i32, +} + +fn ordered_map( + value: &HashMap, + serializer: S, +) -> Result +where + S: Serializer, +{ + let ordered: BTreeMap<_, _> = value.iter().collect(); + ordered.serialize(serializer) +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Namespace { + #[serde(serialize_with = "ordered_map")] + pub deployments: HashMap, + pub pods: HashMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Cluster { + pub name: String, + pub namespaces: HashMap, +} +impl Default for Cluster { + fn default() -> Self { + Self { + name: "".to_string(), + namespaces: HashMap::new(), + } + } +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Clusters { + pub clusters: HashMap, +} + +#[derive(Default, Debug, EnumString, Display, Hash, PartialEq, Eq, Clone, Copy)] +pub enum PodStatus { + #[default] + Unknown, + Running, + Pending, + LongPending, + NeedToMove, +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/mod.rs b/prover/crates/bin/prover_autoscaler/src/global/mod.rs new file mode 100644 index 000000000000..5e4afb938437 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/mod.rs @@ -0,0 +1,3 @@ +pub mod queuer; +pub mod scaler; +pub mod watcher; diff --git a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs new file mode 100644 index 000000000000..1ef5d96386b5 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs @@ -0,0 +1,41 @@ +use std::collections::HashMap; + +use anyhow::{Context, Ok}; +use reqwest::Method; +use zksync_prover_job_monitor::autoscaler_queue_reporter::VersionedQueueReport; +use zksync_utils::http_with_retries::send_request_with_retries; + +#[derive(Debug)] +pub struct Queue { + pub queue: HashMap, +} + +#[derive(Default)] +pub struct Queuer { + pub prover_job_monitor_url: String, +} + +impl Queuer { + pub fn new(pjm_url: String) -> Self { + Self { + prover_job_monitor_url: pjm_url, + } + } + + pub async fn get_queue(&self) -> anyhow::Result { + let url = &self.prover_job_monitor_url; + let response = send_request_with_retries(url, 5, Method::GET, None, None).await; + let res = response + .map_err(|err| anyhow::anyhow!("Failed fetching queue from url: {url}: {err:?}"))? + .json::>() + .await + .context("Failed to read response as json")?; + + Ok(Queue { + queue: res + .iter() + .map(|x| (x.version.to_string(), x.report.prover_jobs.queued as u64)) + .collect::>(), + }) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs new file mode 100644 index 000000000000..9f37c4d11675 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -0,0 +1,360 @@ +use std::{collections::HashMap, str::FromStr}; + +use chrono::Utc; +use debug_map_sorted::SortedOutputExt; +use once_cell::sync::Lazy; +use regex::Regex; +use zksync_config::configs::prover_autoscaler::{Gpu, ProverAutoscalerScalerConfig}; + +use super::{queuer, watcher}; +use crate::{ + cluster_types::{Cluster, Clusters, Pod, PodStatus}, + metrics::AUTOSCALER_METRICS, + task_wiring::Task, +}; + +const DEFAULT_SPEED: u32 = 500; + +#[derive(Default, Debug, PartialEq, Eq)] +struct GPUPool { + name: String, + gpu: Gpu, + provers: HashMap, // TODO: consider using i64 everywhere to avoid type casts. 
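// For illustration: after `convert_to_gpu_pool` (below) this map might hold something like
// {Running: 3, Pending: 1, LongPending: 1} -- per-status counts of prover pods that
// `sorted_clusters` and `run` use to rank clusters and size the pools.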
+ preemtions: u64, + max_pool_size: u32, +} + +impl GPUPool { + fn sum_by_pod_status(&self, ps: PodStatus) -> u32 { + self.provers.get(&ps).cloned().unwrap_or(0) + } + + fn to_key(&self) -> GPUPoolKey { + GPUPoolKey { + cluster: self.name.clone(), + gpu: self.gpu, + } + } +} + +#[derive(Debug, Eq, Hash, PartialEq)] +struct GPUPoolKey { + cluster: String, + gpu: Gpu, +} + +static PROVER_DEPLOYMENT_RE: Lazy = + Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?[ltvpa]\d+))?$").unwrap()); +static PROVER_POD_RE: Lazy = + Lazy::new(|| Regex::new(r"^prover-gpu-fri-spec-(\d{1,2})?(-(?[ltvpa]\d+))?").unwrap()); + +pub struct Scaler { + /// namespace to Protocol Version configuration. + namespaces: HashMap, + watcher: watcher::Watcher, + queuer: queuer::Queuer, + + /// Which cluster to use first. + cluster_priorities: HashMap, + prover_speed: HashMap, + long_pending_duration: chrono::Duration, +} + +struct ProverPodGpu<'a> { + name: &'a str, + pod: &'a Pod, + gpu: Gpu, +} + +impl<'a> ProverPodGpu<'a> { + fn new(name: &'a str, pod: &'a Pod) -> Option> { + PROVER_POD_RE.captures(name).map(|caps| Self { + name, + pod, + gpu: Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default(), + }) + } +} + +impl Scaler { + pub fn new( + watcher: watcher::Watcher, + queuer: queuer::Queuer, + config: ProverAutoscalerScalerConfig, + ) -> Self { + Self { + namespaces: config.protocol_versions, + watcher, + queuer, + cluster_priorities: config.cluster_priorities, + prover_speed: config.prover_speed, + long_pending_duration: chrono::Duration::seconds( + config.long_pending_duration.whole_seconds(), + ), + } + } + + fn convert_to_gpu_pool(&self, namespace: &String, cluster: &Cluster) -> Vec { + let mut gp_map = HashMap::new(); // + let Some(namespace_value) = &cluster.namespaces.get(namespace) else { + // No namespace in config, ignoring. + return vec![]; + }; + + for caps in namespace_value + .deployments + .keys() + .filter_map(|dn| PROVER_DEPLOYMENT_RE.captures(dn)) + { + // Processing only provers. + let gpu = + Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default(); + let e = gp_map.entry(gpu).or_insert(GPUPool { + name: cluster.name.clone(), + gpu, + max_pool_size: 100, // TODO: get from the agent. + ..Default::default() + }); + + // Initialize pool only if we have ready deployments. + e.provers.insert(PodStatus::Running, 0); + } + + for ppg in namespace_value + .pods + .iter() + .filter_map(|(pn, pv)| ProverPodGpu::new(pn, pv)) + { + let e = gp_map.entry(ppg.gpu).or_insert(GPUPool { + name: cluster.name.clone(), + gpu: ppg.gpu, + ..Default::default() + }); + let mut status = PodStatus::from_str(&ppg.pod.status).unwrap_or_default(); + if status == PodStatus::Pending + && ppg.pod.changed < Utc::now() - self.long_pending_duration + { + status = PodStatus::LongPending; + } + tracing::info!( + "pod {}: status: {}, real status: {}", + ppg.name, + status, + ppg.pod.status + ); + e.provers.entry(status).and_modify(|n| *n += 1).or_insert(1); + } + + tracing::info!("From pods {:?}", gp_map.sorted_debug()); + + gp_map.into_values().collect() + } + + fn sorted_clusters(&self, namespace: &String, clusters: &Clusters) -> Vec { + let mut gpu_pools: Vec = clusters + .clusters + .values() + .flat_map(|c| self.convert_to_gpu_pool(namespace, c)) + .collect(); + + gpu_pools.sort_by(|a, b| { + a.gpu + .cmp(&b.gpu) // Sort by GPU first. + .then( + a.sum_by_pod_status(PodStatus::NeedToMove) + .cmp(&b.sum_by_pod_status(PodStatus::NeedToMove)), + ) // Sort by need to evict. 
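// Summary of the ranking below: clusters are ordered by GPU type, then by pods that need to
// be evicted (NeedToMove), then by pods stuck in Pending (LongPending), then by recorded
// preemptions, then by configured cluster priority (lower value wins, default 1000), and
// finally by pool size, larger pools first.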
+ .then( + a.sum_by_pod_status(PodStatus::LongPending) + .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), + ) // Sort by long Pending pods. + .then(a.preemtions.cmp(&b.preemtions)) // Sort by preemtions in the cluster. + .then( + self.cluster_priorities + .get(&a.name) + .unwrap_or(&1000) + .cmp(self.cluster_priorities.get(&b.name).unwrap_or(&1000)), + ) // Sort by priority. + .then(b.max_pool_size.cmp(&a.max_pool_size)) // Reverse sort by cluster size. + }); + + gpu_pools + } + + fn speed(&self, gpu: Gpu) -> u64 { + self.prover_speed + .get(&gpu) + .cloned() + .unwrap_or(DEFAULT_SPEED) + .into() + } + + fn provers_to_speed(&self, gpu: Gpu, n: u32) -> u64 { + self.speed(gpu) * n as u64 + } + + fn normalize_queue(&self, gpu: Gpu, q: u64) -> u64 { + let speed = self.speed(gpu); + // Divide and round up if there's any remainder. + (q + speed - 1) / speed * speed + } + + fn run(&self, namespace: &String, q: u64, clusters: &Clusters) -> HashMap { + let sc = self.sorted_clusters(namespace, clusters); + tracing::debug!("Sorted clusters for namespace {}: {:?}", namespace, &sc); + + let mut total: i64 = 0; + let mut provers: HashMap = HashMap::new(); + for c in &sc { + for (status, p) in &c.provers { + match status { + PodStatus::Running | PodStatus::Pending => { + total += self.provers_to_speed(c.gpu, *p) as i64; + provers + .entry(c.to_key()) + .and_modify(|x| *x += p) + .or_insert(*p); + } + _ => (), // Ignore LongPending as not running here. + } + } + } + + // Remove unneeded pods. + if (total as u64) > self.normalize_queue(Gpu::L4, q) { + for c in sc.iter().rev() { + let mut excess_queue = total as u64 - self.normalize_queue(c.gpu, q); + let mut excess_provers = (excess_queue / self.speed(c.gpu)) as u32; + let p = provers.entry(c.to_key()).or_default(); + if *p < excess_provers { + excess_provers = *p; + excess_queue = *p as u64 * self.speed(c.gpu); + } + *p -= excess_provers; + total -= excess_queue as i64; + if total <= 0 { + break; + }; + } + } + + // Reduce load in over capacity pools. + for c in &sc { + let p = provers.entry(c.to_key()).or_default(); + if c.max_pool_size < *p { + let excess = *p - c.max_pool_size; + total -= excess as i64 * self.speed(c.gpu) as i64; + *p -= excess; + } + } + + tracing::debug!("Queue coverd with provers: {}", total); + // Add required provers. + if (total as u64) < q { + for c in &sc { + let mut required_queue = q - total as u64; + let mut required_provers = + (self.normalize_queue(c.gpu, required_queue) / self.speed(c.gpu)) as u32; + let p = provers.entry(c.to_key()).or_default(); + if *p + required_provers > c.max_pool_size { + required_provers = c.max_pool_size - *p; + required_queue = required_provers as u64 * self.speed(c.gpu); + } + *p += required_provers; + total += required_queue as i64; + } + } + + tracing::debug!("run result: provers {:?}, total: {}", &provers, total); + + provers + } +} + +#[async_trait::async_trait] +impl Task for Scaler { + async fn invoke(&self) -> anyhow::Result<()> { + let queue = self.queuer.get_queue().await.unwrap(); + + // TODO: Check that clusters data is ready. 
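// Worked example of the capacity math above, using DEFAULT_SPEED = 500 as in `test_run`
// below: for a queue of 1499 jobs, `normalize_queue` rounds up to 1500
// ((1499 + 499) / 500 * 500), so `run` requests 1500 / 500 = 3 provers on the best-ranked
// cluster, capped by that cluster's `max_pool_size`.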
+ let clusters = self.watcher.clusters.lock().await; + for (ns, ppv) in &self.namespaces { + let q = queue.queue.get(ppv).cloned().unwrap_or(0); + if q > 0 { + let provers = self.run(ns, q, &clusters); + for (k, num) in &provers { + AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] + .set(*num as u64); + } + // TODO: compare before and desired, send commands [cluster,namespace,deployment] -> provers + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use tokio::sync::Mutex; + + use super::*; + use crate::{ + cluster_types::{self, Deployment, Namespace, Pod}, + global::{queuer, watcher}, + }; + + #[test] + fn test_run() { + let watcher = watcher::Watcher { + cluster_agents: vec![], + clusters: Arc::new(Mutex::new(cluster_types::Clusters { + ..Default::default() + })), + }; + let queuer = queuer::Queuer { + prover_job_monitor_url: "".to_string(), + }; + let scaler = Scaler::new(watcher, queuer, ProverAutoscalerScalerConfig::default()); + let got = scaler.run( + &"prover".to_string(), + 1499, + &Clusters { + clusters: HashMap::from([( + "foo".to_string(), + Cluster { + name: "foo".to_string(), + namespaces: HashMap::from([( + "prover".to_string(), + Namespace { + deployments: HashMap::from([( + "prover-gpu-fri-spec-1".to_string(), + Deployment { + ..Default::default() + }, + )]), + pods: HashMap::from([( + "prover-gpu-fri-spec-1-c47644679-x9xqp".to_string(), + Pod { + status: "Running".to_string(), + ..Default::default() + }, + )]), + }, + )]), + }, + )]), + }, + ); + let want = HashMap::from([( + GPUPoolKey { + cluster: "foo".to_string(), + gpu: Gpu::L4, + }, + 3, + )]); + assert!(got == want); + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs new file mode 100644 index 000000000000..ef3ebd3b8193 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -0,0 +1,89 @@ +use std::{collections::HashMap, sync::Arc}; + +use anyhow::{Context, Ok}; +use futures::future; +use reqwest::Method; +use tokio::sync::Mutex; +use url::Url; +use zksync_utils::http_with_retries::send_request_with_retries; + +use crate::{ + cluster_types::{Cluster, Clusters}, + task_wiring::Task, +}; + +#[derive(Clone)] +pub struct Watcher { + /// List of base URLs of all agents. + pub cluster_agents: Vec>, + pub clusters: Arc>, +} + +impl Watcher { + pub fn new(agent_urls: Vec) -> Self { + Self { + cluster_agents: agent_urls + .into_iter() + .map(|u| { + Arc::new( + Url::parse(&u) + .unwrap_or_else(|e| panic!("Unparsable Agent URL {}: {}", u, e)), + ) + }) + .collect(), + clusters: Arc::new(Mutex::new(Clusters { + clusters: HashMap::new(), + })), + } + } +} + +#[async_trait::async_trait] +impl Task for Watcher { + async fn invoke(&self) -> anyhow::Result<()> { + let handles: Vec<_> = self + .cluster_agents + .clone() + .into_iter() + .map(|a| { + tracing::debug!("Getting cluster data from agent {}.", a); + tokio::spawn(async move { + let url: String = a + .clone() + .join("/cluster") + .context("Failed to join URL with /cluster")? + .to_string(); + let response = + send_request_with_retries(&url, 5, Method::GET, None, None).await; + response + .map_err(|err| { + anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}") + })? 
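// Descriptive note: each agent is polled with up to 5 retries via
// `send_request_with_retries`, and its GET /cluster response is deserialized into a
// `Cluster` value below before being merged into the shared `clusters` map.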
+ .json::() + .await + .context("Failed to read response as json") + }) + }) + .collect(); + + future::try_join_all( + future::join_all(handles) + .await + .into_iter() + .map(|h| async move { + let c = h.unwrap().unwrap(); + self.clusters + .lock() + .await + .clusters + .insert(c.name.clone(), c); + Ok(()) + }) + .collect::>(), + ) + .await + .unwrap(); + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs new file mode 100644 index 000000000000..0804b9eaa404 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs @@ -0,0 +1,5 @@ +pub use scaler::Scaler; +pub use watcher::Watcher; + +mod scaler; +mod watcher; diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs new file mode 100644 index 000000000000..170b0b106507 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs @@ -0,0 +1,27 @@ +use k8s_openapi::api; +use kube::api::{Api, Patch, PatchParams}; + +#[derive(Clone)] +pub struct Scaler { + pub client: kube::Client, +} + +impl Scaler { + pub async fn scale(&self, namespace: &str, name: &str, size: i32) -> anyhow::Result<()> { + let deployments: Api = + Api::namespaced(self.client.clone(), namespace); + + let patch = serde_json::json!({ + "apiVersion": "apps/v1", + "kind": "Deployment", + "spec": { + "replicas": size + } + }); + let pp = PatchParams::default(); + deployments.patch(name, &pp, &Patch::Merge(patch)).await?; + tracing::info!("Scaled deployment/{} to {} replica(s).", name, size); + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs new file mode 100644 index 000000000000..8746d17663be --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs @@ -0,0 +1,138 @@ +use std::{collections::HashMap, sync::Arc}; + +use chrono::Utc; +use futures::{stream, StreamExt, TryStreamExt}; +use k8s_openapi::api; +use kube::{ + api::{Api, ResourceExt}, + runtime::{watcher, WatchStreamExt}, +}; +use tokio::sync::Mutex; + +use crate::{ + cluster_types::{Cluster, Deployment, Namespace, Pod}, + metrics::AUTOSCALER_METRICS, +}; + +#[derive(Clone)] +pub struct Watcher { + pub client: kube::Client, + pub cluster: Arc>, +} + +impl Watcher { + pub fn new(client: kube::Client, cluster_name: String, namespaces: Vec) -> Self { + let mut ns = HashMap::new(); + namespaces.into_iter().for_each(|n| { + ns.insert(n, Namespace::default()); + }); + + Self { + client, + cluster: Arc::new(Mutex::new(Cluster { + name: cluster_name, + namespaces: ns, + })), + } + } + + pub async fn run(self) -> anyhow::Result<()> { + // TODO: add actual metrics + AUTOSCALER_METRICS.protocol_version.set(1); + AUTOSCALER_METRICS.calls.inc_by(1); + + // TODO: watch for a list of namespaces, get: + // - deployments (name, running, desired) [done] + // - pods (name, parent deployment, statuses, when the last status change) [~done] + // - events (number of scheduling failures in last N seconds, which deployments) + // - events (preemptions, which deployment, when, how many) + // - pool size from GCP (name, size, which GPU) + let mut watchers = vec![]; + for namespace in self.cluster.lock().await.namespaces.keys() { + let deployments: Api = + Api::namespaced(self.client.clone(), namespace); + watchers.push( + watcher(deployments, watcher::Config::default()) + .default_backoff() + .applied_objects() + .map_ok(Watched::Deploy) + .boxed(), + ); + + let pods: 
Api = Api::namespaced(self.client.clone(), namespace); + watchers.push( + watcher(pods, watcher::Config::default()) + .default_backoff() + .applied_objects() + .map_ok(Watched::Pod) + .boxed(), + ); + } + // select on applied events from all watchers + let mut combo_stream = stream::select_all(watchers); + // SelectAll Stream elements must have the same Item, so all packed in this: + #[allow(clippy::large_enum_variant)] + enum Watched { + Deploy(api::apps::v1::Deployment), + Pod(api::core::v1::Pod), + } + while let Some(o) = combo_stream.try_next().await? { + match o { + Watched::Deploy(d) => { + let namespace = match d.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let dep = v + .deployments + .entry(d.name_any()) + .or_insert(Deployment::default()); + let nums = d.status.clone().unwrap_or_default(); + dep.running = nums.available_replicas.unwrap_or_default(); + dep.desired = nums.replicas.unwrap_or_default(); + + tracing::info!( + "Got deployment: {}, size: {}/{} un {}", + d.name_any(), + nums.available_replicas.unwrap_or_default(), + nums.replicas.unwrap_or_default(), + nums.unavailable_replicas.unwrap_or_default(), + ) + } + Watched::Pod(p) => { + let namespace = match p.namespace() { + Some(n) => n.to_string(), + None => continue, + }; + let mut cluster = self.cluster.lock().await; + let v = cluster.namespaces.get_mut(&namespace).unwrap(); + let pod = v.pods.entry(p.name_any()).or_insert(Pod::default()); + pod.owner = p + .owner_references() + .iter() + .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone())) + .collect::>() + .join(":"); + // TODO: Collect replica sets to match deployments and pods. + let phase = p + .status + .clone() + .unwrap_or_default() + .phase + .unwrap_or_default(); + if phase != pod.status { + // TODO: try to get an idea how to set correct value on restart. + pod.changed = Utc::now(); + } + pod.status = phase; + + tracing::info!("Got pod: {}", p.name_any()) + } + } + } + + Ok(()) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/lib.rs b/prover/crates/bin/prover_autoscaler/src/lib.rs new file mode 100644 index 000000000000..0b0d704c9078 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/lib.rs @@ -0,0 +1,6 @@ +pub mod agent; +pub(crate) mod cluster_types; +pub mod global; +pub mod k8s; +pub(crate) mod metrics; +pub mod task_wiring; diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs b/prover/crates/bin/prover_autoscaler/src/main.rs new file mode 100644 index 000000000000..196bd6deb81e --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/main.rs @@ -0,0 +1,148 @@ +use std::time::Duration; + +use anyhow::Context; +use structopt::StructOpt; +use tokio::{ + sync::{oneshot, watch}, + task::JoinHandle, +}; +use zksync_core_leftovers::temp_config_store::read_yaml_repr; +use zksync_protobuf_config::proto::prover_autoscaler; +use zksync_prover_autoscaler::{ + agent, + global::{self}, + k8s::{Scaler, Watcher}, + task_wiring::TaskRunner, +}; +use zksync_utils::wait_for_tasks::ManagedTasks; +use zksync_vlog::prometheus::PrometheusExporterConfig; + +/// Represents the sequential number of the Prover Autoscaler type. 
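// Illustrative invocations of the two roles defined below (the binary name and config path
// are assumptions; see the `Opt` struct for the actual flags):
//
//     # per-cluster agent
//     zksync_prover_autoscaler --job agent --cluster-name my-cluster --config-path config.yaml
//
//     # global scaler
//     zksync_prover_autoscaler --job scaler --config-path config.yaml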
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)] +pub enum AutoscalerType { + Scaler, + Agent, +} + +impl std::str::FromStr for AutoscalerType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "scaler" => Ok(AutoscalerType::Scaler), + "agent" => Ok(AutoscalerType::Agent), + other => Err(format!("{} is not a valid AutoscalerType", other)), + } + } +} + +#[derive(Debug, StructOpt)] +#[structopt(name = "Prover Autoscaler", about = "Run Prover Autoscaler components")] +struct Opt { + /// Prover Autoscaler can run Agent or Scaler type. + /// + /// Specify `agent` or `scaler` + #[structopt(short, long, default_value = "agent")] + job: AutoscalerType, + /// Name of the cluster Agent is watching. + #[structopt(long)] + cluster_name: Option, + /// Path to the configuration file. + #[structopt(long)] + config_path: std::path::PathBuf, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let opt = Opt::from_args(); + let general_config = + read_yaml_repr::(&opt.config_path) + .context("general config")?; + let observability_config = general_config + .observability + .context("observability config")?; + let _observability_guard = observability_config.install()?; + // That's unfortunate that there are at least 3 different Duration in rust and we use all 3 in this repo. + // TODO: Consider updating zksync_protobuf to support std::time::Duration. + let graceful_shutdown_timeout = general_config.graceful_shutdown_timeout.unsigned_abs(); + + let (stop_signal_sender, stop_signal_receiver) = oneshot::channel(); + let mut stop_signal_sender = Some(stop_signal_sender); + ctrlc::set_handler(move || { + if let Some(sender) = stop_signal_sender.take() { + sender.send(()).ok(); + } + }) + .context("Error setting Ctrl+C handler")?; + + let (stop_sender, stop_receiver) = watch::channel(false); + + let _ = rustls::crypto::ring::default_provider().install_default(); + let client = kube::Client::try_default().await?; + + tracing::info!("Starting ProverAutoscaler"); + + let mut tasks = vec![]; + + match opt.job { + AutoscalerType::Agent => { + let agent_config = general_config.agent_config.context("agent_config")?; + let exporter_config = PrometheusExporterConfig::pull(agent_config.prometheus_port); + tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); + + // TODO: maybe get cluster name from curl -H "Metadata-Flavor: Google" + // http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name + let watcher = Watcher::new( + client.clone(), + opt.cluster_name + .context("cluster_name is required for Agent")?, + agent_config.namespaces, + ); + let scaler = Scaler { client }; + tasks.push(tokio::spawn(watcher.clone().run())); + tasks.push(tokio::spawn(agent::run_server( + agent_config.http_port, + watcher, + scaler, + stop_receiver.clone(), + ))) + } + AutoscalerType::Scaler => { + let scaler_config = general_config.scaler_config.context("scaler_config")?; + let interval = scaler_config.scaler_run_interval.unsigned_abs(); + let exporter_config = PrometheusExporterConfig::pull(scaler_config.prometheus_port); + tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); + let watcher = global::watcher::Watcher::new(scaler_config.agents.clone()); + let queuer = global::queuer::Queuer::new(scaler_config.prover_job_monitor_url.clone()); + let scaler = global::scaler::Scaler::new(watcher.clone(), queuer, scaler_config); + tasks.extend(get_tasks(watcher, scaler, interval, stop_receiver)?); + } + } + + let 
mut tasks = ManagedTasks::new(tasks); + + tokio::select! { + _ = tasks.wait_single() => {}, + _ = stop_signal_receiver => { + tracing::info!("Stop signal received, shutting down"); + } + } + stop_sender.send(true).ok(); + tasks.complete(graceful_shutdown_timeout).await; + + Ok(()) +} + +fn get_tasks( + watcher: global::watcher::Watcher, + scaler: global::scaler::Scaler, + interval: Duration, + stop_receiver: watch::Receiver, +) -> anyhow::Result>>> { + let mut task_runner = TaskRunner::default(); + + task_runner.add("Watcher", interval, watcher); + task_runner.add("Scaler", interval, scaler); + + Ok(task_runner.spawn(stop_receiver)) +} diff --git a/prover/crates/bin/prover_autoscaler/src/metrics.rs b/prover/crates/bin/prover_autoscaler/src/metrics.rs new file mode 100644 index 000000000000..09cbaa6ba00f --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/metrics.rs @@ -0,0 +1,14 @@ +use vise::{Counter, Gauge, LabeledFamily, Metrics}; +use zksync_config::configs::prover_autoscaler::Gpu; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "autoscaler")] +pub(crate) struct AutoscalerMetrics { + pub protocol_version: Gauge, + pub calls: Counter, + #[metrics(labels = ["target_cluster", "target_namespace", "gpu"])] + pub provers: LabeledFamily<(String, String, Gpu), Gauge, 3>, +} + +#[vise::register] +pub(crate) static AUTOSCALER_METRICS: vise::Global = vise::Global::new(); diff --git a/prover/crates/bin/prover_autoscaler/src/task_wiring.rs b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs new file mode 100644 index 000000000000..9b60145ad9ea --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs @@ -0,0 +1,72 @@ +use std::time::Duration; + +use anyhow::Context; +use tracing::Instrument; + +/// Task trait to be run in ProverJobMonitor. +#[async_trait::async_trait] +pub trait Task { + async fn invoke(&self) -> anyhow::Result<()>; +} + +/// Wrapper for Task with a periodic interface. Holds information about the task and provides DB connectivity. +struct PeriodicTask { + job: Box, + name: String, + interval: Duration, +} + +impl PeriodicTask { + async fn run( + &self, + mut stop_receiver: tokio::sync::watch::Receiver, + ) -> anyhow::Result<()> { + tracing::info!( + "Started Task {} with run interval: {:?}", + self.name, + self.interval + ); + + let mut interval = tokio::time::interval(self.interval); + + while !*stop_receiver.borrow_and_update() { + interval.tick().await; + self.job + .invoke() + .instrument(tracing::info_span!("run", service_name = %self.name)) + .await + .context("failed to invoke task")?; + } + tracing::info!("Stop signal received; Task {} is shut down", self.name); + Ok(()) + } +} + +/// Wrapper on a vector of task. Makes adding/spawning tasks and sharing resources ergonomic. 
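// Sketch of a custom periodic job plugging into the runner below; `HeartbeatTask` is a
// hypothetical example, not one of the tasks wired up in `main.rs`:
//
//     struct HeartbeatTask;
//
//     #[async_trait::async_trait]
//     impl Task for HeartbeatTask {
//         async fn invoke(&self) -> anyhow::Result<()> {
//             tracing::info!("still alive");
//             Ok(())
//         }
//     }
//
//     task_runner.add("Heartbeat", std::time::Duration::from_secs(60), HeartbeatTask);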
+#[derive(Default)] +pub struct TaskRunner { + tasks: Vec, +} + +impl TaskRunner { + pub fn add(&mut self, name: &str, interval: Duration, job: T) { + self.tasks.push(PeriodicTask { + name: name.into(), + interval, + job: Box::new(job), + }); + } + + pub fn spawn( + self, + stop_receiver: tokio::sync::watch::Receiver, + ) -> Vec>> { + self.tasks + .into_iter() + .map(|task| { + let receiver = stop_receiver.clone(); + tokio::spawn(async move { task.run(receiver).await }) + }) + .collect() + } +} diff --git a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs index 23ae1b0f2afe..a8bc59bd45e5 100644 --- a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs @@ -114,6 +114,10 @@ pub(super) async fn generate_witness( } }; + let evm_emulator_code_hash = input.vm_run_data.evm_emulator_code_hash; + // By convention, default AA is used instead of the EVM emulator if the latter is disabled. + let evm_emulator_code_hash = + evm_emulator_code_hash.unwrap_or(input.vm_run_data.default_account_code_hash); let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run( Address::zero(), BOOTLOADER_ADDRESS, @@ -121,8 +125,7 @@ pub(super) async fn generate_witness( bootloader_contents, false, input.vm_run_data.default_account_code_hash, - // NOTE: this will be evm_simulator_code_hash in future releases - input.vm_run_data.default_account_code_hash, + evm_emulator_code_hash, input.vm_run_data.used_bytecodes, Vec::default(), MAX_CYCLES_FOR_TX as usize, diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 3cc21904f5a6..f6f8087c6993 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -1079,6 +1079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", + "serde", ] [[package]] @@ -6694,6 +6695,8 @@ dependencies = [ "eyre", "human-panic", "lazy_static", + "prost 0.12.6", + "rand", "secrecy", "serde", "serde_json", @@ -6710,6 +6713,10 @@ dependencies = [ "zksync_config", "zksync_consensus_crypto", "zksync_consensus_roles", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", + "zksync_protobuf_config", ] [[package]] @@ -6798,7 +6805,11 @@ dependencies = [ "rand", "secrecy", "serde", + "strum", + "strum_macros", + "time", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -6948,6 +6959,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index f3f6abcb9c6e..cb442a6182e6 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -34,7 +34,9 @@ zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } zksync_consensus_roles = "=0.5.0" zksync_consensus_crypto = "=0.5.0" +zksync_consensus_utils = "=0.5.0" zksync_protobuf = "=0.5.0" +zksync_protobuf_build = "=0.5.0" # External dependencies anyhow = "1.0.82" @@ -49,6 +51,7 @@ futures = "0.3.30" human-panic = "2.0" lazy_static = "1.4.0" once_cell = "1.19.0" +prost = "0.12.1" rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index a3b44fa98b32..6197a79eec99 100644 --- 
a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -183,12 +183,6 @@ Initialize the prover: zk_inception prover init ``` -Generate setup keys: - -```bash -zk_inception prover generate-sk -``` - Run the prover: ```bash diff --git a/zk_toolbox/crates/config/src/consensus_secrets.rs b/zk_toolbox/crates/config/src/consensus_secrets.rs index 0e5c4592d2fc..da551a452799 100644 --- a/zk_toolbox/crates/config/src/consensus_secrets.rs +++ b/zk_toolbox/crates/config/src/consensus_secrets.rs @@ -2,13 +2,13 @@ use std::path::Path; use xshell::Shell; use zksync_config::configs::consensus::ConsensusSecrets; -use zksync_protobuf_config::decode_yaml_repr; +use zksync_protobuf_config::read_yaml_repr; use crate::traits::ReadConfig; impl ReadConfig for ConsensusSecrets { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs index 7e7c5d4dae52..a5fcd8b72199 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zk_toolbox/crates/config/src/ecosystem.rs @@ -146,20 +146,20 @@ impl EcosystemConfig { .unwrap_or(self.default_chain.as_ref()) } - pub fn load_chain(&self, name: Option) -> Option { + pub fn load_chain(&self, name: Option) -> anyhow::Result { let name = name.unwrap_or(self.default_chain.clone()); self.load_chain_inner(&name) } - pub fn load_current_chain(&self) -> Option { + pub fn load_current_chain(&self) -> anyhow::Result { self.load_chain_inner(self.current_chain()) } - fn load_chain_inner(&self, name: &str) -> Option { + fn load_chain_inner(&self, name: &str) -> anyhow::Result { let path = self.chains.join(name).join(CONFIG_NAME); - let config = ChainConfigInternal::read(self.get_shell(), path.clone()).ok()?; + let config = ChainConfigInternal::read(self.get_shell(), path.clone())?; - Some(ChainConfig { + Ok(ChainConfig { id: config.id, name: config.name, chain_id: config.chain_id, diff --git a/zk_toolbox/crates/config/src/external_node.rs b/zk_toolbox/crates/config/src/external_node.rs index a07ff5dc1400..7d884d3e2346 100644 --- a/zk_toolbox/crates/config/src/external_node.rs +++ b/zk_toolbox/crates/config/src/external_node.rs @@ -2,7 +2,7 @@ use std::path::Path; use xshell::Shell; pub use zksync_config::configs::en_config::ENConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::EN_CONFIG_FILE, @@ -23,6 +23,6 @@ impl SaveConfig for ENConfig { impl ReadConfig for ENConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 30ec0eeb9c48..41ce906f455b 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -146,6 +146,7 @@ impl DeployL1Config { .diamond_init_minimal_l2_gas_price, bootloader_hash: genesis_config.bootloader_hash.unwrap(), default_aa_hash: genesis_config.default_aa_hash.unwrap(), + evm_emulator_hash: genesis_config.evm_emulator_hash, diamond_init_priority_tx_max_pubdata: initial_deployment_config .diamond_init_priority_tx_max_pubdata, 
diamond_init_pubdata_pricing_mode: initial_deployment_config @@ -194,6 +195,7 @@ pub struct ContractsDeployL1Config { pub diamond_init_minimal_l2_gas_price: u64, pub bootloader_hash: H256, pub default_aa_hash: H256, + pub evm_emulator_hash: Option, } #[derive(Debug, Deserialize, Serialize, Clone)] diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index a8e7407edd02..0079105b66ca 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -6,7 +6,7 @@ use url::Url; use xshell::Shell; use zksync_config::configs::object_store::ObjectStoreMode; pub use zksync_config::configs::GeneralConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENERAL_FILE, @@ -137,7 +137,7 @@ impl SaveConfig for GeneralConfig { impl ReadConfig for GeneralConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/genesis.rs b/zk_toolbox/crates/config/src/genesis.rs index a6469893fed2..933252541f43 100644 --- a/zk_toolbox/crates/config/src/genesis.rs +++ b/zk_toolbox/crates/config/src/genesis.rs @@ -3,7 +3,7 @@ use std::path::Path; use xshell::Shell; use zksync_basic_types::L1ChainId; pub use zksync_config::GenesisConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENESIS_FILE, @@ -32,6 +32,6 @@ impl SaveConfig for GenesisConfig { impl ReadConfig for GenesisConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/lib.rs b/zk_toolbox/crates/config/src/lib.rs index 1a7c5bf1d7e2..b449aefe3a26 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zk_toolbox/crates/config/src/lib.rs @@ -10,7 +10,7 @@ pub use manipulations::*; pub use secrets::*; pub use wallet_creation::*; pub use wallets::*; -pub use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +pub use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; mod apps; mod chain; diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zk_toolbox/crates/config/src/secrets.rs index f0a39148b034..cf0a9927c560 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zk_toolbox/crates/config/src/secrets.rs @@ -5,24 +5,22 @@ use common::db::DatabaseConfig; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; pub use zksync_config::configs::Secrets as SecretsConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::SECRETS_FILE, traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig}, }; -pub fn set_databases( +pub fn set_server_database( secrets: &mut SecretsConfig, server_db_config: &DatabaseConfig, - prover_db_config: &DatabaseConfig, ) -> anyhow::Result<()> { let database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Server database must be presented")?; database.server_url = Some(SensitiveUrl::from(server_db_config.full_url())); - database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } @@ -33,7 +31,7 @@ pub fn set_prover_database( let 
database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Prover database must be presented")?; database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } @@ -61,6 +59,6 @@ impl SaveConfig for SecretsConfig { impl ReadConfig for SecretsConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zk_toolbox/crates/zk_inception/Cargo.toml index 28b709c557b7..5d42dadaed13 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zk_toolbox/crates/zk_inception/Cargo.toml @@ -35,8 +35,16 @@ zksync_basic_types.workspace = true clap-markdown.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_crypto.workspace = true +zksync_protobuf.workspace = true +zksync_protobuf_config.workspace = true +prost.workspace = true secrecy.workspace = true +[dev-dependencies] +rand.workspace = true +zksync_consensus_utils.workspace = true + [build-dependencies] eyre.workspace = true ethers.workspace = true +zksync_protobuf_build.workspace = true diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 904b1421e3a0..7fbbb58c88f3 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -17,8 +17,12 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception chain initialize-bridges`↴](#zk_inception-chain-initialize-bridges) - [`zk_inception chain deploy-l2-contracts`↴](#zk_inception-chain-deploy-l2-contracts) - [`zk_inception chain upgrader`↴](#zk_inception-chain-upgrader) +- [`zk_inception chain deploy-consensus-registry`↴](#zk_inception-chain-deploy-consensus-registry) +- [`zk_inception chain deploy-multicall3`↴](#zk_inception-chain-deploy-multicall3) - [`zk_inception chain deploy-paymaster`↴](#zk_inception-chain-deploy-paymaster) - [`zk_inception chain update-token-multiplier-setter`↴](#zk_inception-chain-update-token-multiplier-setter) +- [`zk_inception consensus set-attester-committee`↴](#zk_inception-consensus-set-attester-committee) +- [`zk_inception consensus get-attester-committee`↴](#zk_inception-consensus-get-attester-committee) - [`zk_inception prover`↴](#zk_inception-prover) - [`zk_inception prover init`↴](#zk_inception-prover-init) - [`zk_inception prover setup-keys`↴](#zk_inception-prover-setup-keys) @@ -364,6 +368,18 @@ Deploy Default Upgrader e.g.: `zk_inception init -a --private-key=` +## `zk_inception chain deploy-consensus-registry` + +Deploy Consensus Registry smart contract + +**Usage:** `zk_inception chain deploy-consensus-registry` + +## `zk_inception chain deploy-multicall3` + +Deploy Multicall3 smart contract + +**Usage:** `zk_inception chain deploy-multicall3` + ## `zk_inception chain deploy-paymaster` Deploy paymaster smart contract @@ -414,6 +430,47 @@ Update Token Multiplier Setter address on L1 e.g.: `zk_inception init -a --private-key=` +## `zk_inception consensus` + +Consensus related commands + +**Usage:** `zk_inception consensus ` + +###### **Subcommands:** + +- `set-attester-committee` — Set attester committee +- `get-attester-committee` — Get attester committee + +## `zk_inception consensus set-attester-committee` + +Set attester committee in the consensus registry smart contract. Requires `consensus_registry` and `multicall3` +contracts to be deployed. 
+ +**Usage:** `zk_inception consensus set-attester-committee [OPTIONS]` + +###### **Options:** + +- `--from-genesis` — Set attester committee to `consensus.genesis_spec.attesters` in general.yaml Mutually exclusive + with `--from-file`. +- `--from-file ` — Set attester committee to committee specified in yaml file at `PATH`. + Mutually exclusive with `--from-genesis`. File format is specified in + `zk_inception/src/commands/consensus/proto/mod.proto`. Example: + + ```yaml + attesters: + - key: attester:public:secp256k1:0339d4b0cdd9896d3929631a4e5e9a5b4919f52592bec571d70bb0e50a3a824714 + weight: 1 + - key: attester:public:secp256k1:024897d8c10d7a57d108cfe2a724d7824c657f219ef5d9f7674810a6746c19fa7b + weight: 1 + ``` + +## `zk_inception consensus get-attester-committee` + +Requires `consensus_registry` and `multicall3` contracts to be deployed. Fetches attester committee from the consensus +registry contract and prints it. + +**Usage:** `zk_inception consensus get-attester-committee` + ## `zk_inception prover` Prover related commands @@ -423,7 +480,6 @@ Prover related commands ###### **Subcommands:** - `init` — Initialize prover -- `generate-sk` — Generate setup keys - `run` — Run prover - `init-bellman-cuda` — Initialize bellman-cuda diff --git a/zk_toolbox/crates/zk_inception/build.rs b/zk_toolbox/crates/zk_inception/build.rs index 43c8d7a5aac9..92f34a542b7f 100644 --- a/zk_toolbox/crates/zk_inception/build.rs +++ b/zk_toolbox/crates/zk_inception/build.rs @@ -7,5 +7,15 @@ fn main() -> eyre::Result<()> { Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json")? .generate()? .write_to_file(outdir.join("consensus_registry_abi.rs"))?; + + zksync_protobuf_build::Config { + input_root: "src/commands/consensus/proto".into(), + proto_root: "zksync/toolbox/consensus".into(), + dependencies: vec!["::zksync_protobuf_config::proto".parse().unwrap()], + protobuf_crate: "::zksync_protobuf".parse().unwrap(), + is_public: false, + } + .generate() + .unwrap(); Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs index 21796b3179de..aaf995985a36 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs @@ -7,11 +7,10 @@ use slugify_rs::slugify; use url::Url; use crate::{ - defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL, DATABASE_SERVER_URL}, + defaults::{generate_db_names, DBNames, DATABASE_SERVER_URL}, messages::{ - msg_prover_db_name_prompt, msg_prover_db_url_prompt, msg_server_db_name_prompt, - msg_server_db_url_prompt, MSG_PROVER_DB_NAME_HELP, MSG_PROVER_DB_URL_HELP, - MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP, + msg_server_db_name_prompt, msg_server_db_url_prompt, MSG_SERVER_DB_NAME_HELP, + MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP, }, }; @@ -21,10 +20,6 @@ pub struct GenesisArgs { pub server_db_url: Option, #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] pub server_db_name: Option, - #[clap(long, help = MSG_PROVER_DB_URL_HELP)] - pub prover_db_url: Option, - #[clap(long, help = MSG_PROVER_DB_NAME_HELP)] - pub prover_db_name: Option, #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)] pub use_default: bool, #[clap(long, short, action)] @@ -33,15 +28,11 @@ pub struct GenesisArgs { impl GenesisArgs { pub fn fill_values_with_prompt(self, config: &ChainConfig) -> GenesisArgsFinal { - let DBNames { - server_name, - prover_name, - } = 
generate_db_names(config); + let DBNames { server_name, .. } = generate_db_names(config); let chain_name = config.name.clone(); if self.use_default { GenesisArgsFinal { server_db: DatabaseConfig::new(DATABASE_SERVER_URL.clone(), server_name), - prover_db: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), dont_drop: self.dont_drop, } } else { @@ -58,22 +49,8 @@ impl GenesisArgs { }), separator = "_" ); - let prover_db_url = self.prover_db_url.unwrap_or_else(|| { - Prompt::new(&msg_prover_db_url_prompt(&chain_name)) - .default(DATABASE_PROVER_URL.as_str()) - .ask() - }); - let prover_db_name = slugify!( - &self.prover_db_name.unwrap_or_else(|| { - Prompt::new(&msg_prover_db_name_prompt(&chain_name)) - .default(&prover_name) - .ask() - }), - separator = "_" - ); GenesisArgsFinal { server_db: DatabaseConfig::new(server_db_url, server_db_name), - prover_db: DatabaseConfig::new(prover_db_url, prover_db_name), dont_drop: self.dont_drop, } } @@ -96,25 +73,13 @@ impl GenesisArgs { (None, None) }; - let (prover_db_url, prover_db_name) = if let Some(db_full_url) = database.prover_url { - let db_config = DatabaseConfig::from_url(db_full_url.expose_url()) - .context("Invalid prover database URL")?; - (Some(db_config.url), Some(db_config.name)) - } else { - (None, None) - }; - self.server_db_url = self.server_db_url.or(server_db_url); self.server_db_name = self.server_db_name.or(server_db_name); - self.prover_db_url = self.prover_db_url.or(prover_db_url); - self.prover_db_name = self.prover_db_name.or(prover_db_name); Ok(self.fill_values_with_prompt(chain_config)) } pub fn reset_db_names(&mut self) { - self.prover_db_name = None; - self.prover_db_url = None; self.server_db_name = None; self.server_db_url = None; } @@ -123,6 +88,5 @@ impl GenesisArgs { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GenesisArgsFinal { pub server_db: DatabaseConfig, - pub prover_db: DatabaseConfig, pub dont_drop: bool, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs index 26a1d0bb3251..8f0e04b5338a 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs @@ -35,6 +35,7 @@ pub enum Deploy2ContractsOption { Upgrader, InitiailizeBridges, ConsensusRegistry, + Multicall3, } pub async fn run( @@ -82,6 +83,16 @@ pub async fn run( ) .await?; } + Deploy2ContractsOption::Multicall3 => { + deploy_multicall3( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + } Deploy2ContractsOption::InitiailizeBridges => { initialize_bridges( shell, @@ -184,6 +195,24 @@ pub async fn deploy_consensus_registry( .await } +pub async fn deploy_multicall3( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &mut ContractsConfig, + forge_args: ForgeScriptArgs, +) -> anyhow::Result<()> { + build_and_deploy( + shell, + chain_config, + ecosystem_config, + forge_args, + Some("runDeployMulticall3"), + |shell, out| contracts_config.set_multicall3(&Multicall3Output::read(shell, out)?), + ) + .await +} + pub async fn deploy_l2_contracts( shell: &Shell, chain_config: &ChainConfig, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs index bb78979ec38e..edf480946be1 100644 --- 
a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs @@ -7,7 +7,7 @@ use common::{ logger, }; use config::{ - override_config, set_databases, set_file_artifacts, set_rocks_db_config, + override_config, set_file_artifacts, set_rocks_db_config, set_server_database, traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig, FileArtifacts, }; use types::ProverMode; @@ -18,12 +18,11 @@ use crate::{ commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal}, consts::{ PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG, - PROVER_MIGRATIONS, SERVER_MIGRATIONS, + SERVER_MIGRATIONS, }, messages::{ - MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, - MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_GENESIS_DATABASES_INITIALIZED, - MSG_INITIALIZING_PROVER_DATABASE, MSG_INITIALIZING_SERVER_DATABASE, + MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, + MSG_GENESIS_DATABASES_INITIALIZED, MSG_INITIALIZING_SERVER_DATABASE, MSG_RECREATE_ROCKS_DB_ERRROR, }, utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, @@ -37,13 +36,12 @@ pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { let mut secrets = chain_config.get_secrets_config()?; let args = args.fill_values_with_secrets(&chain_config)?; - set_databases(&mut secrets, &args.server_db, &args.prover_db)?; + set_server_database(&mut secrets, &args.server_db)?; secrets.save_with_base_path(shell, &chain_config.configs)?; - initialize_databases( + initialize_server_database( shell, &args.server_db, - &args.prover_db, chain_config.link_to_code.clone(), args.dont_drop, ) @@ -53,10 +51,9 @@ pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { Ok(()) } -pub async fn initialize_databases( +pub async fn initialize_server_database( shell: &Shell, server_db_config: &DatabaseConfig, - prover_db_config: &DatabaseConfig, link_to_code: PathBuf, dont_drop: bool, ) -> anyhow::Result<()> { @@ -78,23 +75,6 @@ pub async fn initialize_databases( ) .await?; - if global_config().verbose { - logger::debug(MSG_INITIALIZING_PROVER_DATABASE) - } - if !dont_drop { - drop_db_if_exists(prover_db_config) - .await - .context(MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR)?; - init_db(prover_db_config).await?; - } - let path_to_prover_migration = link_to_code.join(PROVER_MIGRATIONS); - migrate_db( - shell, - path_to_prover_migration, - &prover_db_config.full_url(), - ) - .await?; - Ok(()) } @@ -107,7 +87,7 @@ pub fn update_configs( // Update secrets configs let mut secrets = config.get_secrets_config()?; - set_databases(&mut secrets, &args.server_db, &args.prover_db)?; + set_server_database(&mut secrets, &args.server_db)?; secrets.save_with_base_path(shell, &config.configs)?; // Update general config diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs index 01842c2916ae..c1cc03174aeb 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs @@ -7,7 +7,7 @@ use xshell::Shell; use crate::{ commands::chain::{ args::genesis::{GenesisArgs, GenesisArgsFinal}, - genesis::{self, database::initialize_databases, server::run_server_genesis}, + genesis::{self, database::initialize_server_database, server::run_server_genesis}, }, messages::{ MSG_CHAIN_NOT_INITIALIZED, MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER, 
@@ -70,16 +70,14 @@ pub async fn genesis( logger::object_to_string(serde_json::json!({ "chain_config": config, "server_db_config": args.server_db, - "prover_db_config": args.prover_db, })), ); logger::info(MSG_STARTING_GENESIS); let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); - initialize_databases( + initialize_server_database( shell, &args.server_db, - &args.prover_db, config.link_to_code.clone(), args.dont_drop, ) diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs index 378309a07cb1..c9a47616486d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs @@ -53,6 +53,9 @@ pub enum ChainCommands { /// Deploy L2 consensus registry #[command(alias = "consensus")] DeployConsensusRegistry(ForgeScriptArgs), + /// Deploy L2 multicall3 + #[command(alias = "multicall3")] + DeployMulticall3(ForgeScriptArgs), /// Deploy Default Upgrader #[command(alias = "upgrader")] DeployUpgrader(ForgeScriptArgs), @@ -77,6 +80,9 @@ pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<() ChainCommands::DeployConsensusRegistry(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await } + ChainCommands::DeployMulticall3(args) => { + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Multicall3).await + } ChainCommands::DeployUpgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/conv.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/conv.rs new file mode 100644 index 000000000000..c9d878c8fd32 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/conv.rs @@ -0,0 +1,47 @@ +use anyhow::Context as _; +use zksync_config::configs::consensus as config; +use zksync_consensus_crypto::TextFmt as _; +use zksync_consensus_roles::attester; +use zksync_protobuf::{ProtoFmt, ProtoRepr}; + +use super::proto; +use crate::utils::consensus::parse_attester_committee; + +#[derive(Debug, Clone, PartialEq)] +pub(super) struct SetAttesterCommitteeFile { + pub attesters: attester::Committee, +} + +impl ProtoFmt for SetAttesterCommitteeFile { + type Proto = proto::SetAttesterCommitteeFile; + + fn read(r: &Self::Proto) -> anyhow::Result { + // zksync_config was not allowed to depend on consensus crates, + // therefore to parse the config we need to go through the intermediate + // representation of consensus types defined in zksync_config. 
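+        // Each protobuf `WeightedAttester` is first decoded into the intermediate
+        // `zksync_config` type via `ProtoRepr::read`, and only then converted into
+        // an `attester::Committee` by `parse_attester_committee`.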
+        let attesters: Vec<_> = r
+            .attesters
+            .iter()
+            .map(|x| x.read())
+            .collect::<Result<_, _>>()
+            .context("attesters")?;
+        Ok(Self {
+            attesters: parse_attester_committee(&attesters)?,
+        })
+    }
+
+    fn build(&self) -> Self::Proto {
+        Self::Proto {
+            attesters: self
+                .attesters
+                .iter()
+                .map(|a| {
+                    ProtoRepr::build(&config::WeightedAttester {
+                        key: config::AttesterPublicKey(a.key.encode()),
+                        weight: a.weight,
+                    })
+                })
+                .collect(),
+        }
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs
similarity index 86%
rename from zk_toolbox/crates/zk_inception/src/commands/consensus.rs
rename to zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs
index 7cf96ebe5ad5..f30e37af4bcd 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/mod.rs
@@ -1,10 +1,11 @@
-use std::{borrow::Borrow, collections::HashMap, sync::Arc};
+use std::{borrow::Borrow, collections::HashMap, path::PathBuf, sync::Arc};
 
 /// Consensus registry contract operations.
 /// Includes code duplicated from `zksync_node_consensus::registry::abi`.
 use anyhow::Context as _;
 use common::logger;
 use config::EcosystemConfig;
+use conv::*;
 use ethers::{
     abi::Detokenize,
     contract::{FunctionCall, Multicall},
@@ -19,6 +20,11 @@ use zksync_consensus_roles::{attester, validator};
 
 use crate::{messages, utils::consensus::parse_attester_committee};
 
+mod conv;
+mod proto;
+#[cfg(test)]
+mod tests;
+
 #[allow(warnings)]
 mod abi {
     include!(concat!(env!("OUT_DIR"), "/consensus_registry_abi.rs"));
@@ -65,11 +71,25 @@ fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::Bls12381Sign
     }
 }
 
+#[derive(clap::Args, Debug)]
+#[group(required = true, multiple = false)]
+pub struct SetAttesterCommitteeCommand {
+    /// Sets the attester committee in the consensus registry contract to
+    /// `consensus.genesis_spec.attesters` in general.yaml.
+    #[clap(long)]
+    from_genesis: bool,
+    /// Sets the attester committee in the consensus registry contract to
+    /// the committee in the yaml file.
+    /// File format is defined in `commands/consensus/proto/mod.proto`.
+    #[clap(long)]
+    from_file: Option<PathBuf>,
+}
+
 #[derive(clap::Subcommand, Debug)]
 pub enum Command {
     /// Sets the attester committee in the consensus registry contract to
     /// `consensus.genesis_spec.attesters` in general.yaml.
-    SetAttesterCommittee,
+    SetAttesterCommittee(SetAttesterCommitteeCommand),
     /// Fetches the attester committee from the consensus registry contract.
     GetAttesterCommittee,
 }
@@ -173,7 +193,8 @@ impl Setup {
     }
 
     fn new(shell: &Shell) -> anyhow::Result<Self> {
-        let ecosystem_config = EcosystemConfig::from_file(shell)?;
+        let ecosystem_config =
+            EcosystemConfig::from_file(shell).context("EcosystemConfig::from_file()")?;
         let chain = ecosystem_config
             .load_current_chain()
             .context(messages::MSG_CHAIN_NOT_INITIALIZED)?;
@@ -227,9 +248,21 @@ impl Setup {
         attester::Committee::new(attesters.into_iter()).context("attester::Committee::new()")
     }
 
-    async fn set_attester_committee(&self) -> anyhow::Result<attester::Committee> {
+    fn read_attester_committee(
+        &self,
+        opts: &SetAttesterCommitteeCommand,
+    ) -> anyhow::Result<attester::Committee> {
         // Fetch the desired state.
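+        // The committee comes either from the YAML file passed via `--from-file`,
+        // or, with `--from-genesis`, from `consensus.genesis_spec.attesters` in general.yaml.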
- let want = (|| { + if let Some(path) = &opts.from_file { + let yaml = std::fs::read_to_string(path).context("read_to_string()")?; + let file: SetAttesterCommitteeFile = zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt_from_yaml(&yaml) + .context("proto_fmt_from_yaml()")?; + return Ok(file.attesters); + } + let attesters = (|| { Some( &self .general @@ -241,8 +274,10 @@ impl Setup { ) })() .context(messages::MSG_CONSENSUS_GENESIS_SPEC_ATTESTERS_MISSING_IN_GENERAL_YAML)?; - let want = parse_attester_committee(want).context("parse_attester_committee()")?; + parse_attester_committee(attesters).context("parse_attester_committee()") + } + async fn set_attester_committee(&self, want: &attester::Committee) -> anyhow::Result<()> { let provider = self.provider().context("provider()")?; let block_id = self.last_block(&provider).await.context("last_block()")?; let governor = self.governor().context("governor()")?; @@ -337,7 +372,7 @@ impl Setup { ) .await?; txs.wait(&provider).await.context("wait()")?; - Ok(want) + Ok(()) } } @@ -345,8 +380,11 @@ impl Command { pub(crate) async fn run(self, shell: &Shell) -> anyhow::Result<()> { let setup = Setup::new(shell).context("Setup::new()")?; match self { - Self::SetAttesterCommittee => { - let want = setup.set_attester_committee().await?; + Self::SetAttesterCommittee(opts) => { + let want = setup + .read_attester_committee(&opts) + .context("read_attester_committee()")?; + setup.set_attester_committee(&want).await?; let got = setup.get_attester_committee().await?; anyhow::ensure!( got == want, diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.proto b/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.proto new file mode 100644 index 000000000000..d8a7323f7144 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package zksync.toolbox.consensus; + +import "zksync/core/consensus.proto"; + +message SetAttesterCommitteeFile { + repeated core.consensus.WeightedAttester attesters = 1; +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.rs new file mode 100644 index 000000000000..61a0a047f0a9 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/proto/mod.rs @@ -0,0 +1,6 @@ +#![allow(warnings)] + +include!(concat!( + env!("OUT_DIR"), + "/src/commands/consensus/proto/gen.rs" +)); diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus/tests.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus/tests.rs new file mode 100644 index 000000000000..c2f393ad2294 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus/tests.rs @@ -0,0 +1,19 @@ +use rand::{distributions::Distribution, Rng}; +use zksync_consensus_utils::EncodeDist; +use zksync_protobuf::testonly::{test_encode_all_formats, FmtConv}; + +use super::SetAttesterCommitteeFile; + +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> SetAttesterCommitteeFile { + SetAttesterCommitteeFile { + attesters: rng.gen(), + } + } +} + +#[test] +fn test_encoding() { + let rng = &mut rand::thread_rng(); + test_encode_all_formats::>(rng); +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs index 6f7eae4c1685..2b2b4cf97b1b 100644 --- 
a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs @@ -79,13 +79,19 @@ fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result = serde_json::from_str(&response)?; let mut versions = vec![]; diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs index defbbd12d401..5ab859d17f0a 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs @@ -76,6 +76,7 @@ fn prepare_configs( )?, main_node_rate_limit_rps: None, gateway_url: None, + bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/portal.rs b/zk_toolbox/crates/zk_inception/src/commands/portal.rs index 5bf211211779..f9e7fe358609 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/portal.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/portal.rs @@ -107,7 +107,7 @@ async fn validate_portal_config( continue; } // Append missing chain, chain might not be initialized, so ignoring errors - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { if let Ok(portal_chain_config) = build_portal_chain_config(&chain_config).await { portal_config.add_chain_config(&portal_chain_config); } diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zk_toolbox/crates/zk_inception/src/utils/ports.rs index 5102b4fd9c6d..018fb79f345d 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/ports.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/ports.rs @@ -169,7 +169,7 @@ impl EcosystemPortsScanner { // - Ecosystem directory (docker-compose files) let mut dirs = vec![ecosystem_config.config.clone()]; for chain in ecosystem_config.list_of_chains() { - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain)) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain)) { dirs.push(chain_config.configs.clone()); if let Some(external_node_config_path) = &chain_config.external_node_config_path { dirs.push(external_node_config_path.clone()); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs index 3aefc15aba79..a6db4643c30a 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs @@ -101,14 +101,9 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> { ))); tasks.push(tokio::spawn(prettier_contracts(shell.clone(), args.check))); - futures::future::join_all(tasks) - .await - .iter() - .for_each(|res| { - if let Err(err) = res { - logger::error(err) - } - }); + for result in futures::future::join_all(tasks).await { + result??; + } } Some(Formatter::Prettier { mut targets }) => { if targets.is_empty() { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs index 435dddfc360c..6cec40a2e338 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs @@ -9,6 +9,6 @@ pub struct IntegrationArgs { pub external_node: bool, #[clap(short, long, help 
= MSG_NO_DEPS_HELP)]
     pub no_deps: bool,
-    #[clap(short, long, help = MSG_TEST_PATTERN_HELP)]
+    #[clap(short, long, help = MSG_TEST_PATTERN_HELP, allow_hyphen_values(true))]
     pub test_pattern: Option<String>,
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs
index 337733d0e3d1..facd98850d4e 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs
@@ -40,7 +40,7 @@ pub enum TestCommands {
     Upgrade(UpgradeArgs),
     #[clap(about = MSG_BUILD_ABOUT)]
     Build,
-    #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit")]
+    #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit", allow_hyphen_values(true))]
     Rust(RustArgs),
     #[clap(about = MSG_L1_CONTRACTS_ABOUT, alias = "l1")]
     L1Contracts,
diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zk_toolbox/crates/zk_supervisor/src/dals.rs
index 962a848fe00c..b998eb4301d0 100644
--- a/zk_toolbox/crates/zk_supervisor/src/dals.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/dals.rs
@@ -1,4 +1,4 @@
-use anyhow::{anyhow, Context};
+use anyhow::Context as _;
 use config::{EcosystemConfig, SecretsConfig};
 use url::Url;
 use xshell::Shell;
@@ -91,7 +91,7 @@ fn get_secrets(shell: &Shell) -> anyhow::Result<SecretsConfig> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     let chain_config = ecosystem_config
        .load_current_chain()
-        .ok_or(anyhow!(MSG_CHAIN_NOT_FOUND_ERR))?;
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
    let secrets = chain_config.get_secrets_config()?;
    Ok(secrets)
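The `dals.rs` change above follows from the new `load_chain`/`load_current_chain` signatures, which now return `anyhow::Result<ChainConfig>` instead of `Option<ChainConfig>`. The sketch below illustrates the resulting calling convention; it is not part of the PR. It assumes only items visible in this diff (`EcosystemConfig::from_file`, `list_of_chains`, `load_chain`, `load_current_chain`, and the `name`/`configs` fields of `ChainConfig`), and the `example` function and its error message are hypothetical stand-ins for the real call sites and message constants.

```rust
use anyhow::Context as _;
use config::EcosystemConfig;
use xshell::Shell;

// Hypothetical caller showing both styles used in this PR.
fn example(shell: &Shell) -> anyhow::Result<()> {
    let ecosystem = EcosystemConfig::from_file(shell)?;

    // Required chain: propagate a readable error instead of unwrapping an Option.
    let chain = ecosystem
        .load_current_chain()
        .context("current chain is not initialized")?;
    println!("default chain: {}", chain.name);

    // Best-effort enumeration: skip chains that fail to load, as portal.rs and
    // ports.rs do for chains that might not be initialized yet.
    for name in ecosystem.list_of_chains() {
        if let Ok(chain) = ecosystem.load_chain(Some(name)) {
            println!("found chain config at {:?}", chain.configs);
        }
    }
    Ok(())
}
```

Callers that require a chain attach context and bubble the error up, while best-effort callers match on `Ok(..)` and move on, which is exactly the split between `dals.rs` on one side and `portal.rs`/`ports.rs` on the other.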