From 615b43f9f4cf4c7e728206f3ca5ea135ae5e519c Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 17 Jan 2024 16:18:00 +0200 Subject: [PATCH 01/27] Generalize `FetcherCursor` for all I/O impls --- core/bin/external_node/src/main.rs | 40 ++++---- .../zksync_core/src/consensus/storage/mod.rs | 12 +-- .../lib/zksync_core/src/consensus/testonly.rs | 2 +- .../zksync_core/src/state_keeper/io/common.rs | 38 +++++++ .../src/state_keeper/io/mempool.rs | 33 +++---- .../src/state_keeper/io/tests/tester.rs | 3 +- core/lib/zksync_core/src/state_keeper/mod.rs | 3 +- .../zksync_core/src/sync_layer/external_io.rs | 34 +++---- .../lib/zksync_core/src/sync_layer/fetcher.rs | 98 +++++++------------ core/lib/zksync_core/src/sync_layer/tests.rs | 45 +++++---- 10 files changed, 152 insertions(+), 156 deletions(-) diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 07ee4140602d..05cbc8978499 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -26,8 +26,9 @@ use zksync_core::{ MiniblockSealer, MiniblockSealerHandle, ZkSyncStateKeeper, }, sync_layer::{ - batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, fetcher::FetcherCursor, - genesis::perform_genesis_if_needed, ActionQueue, MainNodeClient, SyncState, + batch_status_updater::BatchStatusUpdater, external_io::ExternalIO, + fetcher::MainNodeFetcher, genesis::perform_genesis_if_needed, ActionQueue, MainNodeClient, + SyncState, }, }; use zksync_dal::{healthcheck::ConnectionPoolHealthCheck, ConnectionPool}; @@ -56,7 +57,7 @@ async fn build_state_keeper( miniblock_sealer_handle: MiniblockSealerHandle, stop_receiver: watch::Receiver, chain_id: L2ChainId, -) -> ZkSyncStateKeeper { +) -> anyhow::Result { // These config values are used on the main node, and depending on these values certain transactions can // be *rejected* (that is, not included into the block). However, external node only mirrors what the main // node has already executed, so we can safely set these values to the maximum possible values - if the main @@ -77,9 +78,9 @@ async fn build_state_keeper( true, )); - let main_node_url = config.required.main_node_url().unwrap(); + let main_node_url = config.required.main_node_url()?; let main_node_client = ::json_rpc(&main_node_url) - .expect("Failed creating JSON-RPC client for main node"); + .context("Failed creating JSON-RPC client for main node")?; let io = ExternalIO::new( miniblock_sealer_handle, connection_pool, @@ -90,14 +91,15 @@ async fn build_state_keeper( validation_computational_gas_limit, chain_id, ) - .await; + .await + .context("Failed initializing I/O for external node state keeper")?; - ZkSyncStateKeeper::new( + Ok(ZkSyncStateKeeper::new( stop_receiver, Box::new(io), batch_executor_base, Box::new(NoopSealer), - ) + )) } async fn init_tasks( @@ -164,27 +166,27 @@ async fn init_tasks( stop_receiver.clone(), config.remote.l2_chain_id, ) - .await; + .await?; let main_node_client = ::json_rpc(&main_node_url) .context("Failed creating JSON-RPC client for main node")?; let singleton_pool_builder = ConnectionPool::singleton(&config.postgres.database_url); - let fetcher_cursor = { + let fetcher = { let pool = singleton_pool_builder .build() .await .context("failed to build a connection pool for `MainNodeFetcher`")?; let mut storage = pool.access_storage_tagged("sync_layer").await?; - FetcherCursor::new(&mut storage) - .await - .context("failed to load `MainNodeFetcher` cursor from Postgres")? 
+ MainNodeFetcher::new( + &mut storage, + Box::new(main_node_client), + action_queue_sender, + sync_state.clone(), + stop_receiver.clone(), + ) + .await + .context("failed initializing main node fetcher")? }; - let fetcher = fetcher_cursor.into_fetcher( - Box::new(main_node_client), - action_queue_sender, - sync_state.clone(), - stop_receiver.clone(), - ); let metadata_calculator_config = MetadataCalculatorConfig { db_path: config.required.merkle_tree_path.clone(), diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs b/core/lib/zksync_core/src/consensus/storage/mod.rs index e0d8db17574c..6970d545f115 100644 --- a/core/lib/zksync_core/src/consensus/storage/mod.rs +++ b/core/lib/zksync_core/src/consensus/storage/mod.rs @@ -10,9 +10,9 @@ use zksync_types::{Address, MiniblockNumber}; #[cfg(test)] mod testonly; -use crate::sync_layer::{ - fetcher::{FetchedBlock, FetcherCursor}, - sync_action::ActionQueueSender, +use crate::{ + state_keeper::io::common::IoCursor, + sync_layer::{fetcher::FetchedBlock, sync_action::ActionQueueSender}, }; /// Context-aware `zksync_dal::StorageProcessor` wrapper. @@ -136,14 +136,14 @@ impl<'a> CtxStorage<'a> { } /// Wrapper for `FetcherCursor::new()`. - pub async fn new_fetcher_cursor(&mut self, ctx: &ctx::Ctx) -> ctx::Result { - Ok(ctx.wait(FetcherCursor::new(&mut self.0)).await??) + pub async fn new_fetcher_cursor(&mut self, ctx: &ctx::Ctx) -> ctx::Result { + Ok(ctx.wait(IoCursor::for_fetcher(&mut self.0)).await??) } } #[derive(Debug)] struct Cursor { - inner: FetcherCursor, + inner: IoCursor, actions: ActionQueueSender, } diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index 83d68a812d4b..eabd8a4fdfc8 100644 --- a/core/lib/zksync_core/src/consensus/testonly.rs +++ b/core/lib/zksync_core/src/consensus/testonly.rs @@ -363,7 +363,7 @@ impl StateKeeperRunner { u32::MAX, L2ChainId::default(), ) - .await; + .await?; s.spawn_bg(miniblock_sealer.run()); s.spawn_bg(run_mock_metadata_calculator(ctx, &self.pool)); s.spawn_bg( diff --git a/core/lib/zksync_core/src/state_keeper/io/common.rs b/core/lib/zksync_core/src/state_keeper/io/common.rs index dad43b7ee9d9..356a5ba6dbd3 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common.rs @@ -1,5 +1,6 @@ use std::time::Duration; +use anyhow::Context; use multivm::{ interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, vm_latest::constants::BLOCK_GAS_LIMIT, @@ -76,6 +77,7 @@ pub(crate) async fn load_l1_batch_params( ) -> Option<(SystemEnv, L1BatchEnv)> { // If miniblock doesn't exist (for instance if it's pending), it means that there is no unsynced state (i.e. no transactions // were executed after the last sealed batch). + // FIXME: doesn't work w/ snapshot recovery; change to a dedicated DB query? let pending_miniblock_number = { let (_, last_miniblock_number_included_in_l1_batch) = storage .blocks_dal() @@ -165,6 +167,42 @@ pub(crate) async fn load_pending_batch( }) } +/// Cursor of the miniblock / L1 batch progress used by [`StateKeeperIO`](super::StateKeeperIO) implementations. +#[derive(Debug)] +pub(crate) struct IoCursor { + pub next_miniblock: MiniblockNumber, + pub prev_miniblock_hash: H256, + pub l1_batch: L1BatchNumber, +} + +impl IoCursor { + /// Loads the cursor from Postgres. 
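+ ///
+ /// For a storage whose last sealed miniblock is `N` and last sealed L1 batch is `M`,
+ /// the returned cursor has `next_miniblock == N + 1` and `l1_batch == M + 1`, with
+ /// `prev_miniblock_hash` taken from miniblock `N`'s header.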
+ pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { + // TODO (PLA-703): Support no L1 batches / miniblocks in the storage + let last_sealed_l1_batch_number = storage + .blocks_dal() + .get_sealed_l1_batch_number() + .await + .context("Failed getting sealed L1 batch number")? + .context("No L1 batches sealed")?; + let last_miniblock_header = storage + .blocks_dal() + .get_last_sealed_miniblock_header() + .await + .context("Failed getting sealed miniblock header")? + .context("No miniblocks sealed")?; + + let next_miniblock = last_miniblock_header.number + 1; + let prev_miniblock_hash = last_miniblock_header.hash; + let next_l1_batch = last_sealed_l1_batch_number + 1; + Ok(Self { + next_miniblock, + prev_miniblock_hash, + l1_batch: next_l1_batch, + }) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index 20af1b9b221d..6987b532ca08 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -27,7 +27,7 @@ use crate::{ state_keeper::{ extractors, io::{ - common::{l1_batch_params, load_pending_batch, poll_iters}, + common::{l1_batch_params, load_pending_batch, poll_iters, IoCursor}, MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, mempool_actor::l2_tx_filter, @@ -404,42 +404,30 @@ impl MempoolIO { l2_erc20_bridge_addr: Address, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> Self { - assert!( + ) -> anyhow::Result { + anyhow::ensure!( config.virtual_blocks_interval > 0, "Virtual blocks interval must be positive" ); - assert!( + anyhow::ensure!( config.virtual_blocks_per_miniblock > 0, "Virtual blocks per miniblock must be positive" ); - let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); - // TODO (PLA-703): Support no L1 batches / miniblocks in the storage - let last_sealed_l1_batch_number = storage - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .unwrap() - .expect("No L1 batches sealed"); - let last_miniblock_number = storage - .blocks_dal() - .get_sealed_miniblock_number() - .await - .unwrap(); - + let mut storage = pool.access_storage_tagged("state_keeper").await?; + let cursor = IoCursor::new(&mut storage).await?; drop(storage); - Self { + Ok(Self { mempool, object_store, pool, timeout_sealer: TimeoutSealer::new(config), filter: L2TxFilter::default(), // ^ Will be initialized properly on the first newly opened batch - current_l1_batch_number: last_sealed_l1_batch_number + 1, + current_l1_batch_number: cursor.l1_batch, miniblock_sealer_handle, - current_miniblock_number: last_miniblock_number + 1, + current_miniblock_number: cursor.next_miniblock, fee_account: config.fee_account_addr, validation_computational_gas_limit, delay_interval, @@ -448,7 +436,7 @@ impl MempoolIO { chain_id, virtual_blocks_interval: config.virtual_blocks_interval, virtual_blocks_per_miniblock: config.virtual_blocks_per_miniblock, - } + }) } async fn load_previous_l1_batch_hash(&self) -> U256 { @@ -475,6 +463,7 @@ impl MempoolIO { batch_hash } + // FIXME: won't work with snapshot recovery; track locally? 
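+ // (After snapshot recovery, the previous miniblock header may be absent from Postgres,
+ // so the lookup below would panic; tracking the hash and timestamp in `IoCursor` would
+ // avoid the DB round-trip entirely.)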
async fn load_previous_miniblock_header(&self) -> MiniblockHeader { let load_latency = KEEPER_METRICS.load_previous_miniblock_header.start(); let mut storage = self diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index 626924a9c5e5..981e1feedd2a 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -112,7 +112,8 @@ impl Tester { BLOCK_GAS_LIMIT, L2ChainId::from(270), ) - .await; + .await + .unwrap(); (io, mempool) } diff --git a/core/lib/zksync_core/src/state_keeper/mod.rs b/core/lib/zksync_core/src/state_keeper/mod.rs index b1534d9612f0..ee4493cf6139 100644 --- a/core/lib/zksync_core/src/state_keeper/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/mod.rs @@ -67,7 +67,8 @@ pub(crate) async fn create_state_keeper( state_keeper_config.validation_computational_gas_limit, network_config.zksync_network_id, ) - .await; + .await + .expect("Failed initializing main node I/O for state keeper"); let sealer = SequencerSealer::new(state_keeper_config); ZkSyncStateKeeper::new( diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 0b5bef237b6e..bc6a95a28e1b 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -22,7 +22,7 @@ use crate::{ state_keeper::{ extractors, io::{ - common::{l1_batch_params, load_pending_batch, poll_iters}, + common::{l1_batch_params, load_pending_batch, poll_iters, IoCursor}, MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, metrics::KEEPER_METRICS, @@ -69,42 +69,31 @@ impl ExternalIO { l2_erc20_bridge_addr: Address, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> Self { - let mut storage = pool.access_storage_tagged("sync_layer").await.unwrap(); - // TODO (PLA-703): Support no L1 batches / miniblocks in the storage - let last_sealed_l1_batch_number = storage - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .unwrap() - .expect("No L1 batches sealed"); - let last_miniblock_number = storage - .blocks_dal() - .get_sealed_miniblock_number() - .await - .unwrap(); + ) -> anyhow::Result { + let mut storage = pool.access_storage_tagged("sync_layer").await?; + let cursor = IoCursor::new(&mut storage).await?; drop(storage); tracing::info!( "Initialized the ExternalIO: current L1 batch number {}, current miniblock number {}", - last_sealed_l1_batch_number + 1, - last_miniblock_number + 1, + cursor.l1_batch, + cursor.next_miniblock, ); - sync_state.set_local_block(last_miniblock_number); + sync_state.set_local_block(MiniblockNumber(cursor.next_miniblock.saturating_sub(1))); - Self { + Ok(Self { miniblock_sealer_handle, pool, - current_l1_batch_number: last_sealed_l1_batch_number + 1, - current_miniblock_number: last_miniblock_number + 1, + current_l1_batch_number: cursor.l1_batch, + current_miniblock_number: cursor.next_miniblock, actions, sync_state, main_node_client, l2_erc20_bridge_addr, validation_computational_gas_limit, chain_id, - } + }) } async fn load_previous_l1_batch_hash(&self) -> U256 { @@ -117,6 +106,7 @@ impl ExternalIO { hash } + // FIXME: won't work with snapshot recovery; track locally? 
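+ // (Same caveat as in `MempoolIO`: the previous miniblock is not guaranteed to be present
+ // in Postgres after snapshot recovery.)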
async fn load_previous_miniblock_hash(&self) -> H256 { let prev_miniblock_number = self.current_miniblock_number - 1; let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 1f4f7bea810c..0b9f26ada67e 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -15,7 +15,10 @@ use super::{ sync_action::{ActionQueueSender, SyncAction}, SyncState, }; -use crate::metrics::{TxStage, APP_METRICS}; +use crate::{ + metrics::{TxStage, APP_METRICS}, + state_keeper::io::common::IoCursor, +}; const DELAY_INTERVAL: Duration = Duration::from_millis(500); const RETRY_DELAY_INTERVAL: Duration = Duration::from_secs(5); @@ -68,32 +71,10 @@ impl TryFrom for FetchedBlock { } } -/// Cursor of [`MainNodeFetcher`]. -#[derive(Debug)] -pub struct FetcherCursor { - // Fields are public for testing purposes. - pub(crate) next_miniblock: MiniblockNumber, - pub(super) prev_miniblock_hash: H256, - pub(super) l1_batch: L1BatchNumber, -} - -impl FetcherCursor { - /// Loads the cursor from Postgres. - pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { - // TODO (PLA-703): Support no L1 batches / miniblocks in the storage - let last_sealed_l1_batch_number = storage - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .context("Failed getting sealed L1 batch number")? - .context("No L1 batches sealed")?; - let last_miniblock_header = storage - .blocks_dal() - .get_last_sealed_miniblock_header() - .await - .context("Failed getting sealed miniblock header")? - .context("No miniblocks sealed")?; - +impl IoCursor { + /// Loads this cursor from storage and modifies it to account for the pending L1 batch if necessary. + pub(crate) async fn for_fetcher(storage: &mut StorageProcessor<'_>) -> anyhow::Result { + let mut this = Self::new(storage).await?; // It's important to know whether we have opened a new batch already or just sealed the previous one. // Depending on it, we must either insert `OpenBatch` item into the queue, or not. let was_new_batch_open = storage @@ -101,24 +82,10 @@ impl FetcherCursor { .pending_batch_exists() .await .context("Failed checking whether pending L1 batch exists")?; - - // Miniblocks are always fully processed. - let next_miniblock = last_miniblock_header.number + 1; - let prev_miniblock_hash = last_miniblock_header.hash; - // Decide whether the next batch should be explicitly opened or not. - let l1_batch = if was_new_batch_open { - // No `OpenBatch` action needed. - last_sealed_l1_batch_number + 1 - } else { - // We need to open the next batch. - last_sealed_l1_batch_number - }; - - Ok(Self { - next_miniblock, - prev_miniblock_hash, - l1_batch, - }) + if !was_new_batch_open { + this.l1_batch -= 1; // Should continue from the last L1 batch present in the storage + } + Ok(this) } pub(crate) fn advance(&mut self, block: FetchedBlock) -> Vec { @@ -193,36 +160,39 @@ impl FetcherCursor { new_actions } +} - /// Builds a fetcher from this cursor. - pub fn into_fetcher( - self, +/// Structure responsible for fetching batches and miniblock data from the main node. +#[derive(Debug)] +pub struct MainNodeFetcher { + // Fields are public for testing purposes. 
+ pub(super) client: CachingMainNodeClient, + pub(super) cursor: IoCursor, + pub(super) actions: ActionQueueSender, + pub(super) sync_state: SyncState, + pub(super) stop_receiver: watch::Receiver, +} + +impl MainNodeFetcher { + pub async fn new( + storage: &mut StorageProcessor<'_>, client: Box, actions: ActionQueueSender, sync_state: SyncState, stop_receiver: watch::Receiver, - ) -> MainNodeFetcher { - MainNodeFetcher { + ) -> anyhow::Result { + let cursor = IoCursor::for_fetcher(storage) + .await + .context("failed getting I/O cursor from Postgres")?; + Ok(Self { client: CachingMainNodeClient::new(client), - cursor: self, + cursor, actions, sync_state, stop_receiver, - } + }) } -} -/// Structure responsible for fetching batches and miniblock data from the main node. -#[derive(Debug)] -pub struct MainNodeFetcher { - client: CachingMainNodeClient, - cursor: FetcherCursor, - actions: ActionQueueSender, - sync_state: SyncState, - stop_receiver: watch::Receiver, -} - -impl MainNodeFetcher { pub async fn run(mut self) -> anyhow::Result<()> { tracing::info!( "Starting the fetcher routine. Initial miniblock: {}, initial l1 batch: {}", diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 35de5e597df1..94b40d554c7c 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -13,15 +13,16 @@ use zksync_types::{ Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; -use super::{fetcher::FetcherCursor, sync_action::SyncAction, *}; +use super::{sync_action::SyncAction, *}; use crate::{ api_server::web3::tests::spawn_http_server, consensus::testonly::MockMainNodeClient, genesis::{ensure_genesis_state, GenesisParams}, state_keeper::{ - seal_criteria::NoopSealer, tests::TestBatchExecutorBuilder, MiniblockSealer, - ZkSyncStateKeeper, + io::common::IoCursor, seal_criteria::NoopSealer, tests::TestBatchExecutorBuilder, + MiniblockSealer, ZkSyncStateKeeper, }, + sync_layer::{client::CachingMainNodeClient, fetcher::MainNodeFetcher}, utils::testonly::{create_l1_batch_metadata, create_l2_transaction}, }; @@ -70,7 +71,8 @@ impl StateKeeperHandles { u32::MAX, L2ChainId::default(), ) - .await; + .await + .unwrap(); let (stop_sender, stop_receiver) = watch::channel(false); let mut batch_executor_base = TestBatchExecutorBuilder::default(); @@ -400,10 +402,6 @@ async fn fetcher_basics() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); ensure_genesis(&mut storage).await; - let fetcher_cursor = FetcherCursor::new(&mut storage).await.unwrap(); - assert_eq!(fetcher_cursor.l1_batch, L1BatchNumber(0)); - assert_eq!(fetcher_cursor.next_miniblock, MiniblockNumber(1)); - drop(storage); let mut mock_client = MockMainNodeClient::default(); mock_client.push_l1_batch(0); @@ -414,12 +412,19 @@ async fn fetcher_basics() { let (actions_sender, mut actions) = ActionQueue::new(); let (stop_sender, stop_receiver) = watch::channel(false); let sync_state = SyncState::default(); - let fetcher = fetcher_cursor.into_fetcher( + let fetcher = MainNodeFetcher::new( + &mut storage, Box::new(mock_client), actions_sender, sync_state.clone(), stop_receiver, - ); + ) + .await + .unwrap(); + drop(storage); + + assert_eq!(fetcher.cursor.l1_batch, L1BatchNumber(0)); + assert_eq!(fetcher.cursor.next_miniblock, MiniblockNumber(1)); let fetcher_task = tokio::spawn(fetcher.run()); // Check that `sync_state` is updated. 
@@ -498,17 +503,17 @@ async fn fetcher_with_real_server() { let sync_state = SyncState::default(); let (actions_sender, mut actions) = ActionQueue::new(); let client = ::json_rpc(&format!("http://{server_addr}/")).unwrap(); - let fetcher_cursor = FetcherCursor { - next_miniblock: MiniblockNumber(1), - prev_miniblock_hash: genesis_miniblock_hash, - l1_batch: L1BatchNumber(0), - }; - let fetcher = fetcher_cursor.into_fetcher( - Box::new(client), - actions_sender, - sync_state.clone(), + let fetcher = MainNodeFetcher { + client: CachingMainNodeClient::new(Box::new(client)), + cursor: IoCursor { + next_miniblock: MiniblockNumber(1), + prev_miniblock_hash: genesis_miniblock_hash, + l1_batch: L1BatchNumber(0), + }, + actions: actions_sender, + sync_state: sync_state.clone(), stop_receiver, - ); + }; let fetcher_task = tokio::spawn(fetcher.run()); // Check generated actions. From 2ae2c2aa8e77398cbe1c316940c7c8ebfa13217b Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 17 Jan 2024 17:03:55 +0200 Subject: [PATCH 02/27] Track prev miniblock params locally --- .../zksync_core/src/state_keeper/io/common.rs | 11 ++-- .../src/state_keeper/io/mempool.rs | 56 +++++++---------- .../src/state_keeper/io/seal_logic.rs | 8 ++- .../src/state_keeper/io/tests/mod.rs | 6 +- .../zksync_core/src/sync_layer/external_io.rs | 61 ++++++++----------- core/lib/zksync_core/src/sync_layer/tests.rs | 1 + 6 files changed, 61 insertions(+), 82 deletions(-) diff --git a/core/lib/zksync_core/src/state_keeper/io/common.rs b/core/lib/zksync_core/src/state_keeper/io/common.rs index 356a5ba6dbd3..6832e6aebcf0 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common.rs @@ -172,6 +172,7 @@ pub(crate) async fn load_pending_batch( pub(crate) struct IoCursor { pub next_miniblock: MiniblockNumber, pub prev_miniblock_hash: H256, + pub prev_miniblock_timestamp: u64, pub l1_batch: L1BatchNumber, } @@ -192,13 +193,11 @@ impl IoCursor { .context("Failed getting sealed miniblock header")? .context("No miniblocks sealed")?; - let next_miniblock = last_miniblock_header.number + 1; - let prev_miniblock_hash = last_miniblock_header.hash; - let next_l1_batch = last_sealed_l1_batch_number + 1; Ok(Self { - next_miniblock, - prev_miniblock_hash, - l1_batch: next_l1_batch, + next_miniblock: last_miniblock_header.number + 1, + prev_miniblock_hash: last_miniblock_header.hash, + prev_miniblock_timestamp: last_miniblock_header.timestamp, + l1_batch: last_sealed_l1_batch_number + 1, }) } } diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index 6987b532ca08..6d64640faebc 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -15,9 +15,8 @@ use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; use zksync_object_store::ObjectStore; use zksync_types::{ - block::MiniblockHeader, protocol_version::ProtocolUpgradeTx, - witness_block_state::WitnessBlockState, Address, L1BatchNumber, L2ChainId, MiniblockNumber, - ProtocolVersionId, Transaction, U256, + protocol_version::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, Address, + L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, U256, }; // TODO (SMA-1206): use seconds instead of milliseconds. 
use zksync_utils::time::millis_since_epoch; @@ -33,7 +32,7 @@ use crate::{ mempool_actor::l2_tx_filter, metrics::KEEPER_METRICS, seal_criteria::{IoSealCriteria, TimeoutSealer}, - updates::UpdatesManager, + updates::{MiniblockUpdates, UpdatesManager}, MempoolGuard, }, }; @@ -50,6 +49,8 @@ pub(crate) struct MempoolIO { timeout_sealer: TimeoutSealer, filter: L2TxFilter, current_miniblock_number: MiniblockNumber, + prev_miniblock_hash: H256, + prev_miniblock_timestamp: u64, miniblock_sealer_handle: MiniblockSealerHandle, current_l1_batch_number: L1BatchNumber, fee_account: Address, @@ -126,15 +127,8 @@ impl StateKeeperIO for MempoolIO { max_wait: Duration, ) -> Option<(SystemEnv, L1BatchEnv)> { let deadline = Instant::now() + max_wait; - let prev_l1_batch_hash = self.load_previous_l1_batch_hash().await; - let MiniblockHeader { - timestamp: prev_miniblock_timestamp, - hash: prev_miniblock_hash, - .. - } = self.load_previous_miniblock_header().await; - // Block until at least one transaction in the mempool can match the filter (or timeout happens). // This is needed to ensure that block timestamp is not too old. for _ in 0..poll_iters(self.delay_interval, max_wait) { @@ -143,7 +137,7 @@ impl StateKeeperIO for MempoolIO { // We can use `timeout_at` since `sleep_past` is cancel-safe; it only uses `sleep()` async calls. let current_timestamp = tokio::time::timeout_at( deadline.into(), - sleep_past(prev_miniblock_timestamp, self.current_miniblock_number), + sleep_past(self.prev_miniblock_timestamp, self.current_miniblock_number), ); let current_timestamp = current_timestamp.await.ok()?; @@ -177,7 +171,7 @@ impl StateKeeperIO for MempoolIO { prev_l1_batch_hash, self.filter.fee_input, self.current_miniblock_number, - prev_miniblock_hash, + self.prev_miniblock_hash, base_system_contracts, self.validation_computational_gas_limit, protocol_version, @@ -269,7 +263,7 @@ impl StateKeeperIO for MempoolIO { false, ); self.miniblock_sealer_handle.submit(command).await; - self.current_miniblock_number += 1; + self.update_miniblock_fields(&updates_manager.miniblock); } async fn seal_l1_batch( @@ -309,7 +303,7 @@ impl StateKeeperIO for MempoolIO { let pool = self.pool.clone(); let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); - updates_manager + let fictive_miniblock = updates_manager .seal_l1_batch( &mut storage, self.current_miniblock_number, @@ -318,7 +312,7 @@ impl StateKeeperIO for MempoolIO { self.l2_erc20_bridge_addr, ) .await; - self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. 
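+ // The fictive miniblock closes the batch; advancing the cursor from it keeps
+ // `prev_miniblock_hash` and `prev_miniblock_timestamp` up to date without a DB read.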
+ self.update_miniblock_fields(&fictive_miniblock); self.current_l1_batch_number += 1; Ok(()) } @@ -428,6 +422,8 @@ impl MempoolIO { current_l1_batch_number: cursor.l1_batch, miniblock_sealer_handle, current_miniblock_number: cursor.next_miniblock, + prev_miniblock_hash: cursor.prev_miniblock_hash, + prev_miniblock_timestamp: cursor.prev_miniblock_timestamp, fee_account: config.fee_account_addr, validation_computational_gas_limit, delay_interval, @@ -439,6 +435,16 @@ impl MempoolIO { }) } + fn update_miniblock_fields(&mut self, miniblock: &MiniblockUpdates) { + assert_eq!( + miniblock.number, self.current_miniblock_number.0, + "Attempted to seal a miniblock with unexpected number" + ); + self.current_miniblock_number += 1; + self.prev_miniblock_hash = miniblock.get_miniblock_hash(); + self.prev_miniblock_timestamp = miniblock.timestamp; + } + async fn load_previous_l1_batch_hash(&self) -> U256 { tracing::info!( "Getting previous L1 batch hash for L1 batch #{}", @@ -463,24 +469,6 @@ impl MempoolIO { batch_hash } - // FIXME: won't work with snapshot recovery; track locally? - async fn load_previous_miniblock_header(&self) -> MiniblockHeader { - let load_latency = KEEPER_METRICS.load_previous_miniblock_header.start(); - let mut storage = self - .pool - .access_storage_tagged("state_keeper") - .await - .unwrap(); - let miniblock_header = storage - .blocks_dal() - .get_miniblock_header(self.current_miniblock_number - 1) - .await - .unwrap() - .expect("Previous miniblock must be sealed and header saved to DB"); - load_latency.observe(); - miniblock_header - } - /// "virtual_blocks_per_miniblock" will be created either if the miniblock_number % virtual_blocks_interval == 0 or /// the miniblock is the first one in the batch. /// For instance: diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index 6720ab3fa3ec..4eac2b678ee3 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -40,14 +40,15 @@ use crate::{ extractors, metrics::{L1BatchSealStage, MiniblockSealStage, L1_BATCH_METRICS, MINIBLOCK_METRICS}, types::ExecutionMetricsForCriteria, - updates::{MiniblockSealCommand, UpdatesManager}, + updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, }, }; impl UpdatesManager { /// Persists an L1 batch in the storage. /// This action includes a creation of an empty "fictive" miniblock that contains - /// the events generated during the bootloader "tip phase". + /// the events generated during the bootloader "tip phase". Returns updates for this fictive miniblock. 
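+ /// Callers are expected to feed the returned `MiniblockUpdates` back into their I/O
+ /// cursor (see `update_miniblock_fields` in the I/O implementations).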
+ #[must_use = "fictive miniblock must be used to update I/O params"] pub(crate) async fn seal_l1_batch( mut self, storage: &mut StorageProcessor<'_>, @@ -55,7 +56,7 @@ impl UpdatesManager { l1_batch_env: &L1BatchEnv, finished_batch: FinishedL1Batch, l2_erc20_bridge_addr: Address, - ) { + ) -> MiniblockUpdates { let started_at = Instant::now(); let progress = L1_BATCH_METRICS.start(L1BatchSealStage::VmFinalization); let mut transaction = storage.start_transaction().await.unwrap(); @@ -245,6 +246,7 @@ impl UpdatesManager { l1_batch_env.timestamp, &writes_metrics, ); + miniblock_command.miniblock } fn report_l1_batch_metrics( diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 52a5f26dcfd3..0df8e693157a 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -357,9 +357,9 @@ async fn test_miniblock_and_l1_batch_processing( .create_test_mempool_io(pool.clone(), miniblock_sealer_capacity) .await; - let l1_batch_env = default_l1_batch_env(0, 1, Address::random()); + let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); let mut updates = UpdatesManager::new( - l1_batch_env, + l1_batch_env.clone(), BaseSystemContractsHashes::default(), ProtocolVersionId::latest(), ); @@ -380,8 +380,6 @@ async fn test_miniblock_and_l1_batch_processing( }); let finished_batch = default_vm_block_result(); - - let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); mempool .seal_l1_batch(None, updates, &l1_batch_env, finished_batch) .await diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index bc6a95a28e1b..be9b8358fb85 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -1,7 +1,6 @@ use std::{collections::HashMap, convert::TryInto, iter::FromIterator, time::Duration}; use async_trait::async_trait; -use futures::future; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::ConnectionPool; @@ -27,7 +26,7 @@ use crate::{ }, metrics::KEEPER_METRICS, seal_criteria::IoSealCriteria, - updates::UpdatesManager, + updates::{MiniblockUpdates, UpdatesManager}, }, }; @@ -47,6 +46,7 @@ pub struct ExternalIO { current_l1_batch_number: L1BatchNumber, current_miniblock_number: MiniblockNumber, + prev_miniblock_hash: H256, actions: ActionQueue, sync_state: SyncState, main_node_client: Box, @@ -87,6 +87,7 @@ impl ExternalIO { pool, current_l1_batch_number: cursor.l1_batch, current_miniblock_number: cursor.next_miniblock, + prev_miniblock_hash: cursor.prev_miniblock_hash, actions, sync_state, main_node_client, @@ -96,6 +97,19 @@ impl ExternalIO { }) } + fn update_miniblock_fields(&mut self, miniblock: &MiniblockUpdates) { + assert_eq!( + miniblock.number, self.current_miniblock_number.0, + "Attempted to seal a miniblock with unexpected number" + ); + // Mimic the metric emitted by the main node to reuse existing Grafana charts. 
+ APP_METRICS.block_number[&BlockStage::Sealed].set(self.current_l1_batch_number.0.into()); + self.sync_state + .set_local_block(self.current_miniblock_number); + self.current_miniblock_number += 1; + self.prev_miniblock_hash = miniblock.get_miniblock_hash(); + } + async fn load_previous_l1_batch_hash(&self) -> U256 { let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); let wait_latency = KEEPER_METRICS.wait_for_prev_hash_time.start(); @@ -106,19 +120,6 @@ impl ExternalIO { hash } - // FIXME: won't work with snapshot recovery; track locally? - async fn load_previous_miniblock_hash(&self) -> H256 { - let prev_miniblock_number = self.current_miniblock_number - 1; - let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - let header = storage - .blocks_dal() - .get_miniblock_header(prev_miniblock_number) - .await - .unwrap() - .unwrap_or_else(|| panic!("Miniblock #{prev_miniblock_number} is missing")); - header.hash - } - async fn load_base_system_contracts_by_version_id( &self, id: ProtocolVersionId, @@ -311,14 +312,11 @@ impl StateKeeperIO for ExternalIO { number, self.current_l1_batch_number, "Batch number mismatch" ); - tracing::info!("Getting previous L1 batch hash and miniblock hash"); - let (previous_l1_batch_hash, previous_miniblock_hash) = future::join( - self.load_previous_l1_batch_hash(), - self.load_previous_miniblock_hash(), - ) - .await; + tracing::info!("Getting previous L1 batch hash"); + let previous_l1_batch_hash = self.load_previous_l1_batch_hash().await; tracing::info!( - "Previous L1 batch hash: {previous_l1_batch_hash}, previous miniblock hash: {previous_miniblock_hash}" + "Previous L1 batch hash: {previous_l1_batch_hash}, previous miniblock hash: {:?}", + self.prev_miniblock_hash ); let base_system_contracts = self @@ -331,7 +329,7 @@ impl StateKeeperIO for ExternalIO { previous_l1_batch_hash, BatchFeeInput::l1_pegged(l1_gas_price, l2_fair_gas_price), miniblock_number, - previous_miniblock_hash, + self.prev_miniblock_hash, base_system_contracts, self.validation_computational_gas_limit, protocol_version, @@ -457,7 +455,7 @@ impl StateKeeperIO for ExternalIO { self.sync_state .set_local_block(self.current_miniblock_number); tracing::info!("Miniblock {} is sealed", self.current_miniblock_number); - self.current_miniblock_number += 1; + self.update_miniblock_fields(&updates_manager.miniblock); } async fn seal_l1_batch( @@ -479,26 +477,19 @@ impl StateKeeperIO for ExternalIO { self.miniblock_sealer_handle.wait_for_all_commands().await; let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - let mut transaction = storage.start_transaction().await.unwrap(); - updates_manager + let fictive_miniblock = updates_manager .seal_l1_batch( - &mut transaction, + &mut storage, self.current_miniblock_number, l1_batch_env, finished_batch, self.l2_erc20_bridge_addr, ) .await; - transaction.commit().await.unwrap(); + drop(storage); + self.update_miniblock_fields(&fictive_miniblock); tracing::info!("Batch {} is sealed", self.current_l1_batch_number); - - // Mimic the metric emitted by the main node to reuse existing Grafana charts. - APP_METRICS.block_number[&BlockStage::Sealed].set(self.current_l1_batch_number.0.into()); - - self.sync_state - .set_local_block(self.current_miniblock_number); - self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. 
self.current_l1_batch_number += 1; Ok(()) } diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 94b40d554c7c..639d656fb67e 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -508,6 +508,7 @@ async fn fetcher_with_real_server() { cursor: IoCursor { next_miniblock: MiniblockNumber(1), prev_miniblock_hash: genesis_miniblock_hash, + prev_miniblock_timestamp: 0, l1_batch: L1BatchNumber(0), }, actions: actions_sender, From 50dfccf03e5996194990beccd73e89b63851b731 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 17 Jan 2024 17:54:01 +0200 Subject: [PATCH 03/27] Add extra fields to `snapshot_recovery` table --- ...52f45b2c8c0cccf11ae5247f27f74700afc6.json} | 32 +++++++--- ...71db49f5305f732ecc148dfb64ab6c0037555.json | 22 +++++++ ...8f8af02aa297d85a2695c5f448ed14b2d7386.json | 19 ------ ...extra_fields_to_snapshot_recovery.down.sql | 6 ++ ...d_extra_fields_to_snapshot_recovery.up.sql | 6 ++ core/lib/dal/src/snapshot_recovery_dal.rs | 61 ++++++++++++------- core/lib/types/src/snapshots.rs | 7 ++- 7 files changed, 104 insertions(+), 49 deletions(-) rename core/lib/dal/.sqlx/{query-47c2f23d9209d155f3f32fd21ef7931a02fe5ffaf2c4dc2f1e7a48c0e932c060.json => query-4e272b2c1ec5705f9c76dcc4795352f45b2c8c0cccf11ae5247f27f74700afc6.json} (51%) create mode 100644 core/lib/dal/.sqlx/query-c96723522050fd3e35a61068e0571db49f5305f732ecc148dfb64ab6c0037555.json delete mode 100644 core/lib/dal/.sqlx/query-df3b08549a11729fb475341b8f38f8af02aa297d85a2695c5f448ed14b2d7386.json create mode 100644 core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.down.sql create mode 100644 core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.up.sql diff --git a/core/lib/dal/.sqlx/query-47c2f23d9209d155f3f32fd21ef7931a02fe5ffaf2c4dc2f1e7a48c0e932c060.json b/core/lib/dal/.sqlx/query-4e272b2c1ec5705f9c76dcc4795352f45b2c8c0cccf11ae5247f27f74700afc6.json similarity index 51% rename from core/lib/dal/.sqlx/query-47c2f23d9209d155f3f32fd21ef7931a02fe5ffaf2c4dc2f1e7a48c0e932c060.json rename to core/lib/dal/.sqlx/query-4e272b2c1ec5705f9c76dcc4795352f45b2c8c0cccf11ae5247f27f74700afc6.json index fe8a346d1e21..538c51233744 100644 --- a/core/lib/dal/.sqlx/query-47c2f23d9209d155f3f32fd21ef7931a02fe5ffaf2c4dc2f1e7a48c0e932c060.json +++ b/core/lib/dal/.sqlx/query-4e272b2c1ec5705f9c76dcc4795352f45b2c8c0cccf11ae5247f27f74700afc6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_root_hash,\n last_finished_chunk_id,\n total_chunk_count\n FROM\n snapshot_recovery\n ", + "query": "\n SELECT\n l1_batch_number,\n l1_batch_timestamp,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_timestamp,\n miniblock_hash,\n protocol_version,\n last_finished_chunk_id,\n total_chunk_count\n FROM\n snapshot_recovery\n ", "describe": { "columns": [ { @@ -10,26 +10,41 @@ }, { "ordinal": 1, + "name": "l1_batch_timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, "name": "l1_batch_root_hash", "type_info": "Bytea" }, { - "ordinal": 2, + "ordinal": 3, "name": "miniblock_number", "type_info": "Int8" }, { - "ordinal": 3, - "name": "miniblock_root_hash", + "ordinal": 4, + "name": "miniblock_timestamp", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "miniblock_hash", "type_info": "Bytea" }, { - "ordinal": 4, + "ordinal": 6, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 7, 
"name": "last_finished_chunk_id", "type_info": "Int4" }, { - "ordinal": 5, + "ordinal": 8, "name": "total_chunk_count", "type_info": "Int4" } @@ -38,6 +53,9 @@ "Left": [] }, "nullable": [ + false, + false, + false, false, false, false, @@ -46,5 +64,5 @@ false ] }, - "hash": "47c2f23d9209d155f3f32fd21ef7931a02fe5ffaf2c4dc2f1e7a48c0e932c060" + "hash": "4e272b2c1ec5705f9c76dcc4795352f45b2c8c0cccf11ae5247f27f74700afc6" } diff --git a/core/lib/dal/.sqlx/query-c96723522050fd3e35a61068e0571db49f5305f732ecc148dfb64ab6c0037555.json b/core/lib/dal/.sqlx/query-c96723522050fd3e35a61068e0571db49f5305f732ecc148dfb64ab6c0037555.json new file mode 100644 index 000000000000..43088bd2bec1 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c96723522050fd3e35a61068e0571db49f5305f732ecc148dfb64ab6c0037555.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n snapshot_recovery (\n l1_batch_number,\n l1_batch_timestamp,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_timestamp,\n miniblock_hash,\n protocol_version,\n last_finished_chunk_id,\n total_chunk_count,\n updated_at,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, $7, $8, $9, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = excluded.l1_batch_number,\n l1_batch_timestamp = excluded.l1_batch_timestamp,\n l1_batch_root_hash = excluded.l1_batch_root_hash,\n miniblock_number = excluded.miniblock_number,\n miniblock_timestamp = excluded.miniblock_timestamp,\n miniblock_hash = excluded.miniblock_hash,\n protocol_version = excluded.protocol_version,\n last_finished_chunk_id = excluded.last_finished_chunk_id,\n total_chunk_count = excluded.total_chunk_count,\n updated_at = excluded.updated_at\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "c96723522050fd3e35a61068e0571db49f5305f732ecc148dfb64ab6c0037555" +} diff --git a/core/lib/dal/.sqlx/query-df3b08549a11729fb475341b8f38f8af02aa297d85a2695c5f448ed14b2d7386.json b/core/lib/dal/.sqlx/query-df3b08549a11729fb475341b8f38f8af02aa297d85a2695c5f448ed14b2d7386.json deleted file mode 100644 index a04523bc07b8..000000000000 --- a/core/lib/dal/.sqlx/query-df3b08549a11729fb475341b8f38f8af02aa297d85a2695c5f448ed14b2d7386.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n snapshot_recovery (\n l1_batch_number,\n l1_batch_root_hash,\n miniblock_number,\n miniblock_root_hash,\n last_finished_chunk_id,\n total_chunk_count,\n updated_at,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = excluded.l1_batch_number,\n l1_batch_root_hash = excluded.l1_batch_root_hash,\n miniblock_number = excluded.miniblock_number,\n miniblock_root_hash = excluded.miniblock_root_hash,\n last_finished_chunk_id = excluded.last_finished_chunk_id,\n total_chunk_count = excluded.total_chunk_count,\n updated_at = excluded.updated_at\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Bytea", - "Int8", - "Bytea", - "Int4", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "df3b08549a11729fb475341b8f38f8af02aa297d85a2695c5f448ed14b2d7386" -} diff --git a/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.down.sql b/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.down.sql new file mode 100644 index 000000000000..f108c21a4ee9 --- 
/dev/null +++ b/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.down.sql @@ -0,0 +1,6 @@ +ALTER TABLE snapshot_recovery + RENAME COLUMN miniblock_hash TO miniblock_root_hash; +ALTER TABLE snapshot_recovery + DROP COLUMN l1_batch_timestamp, + DROP COLUMN miniblock_timestamp, + DROP COLUMN protocol_version; diff --git a/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.up.sql b/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.up.sql new file mode 100644 index 000000000000..f7e67076387c --- /dev/null +++ b/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.up.sql @@ -0,0 +1,6 @@ +ALTER TABLE snapshot_recovery + RENAME COLUMN miniblock_root_hash TO miniblock_hash; +ALTER TABLE snapshot_recovery + ADD COLUMN l1_batch_timestamp BIGINT NOT NULL, + ADD COLUMN miniblock_timestamp BIGINT NOT NULL, + ADD COLUMN protocol_version INT NOT NULL; diff --git a/core/lib/dal/src/snapshot_recovery_dal.rs b/core/lib/dal/src/snapshot_recovery_dal.rs index edcf7ccf1986..fd0f6686a968 100644 --- a/core/lib/dal/src/snapshot_recovery_dal.rs +++ b/core/lib/dal/src/snapshot_recovery_dal.rs @@ -1,4 +1,6 @@ -use zksync_types::{snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, H256}; +use zksync_types::{ + snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, +}; use crate::StorageProcessor; @@ -17,31 +19,40 @@ impl SnapshotRecoveryDal<'_, '_> { INSERT INTO snapshot_recovery ( l1_batch_number, + l1_batch_timestamp, l1_batch_root_hash, miniblock_number, - miniblock_root_hash, + miniblock_timestamp, + miniblock_hash, + protocol_version, last_finished_chunk_id, total_chunk_count, updated_at, created_at ) VALUES - ($1, $2, $3, $4, $5, $6, NOW(), NOW()) + ($1, $2, $3, $4, $5, $6, $7, $8, $9, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE SET l1_batch_number = excluded.l1_batch_number, + l1_batch_timestamp = excluded.l1_batch_timestamp, l1_batch_root_hash = excluded.l1_batch_root_hash, miniblock_number = excluded.miniblock_number, - miniblock_root_hash = excluded.miniblock_root_hash, + miniblock_timestamp = excluded.miniblock_timestamp, + miniblock_hash = excluded.miniblock_hash, + protocol_version = excluded.protocol_version, last_finished_chunk_id = excluded.last_finished_chunk_id, total_chunk_count = excluded.total_chunk_count, updated_at = excluded.updated_at "#, status.l1_batch_number.0 as i64, + status.l1_batch_timestamp as i64, status.l1_batch_root_hash.0.as_slice(), status.miniblock_number.0 as i64, - status.miniblock_root_hash.0.as_slice(), + status.miniblock_timestamp as i64, + status.miniblock_hash.0.as_slice(), + status.protocol_version as i32, status.last_finished_chunk_id.map(|v| v as i32), status.total_chunk_count as i64, ) @@ -57,9 +68,12 @@ impl SnapshotRecoveryDal<'_, '_> { r#" SELECT l1_batch_number, + l1_batch_timestamp, l1_batch_root_hash, miniblock_number, - miniblock_root_hash, + miniblock_timestamp, + miniblock_hash, + protocol_version, last_finished_chunk_id, total_chunk_count FROM @@ -69,20 +83,25 @@ impl SnapshotRecoveryDal<'_, '_> { .fetch_optional(self.storage.conn()) .await?; - Ok(record.map(|r| SnapshotRecoveryStatus { - l1_batch_number: L1BatchNumber(r.l1_batch_number as u32), - l1_batch_root_hash: H256::from_slice(&r.l1_batch_root_hash), - miniblock_number: MiniblockNumber(r.miniblock_number as u32), - miniblock_root_hash: H256::from_slice(&r.miniblock_root_hash), - last_finished_chunk_id: r.last_finished_chunk_id.map(|v| v 
as u64), - total_chunk_count: r.total_chunk_count as u64, + Ok(record.map(|row| SnapshotRecoveryStatus { + l1_batch_number: L1BatchNumber(row.l1_batch_number as u32), + l1_batch_timestamp: row.l1_batch_timestamp as u64, + l1_batch_root_hash: H256::from_slice(&row.l1_batch_root_hash), + miniblock_number: MiniblockNumber(row.miniblock_number as u32), + miniblock_timestamp: row.miniblock_timestamp as u64, + miniblock_hash: H256::from_slice(&row.miniblock_hash), + protocol_version: ProtocolVersionId::try_from(row.protocol_version as u16).unwrap(), + last_finished_chunk_id: row.last_finished_chunk_id.map(|v| v as u64), + total_chunk_count: row.total_chunk_count as u64, })) } } #[cfg(test)] mod tests { - use zksync_types::{snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, H256}; + use zksync_types::{ + snapshots::SnapshotRecoveryStatus, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, + }; use crate::ConnectionPool; @@ -98,9 +117,12 @@ mod tests { assert_eq!(None, empty_status); let status = SnapshotRecoveryStatus { l1_batch_number: L1BatchNumber(123), + l1_batch_timestamp: 123, l1_batch_root_hash: H256::random(), miniblock_number: MiniblockNumber(234), - miniblock_root_hash: H256::random(), + miniblock_timestamp: 234, + miniblock_hash: H256::random(), + protocol_version: ProtocolVersionId::latest(), last_finished_chunk_id: None, total_chunk_count: 345, }; @@ -112,15 +134,12 @@ mod tests { .get_applied_snapshot_status() .await .unwrap(); - assert_eq!(Some(status), status_from_db); + assert_eq!(status, status_from_db.unwrap()); let updated_status = SnapshotRecoveryStatus { - l1_batch_number: L1BatchNumber(123), - l1_batch_root_hash: H256::random(), - miniblock_number: MiniblockNumber(234), - miniblock_root_hash: H256::random(), last_finished_chunk_id: Some(2345), total_chunk_count: 345, + ..status }; applied_status_dal .set_applied_snapshot_status(&updated_status) @@ -130,6 +149,6 @@ mod tests { .get_applied_snapshot_status() .await .unwrap(); - assert_eq!(Some(updated_status), updated_status_from_db); + assert_eq!(updated_status, updated_status_from_db.unwrap()); } } diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs index 19f818bb5d1e..55c73bfb3e09 100644 --- a/core/lib/types/src/snapshots.rs +++ b/core/lib/types/src/snapshots.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{AccountTreeId, L1BatchNumber, MiniblockNumber, H256}; use zksync_protobuf::{required, ProtoFmt}; -use crate::{commitment::L1BatchWithMetadata, Bytes, StorageKey, StorageValue}; +use crate::{commitment::L1BatchWithMetadata, Bytes, ProtocolVersionId, StorageKey, StorageValue}; /// Information about all snapshots persisted by the node. 
#[derive(Debug, Clone, Serialize, Deserialize)] @@ -191,8 +191,11 @@ impl ProtoFmt for SnapshotStorageLogsChunk { pub struct SnapshotRecoveryStatus { pub l1_batch_number: L1BatchNumber, pub l1_batch_root_hash: H256, + pub l1_batch_timestamp: u64, pub miniblock_number: MiniblockNumber, - pub miniblock_root_hash: H256, + pub miniblock_hash: H256, + pub miniblock_timestamp: u64, + pub protocol_version: ProtocolVersionId, pub last_finished_chunk_id: Option, pub total_chunk_count: u64, } From 6bbb072afb47feefab7fe4efd5839e2fae0ecb8c Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 17 Jan 2024 17:54:47 +0200 Subject: [PATCH 04/27] Account for snapshots in `IoCursor::new()` --- .../src/metadata_calculator/recovery/tests.rs | 10 +++- .../zksync_core/src/state_keeper/io/common.rs | 53 ++++++++++++++----- core/lib/zksync_core/src/utils/testonly.rs | 5 +- 3 files changed, 53 insertions(+), 15 deletions(-) diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs index 09f80d80b5e1..c5f39f1f10d8 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs @@ -162,9 +162,12 @@ async fn prepare_recovery_snapshot( SnapshotRecoveryStatus { l1_batch_number: L1BatchNumber(1), + l1_batch_timestamp: 1, l1_batch_root_hash, miniblock_number: MiniblockNumber(1), - miniblock_root_hash: H256::zero(), // not used + miniblock_timestamp: 1, + miniblock_hash: H256::zero(), // not used + protocol_version: ProtocolVersionId::latest(), last_finished_chunk_id: Some(0), total_chunk_count: 1, } @@ -415,9 +418,12 @@ async fn prepare_clean_recovery_snapshot( let snapshot_recovery = SnapshotRecoveryStatus { l1_batch_number: l1_batch.number, + l1_batch_timestamp: l1_batch.timestamp, l1_batch_root_hash, miniblock_number: miniblock.number, - miniblock_root_hash: H256::zero(), // not used + miniblock_timestamp: miniblock.timestamp, + miniblock_hash: H256::zero(), // not used + protocol_version: l1_batch.protocol_version.unwrap(), last_finished_chunk_id: None, total_chunk_count: 100, }; diff --git a/core/lib/zksync_core/src/state_keeper/io/common.rs b/core/lib/zksync_core/src/state_keeper/io/common.rs index 6832e6aebcf0..8f4db7e8a52e 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common.rs @@ -178,27 +178,56 @@ pub(crate) struct IoCursor { impl IoCursor { /// Loads the cursor from Postgres. + // FIXME: unit tests pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { - // TODO (PLA-703): Support no L1 batches / miniblocks in the storage let last_sealed_l1_batch_number = storage .blocks_dal() .get_sealed_l1_batch_number() .await - .context("Failed getting sealed L1 batch number")? - .context("No L1 batches sealed")?; + .context("Failed getting sealed L1 batch number")?; let last_miniblock_header = storage .blocks_dal() .get_last_sealed_miniblock_header() .await - .context("Failed getting sealed miniblock header")? 
- .context("No miniblocks sealed")?; - - Ok(Self { - next_miniblock: last_miniblock_header.number + 1, - prev_miniblock_hash: last_miniblock_header.hash, - prev_miniblock_timestamp: last_miniblock_header.timestamp, - l1_batch: last_sealed_l1_batch_number + 1, - }) + .context("Failed getting sealed miniblock header")?; + + if let (Some(l1_batch_number), Some(miniblock_header)) = + (last_sealed_l1_batch_number, &last_miniblock_header) + { + Ok(Self { + next_miniblock: miniblock_header.number + 1, + prev_miniblock_hash: miniblock_header.hash, + prev_miniblock_timestamp: miniblock_header.timestamp, + l1_batch: l1_batch_number + 1, + }) + } else { + let snapshot_recovery = storage + .snapshot_recovery_dal() + .get_applied_snapshot_status() + .await + .context("Failed getting snapshot recovery info")? + .context("Postgres contains neither blocks nor snapshot recovery info")?; + let l1_batch = + last_sealed_l1_batch_number.unwrap_or(snapshot_recovery.l1_batch_number) + 1; + + let (next_miniblock, prev_miniblock_hash, prev_miniblock_timestamp); + if let Some(miniblock_header) = &last_miniblock_header { + next_miniblock = miniblock_header.number + 1; + prev_miniblock_hash = miniblock_header.hash; + prev_miniblock_timestamp = miniblock_header.timestamp; + } else { + next_miniblock = snapshot_recovery.miniblock_number + 1; + prev_miniblock_hash = snapshot_recovery.miniblock_hash; + prev_miniblock_timestamp = snapshot_recovery.miniblock_timestamp; + } + + Ok(Self { + next_miniblock, + prev_miniblock_hash, + prev_miniblock_timestamp, + l1_batch, + }) + } } } diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index c84754f7cd30..73b8e89396dc 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -107,9 +107,12 @@ pub(crate) async fn prepare_empty_recovery_snapshot( let snapshot_recovery = SnapshotRecoveryStatus { l1_batch_number: l1_batch_number.into(), + l1_batch_timestamp: l1_batch_number.into(), l1_batch_root_hash: H256::zero(), miniblock_number: l1_batch_number.into(), - miniblock_root_hash: H256::zero(), // not used + miniblock_timestamp: l1_batch_number.into(), + miniblock_hash: H256::zero(), // not used + protocol_version: ProtocolVersionId::latest(), last_finished_chunk_id: None, total_chunk_count: 100, }; From 7c6bb29b3629b59c35d1e9a4a5923fbc0a69f04a Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 17 Jan 2024 18:05:28 +0200 Subject: [PATCH 05/27] Test `IoCursor::new()` --- .../zksync_core/src/state_keeper/io/common.rs | 72 ++++++++++++++++++- 1 file changed, 71 insertions(+), 1 deletion(-) diff --git a/core/lib/zksync_core/src/state_keeper/io/common.rs b/core/lib/zksync_core/src/state_keeper/io/common.rs index 8f4db7e8a52e..96693d1b8711 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common.rs @@ -178,7 +178,6 @@ pub(crate) struct IoCursor { impl IoCursor { /// Loads the cursor from Postgres. - // FIXME: unit tests pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { let last_sealed_l1_batch_number = storage .blocks_dal() @@ -233,7 +232,14 @@ impl IoCursor { #[cfg(test)] mod tests { + use zksync_dal::ConnectionPool; + use zksync_types::block::MiniblockHasher; + use super::*; + use crate::{ + genesis::{ensure_genesis_state, GenesisParams}, + utils::testonly::{create_miniblock, prepare_empty_recovery_snapshot}, + }; #[test] #[rustfmt::skip] // One-line formatting looks better here. 
@@ -244,4 +250,68 @@ mod tests { assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(200)), 2); assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(201)), 3); } + + #[tokio::test] + async fn creating_io_cursor_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(1)); + assert_eq!(cursor.next_miniblock, MiniblockNumber(1)); + assert_eq!(cursor.prev_miniblock_timestamp, 0); + assert_eq!( + cursor.prev_miniblock_hash, + MiniblockHasher::legacy_hash(MiniblockNumber(0)) + ); + + let miniblock = create_miniblock(1); + storage + .blocks_dal() + .insert_miniblock(&miniblock) + .await + .unwrap(); + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(1)); + assert_eq!(cursor.next_miniblock, MiniblockNumber(2)); + assert_eq!(cursor.prev_miniblock_timestamp, miniblock.timestamp); + assert_eq!(cursor.prev_miniblock_hash, miniblock.hash); + } + + #[tokio::test] + async fn creating_io_cursor_with_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(24)); + assert_eq!( + cursor.next_miniblock, + snapshot_recovery.miniblock_number + 1 + ); + assert_eq!( + cursor.prev_miniblock_timestamp, + snapshot_recovery.miniblock_timestamp + ); + assert_eq!(cursor.prev_miniblock_hash, snapshot_recovery.miniblock_hash); + + // Add a miniblock so that we have miniblocks (but not an L1 batch) in the storage. 
+ let miniblock = create_miniblock(snapshot_recovery.miniblock_number.0 + 1); + storage + .blocks_dal() + .insert_miniblock(&miniblock) + .await + .unwrap(); + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(24)); + assert_eq!(cursor.next_miniblock, miniblock.number + 1); + assert_eq!(cursor.prev_miniblock_timestamp, miniblock.timestamp); + assert_eq!(cursor.prev_miniblock_hash, miniblock.hash); + } } From d308d108b67e10b85f5ebc8d3498a23805491a0c Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 18 Jan 2024 13:56:38 +0200 Subject: [PATCH 06/27] Sketch `L1BatchParamsProvider` --- .../vm_interactions.rs | 36 +- .../src/state_keeper/extractors.rs | 48 +- .../zksync_core/src/state_keeper/io/common.rs | 746 +++++++++++++++--- .../src/state_keeper/io/mempool.rs | 68 +- .../src/state_keeper/io/seal_logic.rs | 13 - .../zksync_core/src/sync_layer/external_io.rs | 85 +- core/lib/zksync_core/src/utils/testonly.rs | 15 + 7 files changed, 779 insertions(+), 232 deletions(-) diff --git a/core/lib/zksync_core/src/basic_witness_input_producer/vm_interactions.rs b/core/lib/zksync_core/src/basic_witness_input_producer/vm_interactions.rs index 8ad2a66155de..8fde3af2afa0 100644 --- a/core/lib/zksync_core/src/basic_witness_input_producer/vm_interactions.rs +++ b/core/lib/zksync_core/src/basic_witness_input_producer/vm_interactions.rs @@ -9,7 +9,7 @@ use zksync_dal::StorageProcessor; use zksync_state::{PostgresStorage, StoragePtr, StorageView, WriteStorage}; use zksync_types::{L1BatchNumber, L2ChainId, Transaction}; -use crate::state_keeper::io::common::load_l1_batch_params; +use crate::state_keeper::io::common::L1BatchParamsProvider; pub(super) type VmAndStorage<'a> = ( VmInstance>, HistoryEnabled>, @@ -22,19 +22,16 @@ pub(super) fn create_vm( mut connection: StorageProcessor<'_>, l2_chain_id: L2ChainId, ) -> anyhow::Result { - let prev_l1_batch_number = l1_batch_number - 1; - let (_, miniblock_number) = rt_handle + let l1_batch_params_provider = rt_handle + .block_on(L1BatchParamsProvider::new(&mut connection)) + .context("failed initializing L1 batch params provider")?; + let first_miniblock_in_batch = rt_handle .block_on( - connection - .blocks_dal() - .get_miniblock_range_of_l1_batch(prev_l1_batch_number), - )? - .with_context(|| { - format!( - "l1_batch_number {l1_batch_number:?} must have a previous miniblock to start from" - ) - })?; - + l1_batch_params_provider + .load_first_miniblock_in_batch(&mut connection, l1_batch_number), + ) + .with_context(|| format!("failed loading first miniblock in L1 batch #{l1_batch_number}"))? + .with_context(|| format!("no miniblocks persisted for L1 batch #{l1_batch_number}"))?; let fee_account_addr = rt_handle .block_on( connection @@ -49,17 +46,24 @@ pub(super) fn create_vm( // All batches ran by BasicWitnessInputProducer have already been executed by State Keeper. // This means we don't want to reject any execution, therefore we're using MAX as an allow all. 
let validation_computational_gas_limit = u32::MAX; + let (system_env, l1_batch_env) = rt_handle - .block_on(load_l1_batch_params( + .block_on(l1_batch_params_provider.load_l1_batch_params( &mut connection, - l1_batch_number, + &first_miniblock_in_batch, fee_account_addr, validation_computational_gas_limit, l2_chain_id, )) .context("expected miniblock to be executed and sealed")?; - let pg_storage = PostgresStorage::new(rt_handle.clone(), connection, miniblock_number, true); + let storage_miniblock_number = first_miniblock_in_batch.number() - 1; + let pg_storage = PostgresStorage::new( + rt_handle.clone(), + connection, + storage_miniblock_number, + true, + ); let storage_view = StorageView::new(pg_storage).to_rc_ptr(); let vm = VmInstance::new(l1_batch_env, system_env, storage_view.clone()); diff --git a/core/lib/zksync_core/src/state_keeper/extractors.rs b/core/lib/zksync_core/src/state_keeper/extractors.rs index e31020734f58..8f6f8cac5ba7 100644 --- a/core/lib/zksync_core/src/state_keeper/extractors.rs +++ b/core/lib/zksync_core/src/state_keeper/extractors.rs @@ -1,15 +1,8 @@ //! Pure functions that convert data as required by the state keeper. -use std::{ - convert::TryFrom, - fmt, - time::{Duration, Instant}, -}; +use std::{convert::TryFrom, fmt}; use chrono::{DateTime, TimeZone, Utc}; -use zksync_dal::StorageProcessor; -use zksync_types::{L1BatchNumber, U256}; -use zksync_utils::h256_to_u256; /// Displays a Unix timestamp (seconds since epoch) in human-readable form. Useful for logging. pub(super) fn display_timestamp(timestamp: u64) -> impl fmt::Display { @@ -34,42 +27,3 @@ pub(super) fn display_timestamp(timestamp: u64) -> impl fmt::Display { DisplayedTimestamp::Parsed, ) } - -pub(crate) async fn wait_for_prev_l1_batch_params( - storage: &mut StorageProcessor<'_>, - number: L1BatchNumber, -) -> (U256, u64) { - if number == L1BatchNumber(0) { - return (U256::default(), 0); - } - wait_for_l1_batch_params_unchecked(storage, number - 1).await -} - -/// # Warning -/// -/// If invoked for a `L1BatchNumber` of a non-existent l1 batch, will block current thread indefinitely. 
-async fn wait_for_l1_batch_params_unchecked( - storage: &mut StorageProcessor<'_>, - number: L1BatchNumber, -) -> (U256, u64) { - // If the state root is not known yet, this duration will be used to back off in the while loops - const SAFE_STATE_ROOT_INTERVAL: Duration = Duration::from_millis(100); - - let stage_started_at: Instant = Instant::now(); - loop { - let data = storage - .blocks_dal() - .get_l1_batch_state_root_and_timestamp(number) - .await - .unwrap(); - if let Some((root_hash, timestamp)) = data { - tracing::trace!( - "Waiting for hash of L1 batch #{number} took {:?}", - stage_started_at.elapsed() - ); - return (h256_to_u256(root_hash), timestamp); - } - - tokio::time::sleep(SAFE_STATE_ROOT_INTERVAL).await; - } -} diff --git a/core/lib/zksync_core/src/state_keeper/io/common.rs b/core/lib/zksync_core/src/state_keeper/io/common.rs index 96693d1b8711..3ad30c12b34f 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::time::{Duration, Instant}; use anyhow::Context; use multivm::{ @@ -8,10 +8,9 @@ use multivm::{ use zksync_contracts::BaseSystemContracts; use zksync_dal::StorageProcessor; use zksync_types::{ - fee_model::BatchFeeInput, Address, L1BatchNumber, L2ChainId, MiniblockNumber, - ProtocolVersionId, H256, U256, ZKPORTER_IS_AVAILABLE, + block::MiniblockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, + L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, }; -use zksync_utils::u256_to_h256; use super::PendingBatchData; use crate::state_keeper::extractors; @@ -22,7 +21,7 @@ pub(crate) fn l1_batch_params( current_l1_batch_number: L1BatchNumber, fee_account: Address, l1_batch_timestamp: u64, - previous_batch_hash: U256, + previous_batch_hash: H256, fee_input: BatchFeeInput, first_miniblock_number: MiniblockNumber, prev_miniblock_hash: H256, @@ -43,7 +42,7 @@ pub(crate) fn l1_batch_params( chain_id, }, L1BatchEnv { - previous_batch_hash: Some(u256_to_h256(previous_batch_hash)), + previous_batch_hash: Some(previous_batch_hash), number: current_l1_batch_number, timestamp: l1_batch_timestamp, fee_input, @@ -68,105 +67,6 @@ pub(crate) fn poll_iters(delay_interval: Duration, max_wait: Duration) -> usize ((max_wait_millis + delay_interval_millis - 1) / delay_interval_millis).max(1) as usize } -pub(crate) async fn load_l1_batch_params( - storage: &mut StorageProcessor<'_>, - current_l1_batch_number: L1BatchNumber, - fee_account: Address, - validation_computational_gas_limit: u32, - chain_id: L2ChainId, -) -> Option<(SystemEnv, L1BatchEnv)> { - // If miniblock doesn't exist (for instance if it's pending), it means that there is no unsynced state (i.e. no transactions - // were executed after the last sealed batch). - // FIXME: doesn't work w/ snapshot recovery; change to a dedicated DB query? 
- let pending_miniblock_number = { - let (_, last_miniblock_number_included_in_l1_batch) = storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(current_l1_batch_number - 1) - .await - .unwrap() - .unwrap(); - last_miniblock_number_included_in_l1_batch + 1 - }; - let pending_miniblock_header = storage - .blocks_dal() - .get_miniblock_header(pending_miniblock_number) - .await - .unwrap()?; - - tracing::info!("Getting previous batch hash"); - let (previous_l1_batch_hash, _) = - extractors::wait_for_prev_l1_batch_params(storage, current_l1_batch_number).await; - - tracing::info!("Getting previous miniblock hash"); - let prev_miniblock_hash = storage - .blocks_dal() - .get_miniblock_header(pending_miniblock_number - 1) - .await - .unwrap() - .unwrap() - .hash; - - let base_system_contracts = storage - .storage_dal() - .get_base_system_contracts( - pending_miniblock_header - .base_system_contracts_hashes - .bootloader, - pending_miniblock_header - .base_system_contracts_hashes - .default_aa, - ) - .await; - - tracing::info!("Previous l1_batch_hash: {}", previous_l1_batch_hash); - Some(l1_batch_params( - current_l1_batch_number, - fee_account, - pending_miniblock_header.timestamp, - previous_l1_batch_hash, - pending_miniblock_header.batch_fee_input, - pending_miniblock_number, - prev_miniblock_hash, - base_system_contracts, - validation_computational_gas_limit, - pending_miniblock_header - .protocol_version - .expect("`protocol_version` must be set for pending miniblock"), - pending_miniblock_header.virtual_blocks, - chain_id, - )) -} - -/// Loads the pending L1 block data from the database. -pub(crate) async fn load_pending_batch( - storage: &mut StorageProcessor<'_>, - current_l1_batch_number: L1BatchNumber, - fee_account: Address, - validation_computational_gas_limit: u32, - chain_id: L2ChainId, -) -> Option { - let (system_env, l1_batch_env) = load_l1_batch_params( - storage, - current_l1_batch_number, - fee_account, - validation_computational_gas_limit, - chain_id, - ) - .await?; - - let pending_miniblocks = storage - .transactions_dal() - .get_miniblocks_to_reexecute() - .await - .unwrap(); - - Some(PendingBatchData { - l1_batch_env, - system_env, - pending_miniblocks, - }) -} - /// Cursor of the miniblock / L1 batch progress used by [`StateKeeperIO`](super::StateKeeperIO) implementations. #[derive(Debug)] pub(crate) struct IoCursor { @@ -230,15 +130,296 @@ impl IoCursor { } } +/// Typesafe wrapper around [`MiniblockHeader`] returned by [`L1BatchParamsProvider`]. +#[derive(Debug)] +pub(crate) struct FirstMiniblockInBatch { + header: MiniblockHeader, + l1_batch_number: L1BatchNumber, +} + +impl FirstMiniblockInBatch { + pub fn number(&self) -> MiniblockNumber { + self.header.number + } + + pub fn has_protocol_version(&self) -> bool { + self.header.protocol_version.is_some() + } + + pub fn set_protocol_version(&mut self, version: ProtocolVersionId) { + assert!( + self.header.protocol_version.is_none(), + "Cannot redefine protocol version" + ); + self.header.protocol_version = Some(version); + } +} + +/// Provider of L1 batch parameters for state keeper I/O implementations. The provider is stateless; i.e., it doesn't +/// enforce a particular order of method calls. 
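+///
+/// The snapshot recovery status is loaded once in [`Self::new()`]; the provider assumes it doesn't
+/// change for the provider's lifetime.
+///
+/// # Example (sketch)
+///
+/// ```ignore
+/// // Hypothetical usage; assumes a `StorageProcessor` is available as `storage`.
+/// let provider = L1BatchParamsProvider::new(&mut storage).await?;
+/// let (root_hash, timestamp) = provider
+///     .wait_for_l1_batch_params(&mut storage, L1BatchNumber(1))
+///     .await?;
+/// ```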
+#[derive(Debug)]
+pub(crate) struct L1BatchParamsProvider {
+    snapshot: Option<SnapshotRecoveryStatus>,
+}
+
+impl L1BatchParamsProvider {
+    pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result<Self> {
+        let snapshot = storage
+            .snapshot_recovery_dal()
+            .get_applied_snapshot_status()
+            .await?;
+        Ok(Self { snapshot })
+    }
+
+    /// Returns the state root hash and timestamp of the L1 batch with the specified number, waiting for
+    /// the hash to be computed if necessary.
+    pub async fn wait_for_l1_batch_params(
+        &self,
+        storage: &mut StorageProcessor<'_>,
+        number: L1BatchNumber,
+    ) -> anyhow::Result<(H256, u64)> {
+        let first_l1_batch = if let Some(snapshot) = &self.snapshot {
+            // Special case: if we've recovered from a snapshot, we allow waiting for the snapshot L1 batch.
+            if number == snapshot.l1_batch_number {
+                return Ok((snapshot.l1_batch_root_hash, snapshot.l1_batch_timestamp));
+            }
+            snapshot.l1_batch_number + 1
+        } else {
+            L1BatchNumber(0)
+        };
+
+        anyhow::ensure!(
+            number >= first_l1_batch,
+            "Cannot wait for a hash of a pruned L1 batch #{number} (first retained batch: {first_l1_batch})"
+        );
+        Self::wait_for_l1_batch_params_unchecked(storage, number).await
+    }
+
+    async fn wait_for_l1_batch_params_unchecked(
+        storage: &mut StorageProcessor<'_>,
+        number: L1BatchNumber,
+    ) -> anyhow::Result<(H256, u64)> {
+        // If the state root is not known yet, this duration will be used to back off in the polling loop below.
+        const SAFE_STATE_ROOT_INTERVAL: Duration = Duration::from_millis(100);
+
+        let stage_started_at: Instant = Instant::now();
+        loop {
+            let data = storage
+                .blocks_dal()
+                .get_l1_batch_state_root_and_timestamp(number)
+                .await?;
+            if let Some((root_hash, timestamp)) = data {
+                tracing::trace!(
+                    "Waiting for hash of L1 batch #{number} took {:?}",
+                    stage_started_at.elapsed()
+                );
+                return Ok((root_hash, timestamp));
+            }
+
+            tokio::time::sleep(SAFE_STATE_ROOT_INTERVAL).await;
+        }
+    }
+
+    /// Returns the header of the first miniblock in the specified L1 batch, regardless of whether the batch is sealed.
+    pub(crate) async fn load_first_miniblock_in_batch(
+        &self,
+        storage: &mut StorageProcessor<'_>,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<Option<FirstMiniblockInBatch>> {
+        let miniblock_number = self
+            .load_number_of_first_miniblock_in_batch(storage, l1_batch_number)
+            .await
+            .context("failed getting first miniblock number")?;
+        Ok(match miniblock_number {
+            Some(number) => storage
+                .blocks_dal()
+                .get_miniblock_header(number)
+                .await
+                .context("failed getting miniblock header")?
+                .map(|header| FirstMiniblockInBatch {
+                    header,
+                    l1_batch_number,
+                }),
+            None => None,
+        })
+    }
+
+    async fn load_number_of_first_miniblock_in_batch(
+        &self,
+        storage: &mut StorageProcessor<'_>,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<Option<MiniblockNumber>> {
+        if l1_batch_number == L1BatchNumber(0) {
+            return Ok(Some(MiniblockNumber(0)));
+        }
+
+        if let Some(snapshot) = &self.snapshot {
+            anyhow::ensure!(
+                l1_batch_number > snapshot.l1_batch_number,
+                "Cannot load miniblocks for pruned L1 batch #{l1_batch_number} (first retained batch: {})",
+                snapshot.l1_batch_number + 1
+            );
+            if l1_batch_number == snapshot.l1_batch_number + 1 {
+                return Ok(Some(snapshot.miniblock_number + 1));
+            }
+        }
+
+        let prev_l1_batch = l1_batch_number - 1;
+        // At this point, we have ensured that `prev_l1_batch` is not pruned.
+ let Some((_, last_miniblock_in_prev_l1_batch)) = storage + .blocks_dal() + .get_miniblock_range_of_l1_batch(prev_l1_batch) + .await + .with_context(|| { + format!("failed getting miniblock range for L1 batch #{prev_l1_batch}") + })? + else { + return Ok(None); + }; + Ok(Some(last_miniblock_in_prev_l1_batch + 1)) + } + + /// Loads VM-related L1 batch parameters for the specified batch. + pub(crate) async fn load_l1_batch_params( + &self, + storage: &mut StorageProcessor<'_>, + first_miniblock_in_batch: &FirstMiniblockInBatch, + fee_account: Address, + validation_computational_gas_limit: u32, + chain_id: L2ChainId, + ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { + anyhow::ensure!( + first_miniblock_in_batch.l1_batch_number > L1BatchNumber(0), + "Loading params for genesis L1 batch not supported" + ); + // L1 batch timestamp is set to the timestamp of its first miniblock. + let l1_batch_timestamp = first_miniblock_in_batch.header.timestamp; + + let prev_l1_batch_number = first_miniblock_in_batch.l1_batch_number - 1; + tracing::info!("Getting previous L1 batch hash for batch #{prev_l1_batch_number}"); + let (prev_l1_batch_hash, prev_l1_batch_timestamp) = self + .wait_for_l1_batch_params(storage, prev_l1_batch_number) + .await + .context("failed getting hash for previous L1 batch")?; + tracing::info!("Got state root hash for previous L1 batch #{prev_l1_batch_number}: {prev_l1_batch_hash:?}"); + + anyhow::ensure!( + prev_l1_batch_timestamp < l1_batch_timestamp, + "Invalid params for L1 batch #{}: Timestamp of previous L1 batch ({}) >= provisional L1 batch timestamp ({}), \ + meaning that L1 batch will be rejected by the bootloader", + first_miniblock_in_batch.l1_batch_number, + extractors::display_timestamp(prev_l1_batch_timestamp), + extractors::display_timestamp(l1_batch_timestamp) + ); + + let prev_miniblock_number = first_miniblock_in_batch.header.number - 1; + tracing::info!("Getting previous miniblock hash for miniblock #{prev_miniblock_number}"); + + let prev_miniblock_hash = self.snapshot.as_ref().and_then(|snapshot| { + (snapshot.miniblock_number == prev_miniblock_number).then_some(snapshot.miniblock_hash) + }); + let prev_miniblock_hash = match prev_miniblock_hash { + Some(hash) => hash, + None => storage + .blocks_web3_dal() + .get_miniblock_hash(prev_miniblock_number) + .await + .context("failed getting hash for previous miniblock")? + .context("previous miniblock disappeared from storage")?, + }; + tracing::info!( + "Got hash for previous miniblock #{prev_miniblock_number}: {prev_miniblock_hash:?}" + ); + + let contract_hashes = first_miniblock_in_batch.header.base_system_contracts_hashes; + let base_system_contracts = storage + .storage_dal() + .get_base_system_contracts(contract_hashes.bootloader, contract_hashes.default_aa) + .await; + + Ok(l1_batch_params( + first_miniblock_in_batch.l1_batch_number, + fee_account, + l1_batch_timestamp, + prev_l1_batch_hash, + first_miniblock_in_batch.header.batch_fee_input, + first_miniblock_in_batch.header.number, + prev_miniblock_hash, + base_system_contracts, + validation_computational_gas_limit, + first_miniblock_in_batch + .header + .protocol_version + .context("`protocol_version` must be set for miniblock")?, + first_miniblock_in_batch.header.virtual_blocks, + chain_id, + )) + } + + /// Loads the pending L1 batch data from the database. + /// + /// # Errors + /// + /// Propagates DB errors. Also returns an error if `first_miniblock_in_batch` doesn't correspond to a pending L1 batch. 
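+    /// (Here, an L1 batch is considered pending if its first miniblock still has transactions queued for re-execution.)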
+ pub(crate) async fn load_pending_batch( + &self, + storage: &mut StorageProcessor<'_>, + first_miniblock_in_batch: &FirstMiniblockInBatch, + fee_account: Address, + validation_computational_gas_limit: u32, + chain_id: L2ChainId, + ) -> anyhow::Result { + let (system_env, l1_batch_env) = self + .load_l1_batch_params( + storage, + first_miniblock_in_batch, + fee_account, + validation_computational_gas_limit, + chain_id, + ) + .await + .context("failed loading L1 batch params")?; + + let pending_miniblocks = storage + .transactions_dal() + .get_miniblocks_to_reexecute() + .await + .context("failed loading miniblocks for re-execution")?; + let first_pending_miniblock = pending_miniblocks + .first() + .context("no pending miniblocks; was `first_miniblock_in_batch` loaded for a correct L1 batch number?")?; + anyhow::ensure!( + first_pending_miniblock.number == first_miniblock_in_batch.header.number, + "Invalid `first_miniblock_in_batch` supplied: its L1 batch #{} is not pending", + first_miniblock_in_batch.l1_batch_number + ); + Ok(PendingBatchData { + l1_batch_env, + system_env, + pending_miniblocks, + }) + } +} + #[cfg(test)] mod tests { + use std::{collections::HashMap, ops}; + + use futures::FutureExt; + use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::ConnectionPool; - use zksync_types::block::MiniblockHasher; + use zksync_types::{ + block::{BlockGasCount, MiniblockHasher}, + fee::TransactionExecutionMetrics, + }; use super::*; use crate::{ genesis::{ensure_genesis_state, GenesisParams}, - utils::testonly::{create_miniblock, prepare_empty_recovery_snapshot}, + utils::testonly::{ + create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction, + prepare_empty_recovery_snapshot, + }, }; #[test] @@ -314,4 +495,355 @@ mod tests { assert_eq!(cursor.prev_miniblock_timestamp, miniblock.timestamp); assert_eq!(cursor.prev_miniblock_hash, miniblock.hash); } + + #[tokio::test] + async fn waiting_for_l1_batch_params_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let genesis_root_hash = + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + assert!(provider.snapshot.is_none()); + let (hash, timestamp) = provider + .wait_for_l1_batch_params(&mut storage, L1BatchNumber(0)) + .await + .unwrap(); + assert_eq!(hash, genesis_root_hash); + assert_eq!(timestamp, 0); + + let new_l1_batch = create_l1_batch(1); + storage + .blocks_dal() + .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) + .await + .unwrap(); + + let wait_future = provider.wait_for_l1_batch_params(&mut storage, L1BatchNumber(1)); + futures::pin_mut!(wait_future); + tokio::task::yield_now().await; + assert!((&mut wait_future).now_or_never().is_none()); + + let expected_hash = H256::repeat_byte(1); + let mut storage = pool.access_storage().await.unwrap(); + storage + .blocks_dal() + .set_l1_batch_hash(L1BatchNumber(1), expected_hash) + .await + .unwrap(); + let (hash, timestamp) = wait_future.await.unwrap(); + assert_eq!(hash, expected_hash); + assert_eq!(timestamp, new_l1_batch.timestamp); + } + + #[tokio::test] + async fn waiting_for_l1_batch_params_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + + let provider = 
L1BatchParamsProvider::new(&mut storage).await.unwrap(); + assert!(provider.snapshot.is_some()); + let (hash, timestamp) = provider + .wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number) + .await + .unwrap(); + assert_eq!(hash, snapshot_recovery.l1_batch_root_hash); + assert_eq!(timestamp, snapshot_recovery.l1_batch_timestamp); + + for pruned_l1_batch in [0, 1, snapshot_recovery.l1_batch_number.0 - 1] { + assert!(provider + .wait_for_l1_batch_params(&mut storage, L1BatchNumber(pruned_l1_batch)) + .await + .is_err()); + } + + let new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1); + storage + .blocks_dal() + .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) + .await + .unwrap(); + + let wait_future = + provider.wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number + 1); + futures::pin_mut!(wait_future); + tokio::task::yield_now().await; + assert!((&mut wait_future).now_or_never().is_none()); + + let expected_hash = H256::repeat_byte(1); + let mut storage = pool.access_storage().await.unwrap(); + storage + .blocks_dal() + .set_l1_batch_hash(new_l1_batch.number, expected_hash) + .await + .unwrap(); + let (hash, timestamp) = wait_future.await.unwrap(); + assert_eq!(hash, expected_hash); + assert_eq!(timestamp, new_l1_batch.timestamp); + } + + #[tokio::test] + async fn getting_first_miniblock_in_batch_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut batches_and_miniblocks = HashMap::from([ + (L1BatchNumber(0), Ok(Some(MiniblockNumber(0)))), + (L1BatchNumber(1), Ok(Some(MiniblockNumber(1)))), + (L1BatchNumber(2), Ok(None)), + (L1BatchNumber(100), Ok(None)), + ]); + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + + let new_miniblock = create_miniblock(1); + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await + .unwrap(); + let new_miniblock = create_miniblock(2); + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await + .unwrap(); + + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + + let new_l1_batch = create_l1_batch(1); + storage + .blocks_dal() + .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) + .await + .unwrap(); + storage + .blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(new_l1_batch.number) + .await + .unwrap(); + + batches_and_miniblocks.insert(L1BatchNumber(2), Ok(Some(MiniblockNumber(3)))); + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + } + + async fn assert_first_miniblock_numbers( + provider: &L1BatchParamsProvider, + storage: &mut StorageProcessor<'_>, + batches_and_miniblocks: &HashMap, ()>>, + ) { + for (&batch, &expected_miniblock) in batches_and_miniblocks { + let number = provider + .load_number_of_first_miniblock_in_batch(storage, batch) + .await; + match expected_miniblock { + Ok(expected) => { + assert_eq!( + number.unwrap(), + expected, + "load_number_of_first_miniblock_in_batch({batch})" + ); + } + Err(()) => { + number.unwrap_err(); + } + } + } + } + + #[tokio::test] + async fn getting_first_miniblock_in_batch_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = 
pool.access_storage().await.unwrap(); + let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let mut batches_and_miniblocks = HashMap::from([ + (L1BatchNumber(1), Err(())), + (snapshot_recovery.l1_batch_number, Err(())), + ( + snapshot_recovery.l1_batch_number + 1, + Ok(Some(snapshot_recovery.miniblock_number + 1)), + ), + (snapshot_recovery.l1_batch_number + 2, Ok(None)), + (L1BatchNumber(100), Ok(None)), + ]); + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + + let new_miniblock = create_miniblock(snapshot_recovery.miniblock_number.0 + 1); + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await + .unwrap(); + + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + + let new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1); + storage + .blocks_dal() + .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) + .await + .unwrap(); + storage + .blocks_dal() + .mark_miniblocks_as_executed_in_l1_batch(new_l1_batch.number) + .await + .unwrap(); + + batches_and_miniblocks.insert( + snapshot_recovery.l1_batch_number + 2, + Ok(Some(new_miniblock.number + 1)), + ); + assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; + } + + #[tokio::test] + async fn loading_pending_batch_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let genesis_params = GenesisParams::mock(); + ensure_genesis_state(&mut storage, L2ChainId::default(), &genesis_params) + .await + .unwrap(); + store_pending_miniblocks( + &mut storage, + 1..=2, + genesis_params.base_system_contracts.hashes(), + ) + .await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let first_miniblock_in_batch = provider + .load_first_miniblock_in_batch(&mut storage, L1BatchNumber(1)) + .await + .unwrap() + .expect("no first miniblock"); + assert_eq!(first_miniblock_in_batch.number(), MiniblockNumber(1)); + + let pending_batch = provider + .load_pending_batch( + &mut storage, + &first_miniblock_in_batch, + Address::zero(), + u32::MAX, + L2ChainId::default(), + ) + .await + .unwrap(); + + assert_eq!(pending_batch.pending_miniblocks.len(), 2); + assert_eq!(pending_batch.l1_batch_env.number, L1BatchNumber(1)); + assert_eq!(pending_batch.l1_batch_env.timestamp, 1); + assert_eq!(pending_batch.l1_batch_env.first_l2_block.number, 1); + assert_eq!(pending_batch.l1_batch_env.first_l2_block.timestamp, 1); + assert_eq!( + pending_batch.l1_batch_env.first_l2_block.prev_block_hash, + MiniblockHasher::legacy_hash(MiniblockNumber(0)) + ); + } + + async fn store_pending_miniblocks( + storage: &mut StorageProcessor<'_>, + numbers: ops::RangeInclusive, + contract_hashes: BaseSystemContractsHashes, + ) { + for miniblock_number in numbers { + let tx = create_l2_transaction(10, 100); + storage + .transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; + let mut new_miniblock = create_miniblock(miniblock_number); + new_miniblock.base_system_contracts_hashes = contract_hashes; + storage + .blocks_dal() + .insert_miniblock(&new_miniblock) + .await + .unwrap(); + let tx_result = execute_l2_transaction(tx); + storage + .transactions_dal() + .mark_txs_as_executed_in_miniblock(new_miniblock.number, &[tx_result], 1.into()) + .await; + } + } + + #[tokio::test] + async 
fn loading_pending_batch_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + let contracts = GenesisParams::mock().base_system_contracts; + let factory_deps = HashMap::from([ + ( + contracts.bootloader.hash, + zksync_utils::be_words_to_bytes(&contracts.bootloader.code), + ), + ( + contracts.default_aa.hash, + zksync_utils::be_words_to_bytes(&contracts.default_aa.code), + ), + ]); + + let starting_miniblock_number = snapshot_recovery.miniblock_number.0 + 1; + store_pending_miniblocks( + &mut storage, + starting_miniblock_number..=starting_miniblock_number + 1, + contracts.hashes(), + ) + .await; + storage + .storage_dal() + .insert_factory_deps(snapshot_recovery.miniblock_number + 1, &factory_deps) + .await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let first_miniblock_in_batch = provider + .load_first_miniblock_in_batch(&mut storage, snapshot_recovery.l1_batch_number + 1) + .await + .unwrap() + .expect("no first miniblock"); + assert_eq!( + first_miniblock_in_batch.number(), + snapshot_recovery.miniblock_number + 1 + ); + + let pending_batch = provider + .load_pending_batch( + &mut storage, + &first_miniblock_in_batch, + Address::zero(), + u32::MAX, + L2ChainId::default(), + ) + .await + .unwrap(); + + let expected_timestamp = u64::from(snapshot_recovery.miniblock_number.0) + 1; + assert_eq!(pending_batch.pending_miniblocks.len(), 2); + assert_eq!( + pending_batch.l1_batch_env.number, + snapshot_recovery.l1_batch_number + 1 + ); + assert_eq!(pending_batch.l1_batch_env.timestamp, expected_timestamp); + assert_eq!( + pending_batch.l1_batch_env.first_l2_block.number, + snapshot_recovery.miniblock_number.0 + 1 + ); + assert_eq!( + pending_batch.l1_batch_env.first_l2_block.timestamp, + expected_timestamp + ); + assert_eq!( + pending_batch.l1_batch_env.first_l2_block.prev_block_hash, + snapshot_recovery.miniblock_hash + ); + } } diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index 6d64640faebc..61f1584ec99f 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -5,6 +5,7 @@ use std::{ time::{Duration, Instant}, }; +use anyhow::Context as _; use async_trait::async_trait; use multivm::{ interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}, @@ -16,7 +17,7 @@ use zksync_mempool::L2TxFilter; use zksync_object_store::ObjectStore; use zksync_types::{ protocol_version::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, Address, - L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, U256, + L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; // TODO (SMA-1206): use seconds instead of milliseconds. 
use zksync_utils::time::millis_since_epoch; @@ -26,7 +27,7 @@ use crate::{ state_keeper::{ extractors, io::{ - common::{l1_batch_params, load_pending_batch, poll_iters, IoCursor}, + common::{l1_batch_params, poll_iters, IoCursor, L1BatchParamsProvider}, MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, mempool_actor::l2_tx_filter, @@ -51,6 +52,7 @@ pub(crate) struct MempoolIO { current_miniblock_number: MiniblockNumber, prev_miniblock_hash: H256, prev_miniblock_timestamp: u64, + l1_batch_params_provider: L1BatchParamsProvider, miniblock_sealer_handle: MiniblockSealerHandle, current_l1_batch_number: L1BatchNumber, fee_account: Address, @@ -93,18 +95,40 @@ impl StateKeeperIO for MempoolIO { .await .unwrap(); + let pending_miniblock_header = self + .l1_batch_params_provider + .load_first_miniblock_in_batch(&mut storage, self.current_l1_batch_number) + .await + .with_context(|| { + format!( + "failed loading first miniblock for L1 batch #{}", + self.current_l1_batch_number + ) + }) + .unwrap()?; + let pending_batch_data = self + .l1_batch_params_provider + .load_pending_batch( + &mut storage, + &pending_miniblock_header, + self.fee_account, + self.validation_computational_gas_limit, + self.chain_id, + ) + .await + .with_context(|| { + format!( + "failed loading pending L1 batch #{}", + self.current_l1_batch_number + ) + }) + .unwrap(); + let PendingBatchData { l1_batch_env, system_env, pending_miniblocks, - } = load_pending_batch( - &mut storage, - self.current_l1_batch_number, - self.fee_account, - self.validation_computational_gas_limit, - self.chain_id, - ) - .await?; + } = pending_batch_data; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. 
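         // (I.e., the filter is seeded from the restored batch environment rather than from fresh fee estimates.)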
let (base_fee, gas_per_pubdata) = @@ -409,7 +433,12 @@ impl MempoolIO { ); let mut storage = pool.access_storage_tagged("state_keeper").await?; - let cursor = IoCursor::new(&mut storage).await?; + let cursor = IoCursor::new(&mut storage) + .await + .context("failed initializing I/O cursor")?; + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut storage) + .await + .context("failed initializing L1 batch params provider")?; drop(storage); Ok(Self { @@ -424,6 +453,7 @@ impl MempoolIO { current_miniblock_number: cursor.next_miniblock, prev_miniblock_hash: cursor.prev_miniblock_hash, prev_miniblock_timestamp: cursor.prev_miniblock_timestamp, + l1_batch_params_provider, fee_account: config.fee_account_addr, validation_computational_gas_limit, delay_interval, @@ -445,7 +475,7 @@ impl MempoolIO { self.prev_miniblock_timestamp = miniblock.timestamp; } - async fn load_previous_l1_batch_hash(&self) -> U256 { + async fn load_previous_l1_batch_hash(&self) -> H256 { tracing::info!( "Getting previous L1 batch hash for L1 batch #{}", self.current_l1_batch_number @@ -457,13 +487,19 @@ impl MempoolIO { .access_storage_tagged("state_keeper") .await .unwrap(); - let (batch_hash, _) = - extractors::wait_for_prev_l1_batch_params(&mut storage, self.current_l1_batch_number) - .await; + let prev_l1_batch_number = self.current_l1_batch_number - 1; + let (batch_hash, _) = self + .l1_batch_params_provider + .wait_for_l1_batch_params(&mut storage, prev_l1_batch_number) + .await + .with_context(|| { + format!("error waiting for params for L1 batch #{prev_l1_batch_number}") + }) + .unwrap(); wait_latency.observe(); tracing::info!( - "Got previous L1 batch hash: {batch_hash:0>64x} for L1 batch #{}", + "Got previous L1 batch hash: {batch_hash:?} for L1 batch #{}", self.current_l1_batch_number ); batch_hash diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index 4eac2b678ee3..0cf16a074b95 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -37,7 +37,6 @@ use zksync_utils::{h256_to_u256, time::millis_since_epoch, u256_to_h256}; use crate::{ metrics::{BlockStage, MiniblockStage, APP_METRICS}, state_keeper::{ - extractors, metrics::{L1BatchSealStage, MiniblockSealStage, L1_BATCH_METRICS, MINIBLOCK_METRICS}, types::ExecutionMetricsForCriteria, updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, @@ -114,20 +113,8 @@ impl UpdatesManager { ); let progress = L1_BATCH_METRICS.start(L1BatchSealStage::InsertL1BatchHeader); - let (_prev_hash, prev_timestamp) = - extractors::wait_for_prev_l1_batch_params(&mut transaction, l1_batch_env.number).await; - assert!( - prev_timestamp < l1_batch_env.timestamp, - "Cannot seal L1 batch #{}: Timestamp of previous L1 batch ({}) >= provisional L1 batch timestamp ({}), \ - meaning that L1 batch will be rejected by the bootloader", - l1_batch_env.number, - extractors::display_timestamp(prev_timestamp), - extractors::display_timestamp(l1_batch_env.timestamp) - ); - let l2_to_l1_messages = extract_long_l2_to_l1_messages(&finished_batch.final_execution_state.events); - let l1_batch = L1BatchHeader { number: l1_batch_env.number, is_finished: true, diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index be9b8358fb85..7719e295ac65 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -1,5 +1,6 
@@ use std::{collections::HashMap, convert::TryInto, iter::FromIterator, time::Duration}; +use anyhow::Context as _; use async_trait::async_trait; use multivm::interface::{FinishedL1Batch, L1BatchEnv, SystemEnv}; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; @@ -7,7 +8,7 @@ use zksync_dal::ConnectionPool; use zksync_types::{ ethabi::Address, fee_model::BatchFeeInput, protocol_version::ProtocolUpgradeTx, witness_block_state::WitnessBlockState, L1BatchNumber, L2ChainId, MiniblockNumber, - ProtocolVersionId, Transaction, H256, U256, + ProtocolVersionId, Transaction, H256, }; use zksync_utils::{be_words_to_bytes, bytes_to_be_words}; @@ -19,9 +20,8 @@ use super::{ use crate::{ metrics::{BlockStage, APP_METRICS}, state_keeper::{ - extractors, io::{ - common::{l1_batch_params, load_pending_batch, poll_iters, IoCursor}, + common::{l1_batch_params, poll_iters, IoCursor, L1BatchParamsProvider}, MiniblockParams, MiniblockSealerHandle, PendingBatchData, StateKeeperIO, }, metrics::KEEPER_METRICS, @@ -47,6 +47,7 @@ pub struct ExternalIO { current_l1_batch_number: L1BatchNumber, current_miniblock_number: MiniblockNumber, prev_miniblock_hash: H256, + l1_batch_params_provider: L1BatchParamsProvider, actions: ActionQueue, sync_state: SyncState, main_node_client: Box, @@ -71,7 +72,12 @@ impl ExternalIO { chain_id: L2ChainId, ) -> anyhow::Result { let mut storage = pool.access_storage_tagged("sync_layer").await?; - let cursor = IoCursor::new(&mut storage).await?; + let cursor = IoCursor::new(&mut storage) + .await + .context("failed initializing I/O cursor")?; + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut storage) + .await + .context("failed initializing L1 batch params provider")?; drop(storage); tracing::info!( @@ -88,6 +94,7 @@ impl ExternalIO { current_l1_batch_number: cursor.l1_batch, current_miniblock_number: cursor.next_miniblock, prev_miniblock_hash: cursor.prev_miniblock_hash, + l1_batch_params_provider, actions, sync_state, main_node_client, @@ -110,12 +117,18 @@ impl ExternalIO { self.prev_miniblock_hash = miniblock.get_miniblock_hash(); } - async fn load_previous_l1_batch_hash(&self) -> U256 { + async fn load_previous_l1_batch_hash(&self) -> H256 { let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); let wait_latency = KEEPER_METRICS.wait_for_prev_hash_time.start(); - let (hash, _) = - extractors::wait_for_prev_l1_batch_params(&mut storage, self.current_l1_batch_number) - .await; + let prev_l1_batch_number = self.current_l1_batch_number - 1; + let (hash, _) = self + .l1_batch_params_provider + .wait_for_l1_batch_params(&mut storage, prev_l1_batch_number) + .await + .with_context(|| { + format!("error waiting for params for L1 batch #{prev_l1_batch_number}") + }) + .unwrap(); wait_latency.observe(); hash } @@ -248,48 +261,54 @@ impl StateKeeperIO for ExternalIO { ) }) .fee_account_address; - let pending_miniblock_number = { - let (_, last_miniblock_number_included_in_l1_batch) = storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(self.current_l1_batch_number - 1) - .await - .unwrap() - .unwrap(); - last_miniblock_number_included_in_l1_batch + 1 - }; - let pending_miniblock_header = storage - .blocks_dal() - .get_miniblock_header(pending_miniblock_number) + let mut pending_miniblock_header = self + .l1_batch_params_provider + .load_first_miniblock_in_batch(&mut storage, self.current_l1_batch_number) .await + .with_context(|| { + format!( + "failed loading first miniblock for L1 batch #{}", + self.current_l1_batch_number + ) + }) 
.unwrap()?; - - if pending_miniblock_header.protocol_version.is_none() { + if !pending_miniblock_header.has_protocol_version() { // Fetch protocol version ID for pending miniblocks to know which VM to use to re-execute them. let sync_block = self .main_node_client - .fetch_l2_block(pending_miniblock_header.number, false) + .fetch_l2_block(pending_miniblock_header.number(), false) .await .expect("Failed to fetch block from the main node") .expect("Block must exist"); // Loading base system contracts will insert protocol version in the database if it's not present there. - let _ = self - .load_base_system_contracts_by_version_id(sync_block.protocol_version) + self.load_base_system_contracts_by_version_id(sync_block.protocol_version) .await; storage .blocks_dal() .set_protocol_version_for_pending_miniblocks(sync_block.protocol_version) .await .unwrap(); + pending_miniblock_header.set_protocol_version(sync_block.protocol_version); } - load_pending_batch( - &mut storage, - self.current_l1_batch_number, - fee_account, - self.validation_computational_gas_limit, - self.chain_id, - ) - .await + let data = self + .l1_batch_params_provider + .load_pending_batch( + &mut storage, + &pending_miniblock_header, + fee_account, + self.validation_computational_gas_limit, + self.chain_id, + ) + .await + .with_context(|| { + format!( + "failed loading data for pending L1 batch #{}", + self.current_l1_batch_number + ) + }) + .unwrap(); + Some(data) } async fn wait_for_new_batch_params( diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index 73b8e89396dc..feaba4be3026 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -11,6 +11,7 @@ use zksync_types::{ l2::L2Tx, snapshots::SnapshotRecoveryStatus, transaction_request::PaymasterParams, + tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersion, ProtocolVersionId, H256, U256, }; @@ -96,6 +97,20 @@ pub(crate) fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> L tx } +pub(crate) fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionResult { + TransactionExecutionResult { + hash: transaction.hash(), + transaction: transaction.into(), + execution_info: ExecutionMetrics::default(), + execution_status: TxExecutionStatus::Success, + refunded_gas: 0, + operator_suggested_refund: 0, + compressed_bytecodes: vec![], + call_traces: vec![], + revert_reason: None, + } +} + pub(crate) async fn prepare_empty_recovery_snapshot( storage: &mut StorageProcessor<'_>, l1_batch_number: u32, From dfa26df9cab14b92052c7e3725ce9e5b6633d029 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 18 Jan 2024 14:16:26 +0200 Subject: [PATCH 07/27] Adapt `get_miniblocks_to_reexecute()` for snapshot recovery --- ...37d8d542b4f14cf560972c005ab3cc13d1f63.json | 23 ---- ...253eb340a21afd7d65ce6d2f523aeded8dfc0.json | 29 +++++ ...85ec6ec1e522bc058710560ef78e75f94ddac.json | 22 ++++ core/lib/dal/src/transactions_dal.rs | 102 +++++++++++------- 4 files changed, 116 insertions(+), 60 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63.json create mode 100644 core/lib/dal/.sqlx/query-d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0.json create mode 100644 core/lib/dal/.sqlx/query-f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac.json diff --git 
a/core/lib/dal/.sqlx/query-525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63.json b/core/lib/dal/.sqlx/query-525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63.json deleted file mode 100644 index 7764425aa214..000000000000 --- a/core/lib/dal/.sqlx/query-525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n hash\n FROM\n miniblocks\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "525123d4ec2b427f1c171f30d0937d8d542b4f14cf560972c005ab3cc13d1f63" -} diff --git a/core/lib/dal/.sqlx/query-d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0.json b/core/lib/dal/.sqlx/query-d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0.json new file mode 100644 index 000000000000..469c338969df --- /dev/null +++ b/core/lib/dal/.sqlx/query-d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n hash\n FROM\n miniblocks\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "d3f9202d665ef4fcb028dae6484253eb340a21afd7d65ce6d2f523aeded8dfc0" +} diff --git a/core/lib/dal/.sqlx/query-f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac.json b/core/lib/dal/.sqlx/query-f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac.json new file mode 100644 index 000000000000..0613eb77a30b --- /dev/null +++ b/core/lib/dal/.sqlx/query-f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n miniblock_hash\n FROM\n snapshot_recovery\n WHERE\n miniblock_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "miniblock_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "f1541a8d970d57ed118ee603e7285ec6ec1e522bc058710560ef78e75f94ddac" +} diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 19c19646bbfa..ef185e78a5b6 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, fmt, time::Duration}; -use anyhow::Context; +use anyhow::Context as _; use bigdecimal::BigDecimal; use itertools::Itertools; use sqlx::{error, types::chrono::NaiveDateTime}; @@ -1168,7 +1168,7 @@ impl TransactionsDal<'_, '_> { .fetch_all(self.storage.conn()) .await?; - self.get_miniblocks_to_execute(transactions).await + self.map_transactions_to_execution_data(transactions).await } /// Returns miniblocks with their transactions to be used in VM execution. 
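
The hunk below reworks `map_transactions_to_execution_data`, changing how the previous miniblock hash is resolved: after snapshot recovery, the parent of the first retained miniblock is absent from `miniblocks`, so its hash must be read from the `snapshot_recovery` table instead. A condensed sketch of the lookup order (simplified to an in-memory fallback; the actual code below queries `snapshot_recovery` lazily per miniblock):

    fn resolve_prev_block_hash(
        hashes_from_db: &HashMap<MiniblockNumber, H256>,
        snapshot: Option<&SnapshotRecoveryStatus>,
        prev_number: MiniblockNumber,
    ) -> Option<H256> {
        // Prefer the hash fetched from the `miniblocks` table...
        hashes_from_db.get(&prev_number).copied().or_else(|| {
            // ...falling back to snapshot recovery data at the pruning boundary.
            snapshot
                .filter(|status| status.miniblock_number == prev_number)
                .map(|status| status.miniblock_hash)
        })
    }
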
@@ -1196,10 +1196,10 @@ impl TransactionsDal<'_, '_> { .fetch_all(self.storage.conn()) .await?; - self.get_miniblocks_to_execute(transactions).await + self.map_transactions_to_execution_data(transactions).await } - async fn get_miniblocks_to_execute( + async fn map_transactions_to_execution_data( &mut self, transactions: Vec, ) -> anyhow::Result> { @@ -1217,14 +1217,10 @@ impl TransactionsDal<'_, '_> { if transactions_by_miniblock.is_empty() { return Ok(Vec::new()); } - let from_miniblock = transactions_by_miniblock - .first() - .context("No first transaction found for miniblock")? - .0; - let to_miniblock = transactions_by_miniblock - .last() - .context("No last transaction found for miniblock")? - .0; + let from_miniblock = transactions_by_miniblock.first().unwrap().0; + let to_miniblock = transactions_by_miniblock.last().unwrap().0; + // `unwrap()`s are safe; `transactions_by_miniblock` is not empty as checked above + let miniblock_data = sqlx::query!( r#" SELECT @@ -1243,9 +1239,15 @@ impl TransactionsDal<'_, '_> { .fetch_all(self.storage.conn()) .await?; - let prev_hashes = sqlx::query!( + anyhow::ensure!( + miniblock_data.len() == transactions_by_miniblock.len(), + "Not enough miniblock data retrieved" + ); + + let prev_miniblock_hashes = sqlx::query!( r#" SELECT + number, hash FROM miniblocks @@ -1260,31 +1262,57 @@ impl TransactionsDal<'_, '_> { .fetch_all(self.storage.conn()) .await?; - assert_eq!( - miniblock_data.len(), - transactions_by_miniblock.len(), - "Not enough miniblock data retrieved" - ); - assert_eq!( - prev_hashes.len(), - transactions_by_miniblock.len(), - "Not enough previous hashes retrieved" - ); - - Ok(transactions_by_miniblock + let prev_miniblock_hashes: HashMap<_, _> = prev_miniblock_hashes .into_iter() - .zip(miniblock_data) - .zip(prev_hashes) - .map( - |(((number, txs), miniblock_data_row), prev_hash_row)| MiniblockExecutionData { - number, - timestamp: miniblock_data_row.timestamp as u64, - prev_block_hash: H256::from_slice(&prev_hash_row.hash), - virtual_blocks: miniblock_data_row.virtual_blocks as u32, - txs, - }, - ) - .collect()) + .map(|row| { + ( + MiniblockNumber(row.number as u32), + H256::from_slice(&row.hash), + ) + }) + .collect(); + + let mut data = Vec::with_capacity(transactions_by_miniblock.len()); + let it = transactions_by_miniblock.into_iter().zip(miniblock_data); + for ((number, txs), miniblock_row) in it { + let prev_miniblock_number = number - 1; + let prev_block_hash = match prev_miniblock_hashes.get(&prev_miniblock_number) { + Some(hash) => *hash, + None => { + // Can occur after snapshot recovery; the first previous miniblock may not be present + // in the storage. + let row = sqlx::query!( + r#" + SELECT + miniblock_hash + FROM + snapshot_recovery + WHERE + miniblock_number = $1 + "#, + prev_miniblock_number.0 as i32 + ) + .fetch_optional(self.storage.conn()) + .await? 
+ .with_context(|| { + format!( + "miniblock #{prev_miniblock_number} is not in storage, and its hash is not \ + in snapshot recovery data" + ) + })?; + H256::from_slice(&row.miniblock_hash) + } + }; + + data.push(MiniblockExecutionData { + number, + timestamp: miniblock_row.timestamp as u64, + prev_block_hash, + virtual_blocks: miniblock_row.virtual_blocks as u32, + txs, + }); + } + Ok(data) } pub async fn get_tx_locations(&mut self, l1_batch_number: L1BatchNumber) -> TxLocations { From f5d6db966575aa8c31e23e8e51bfc56628cd543e Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 18 Jan 2024 14:22:02 +0200 Subject: [PATCH 08/27] Move `common` tests to separate file --- .../zksync_core/src/state_keeper/io/common.rs | 849 ------------------ .../src/state_keeper/io/common/mod.rs | 405 +++++++++ .../src/state_keeper/io/common/tests.rs | 445 +++++++++ 3 files changed, 850 insertions(+), 849 deletions(-) delete mode 100644 core/lib/zksync_core/src/state_keeper/io/common.rs create mode 100644 core/lib/zksync_core/src/state_keeper/io/common/mod.rs create mode 100644 core/lib/zksync_core/src/state_keeper/io/common/tests.rs diff --git a/core/lib/zksync_core/src/state_keeper/io/common.rs b/core/lib/zksync_core/src/state_keeper/io/common.rs deleted file mode 100644 index 3ad30c12b34f..000000000000 --- a/core/lib/zksync_core/src/state_keeper/io/common.rs +++ /dev/null @@ -1,849 +0,0 @@ -use std::time::{Duration, Instant}; - -use anyhow::Context; -use multivm::{ - interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, - vm_latest::constants::BLOCK_GAS_LIMIT, -}; -use zksync_contracts::BaseSystemContracts; -use zksync_dal::StorageProcessor; -use zksync_types::{ - block::MiniblockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, - L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, -}; - -use super::PendingBatchData; -use crate::state_keeper::extractors; - -/// Returns the parameters required to initialize the VM for the next L1 batch. -#[allow(clippy::too_many_arguments)] -pub(crate) fn l1_batch_params( - current_l1_batch_number: L1BatchNumber, - fee_account: Address, - l1_batch_timestamp: u64, - previous_batch_hash: H256, - fee_input: BatchFeeInput, - first_miniblock_number: MiniblockNumber, - prev_miniblock_hash: H256, - base_system_contracts: BaseSystemContracts, - validation_computational_gas_limit: u32, - protocol_version: ProtocolVersionId, - virtual_blocks: u32, - chain_id: L2ChainId, -) -> (SystemEnv, L1BatchEnv) { - ( - SystemEnv { - zk_porter_available: ZKPORTER_IS_AVAILABLE, - version: protocol_version, - base_system_smart_contracts: base_system_contracts, - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: validation_computational_gas_limit, - chain_id, - }, - L1BatchEnv { - previous_batch_hash: Some(previous_batch_hash), - number: current_l1_batch_number, - timestamp: l1_batch_timestamp, - fee_input, - fee_account, - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: first_miniblock_number.0, - timestamp: l1_batch_timestamp, - prev_block_hash: prev_miniblock_hash, - max_virtual_blocks_to_create: virtual_blocks, - }, - }, - ) -} - -/// Returns the amount of iterations `delay_interval` fits into `max_wait`, rounding up. 
-pub(crate) fn poll_iters(delay_interval: Duration, max_wait: Duration) -> usize { - let max_wait_millis = max_wait.as_millis() as u64; - let delay_interval_millis = delay_interval.as_millis() as u64; - assert!(delay_interval_millis > 0, "delay interval must be positive"); - - ((max_wait_millis + delay_interval_millis - 1) / delay_interval_millis).max(1) as usize -} - -/// Cursor of the miniblock / L1 batch progress used by [`StateKeeperIO`](super::StateKeeperIO) implementations. -#[derive(Debug)] -pub(crate) struct IoCursor { - pub next_miniblock: MiniblockNumber, - pub prev_miniblock_hash: H256, - pub prev_miniblock_timestamp: u64, - pub l1_batch: L1BatchNumber, -} - -impl IoCursor { - /// Loads the cursor from Postgres. - pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { - let last_sealed_l1_batch_number = storage - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .context("Failed getting sealed L1 batch number")?; - let last_miniblock_header = storage - .blocks_dal() - .get_last_sealed_miniblock_header() - .await - .context("Failed getting sealed miniblock header")?; - - if let (Some(l1_batch_number), Some(miniblock_header)) = - (last_sealed_l1_batch_number, &last_miniblock_header) - { - Ok(Self { - next_miniblock: miniblock_header.number + 1, - prev_miniblock_hash: miniblock_header.hash, - prev_miniblock_timestamp: miniblock_header.timestamp, - l1_batch: l1_batch_number + 1, - }) - } else { - let snapshot_recovery = storage - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await - .context("Failed getting snapshot recovery info")? - .context("Postgres contains neither blocks nor snapshot recovery info")?; - let l1_batch = - last_sealed_l1_batch_number.unwrap_or(snapshot_recovery.l1_batch_number) + 1; - - let (next_miniblock, prev_miniblock_hash, prev_miniblock_timestamp); - if let Some(miniblock_header) = &last_miniblock_header { - next_miniblock = miniblock_header.number + 1; - prev_miniblock_hash = miniblock_header.hash; - prev_miniblock_timestamp = miniblock_header.timestamp; - } else { - next_miniblock = snapshot_recovery.miniblock_number + 1; - prev_miniblock_hash = snapshot_recovery.miniblock_hash; - prev_miniblock_timestamp = snapshot_recovery.miniblock_timestamp; - } - - Ok(Self { - next_miniblock, - prev_miniblock_hash, - prev_miniblock_timestamp, - l1_batch, - }) - } - } -} - -/// Typesafe wrapper around [`MiniblockHeader`] returned by [`L1BatchParamsProvider`]. -#[derive(Debug)] -pub(crate) struct FirstMiniblockInBatch { - header: MiniblockHeader, - l1_batch_number: L1BatchNumber, -} - -impl FirstMiniblockInBatch { - pub fn number(&self) -> MiniblockNumber { - self.header.number - } - - pub fn has_protocol_version(&self) -> bool { - self.header.protocol_version.is_some() - } - - pub fn set_protocol_version(&mut self, version: ProtocolVersionId) { - assert!( - self.header.protocol_version.is_none(), - "Cannot redefine protocol version" - ); - self.header.protocol_version = Some(version); - } -} - -/// Provider of L1 batch parameters for state keeper I/O implementations. The provider is stateless; i.e., it doesn't -/// enforce a particular order of method calls. 
-#[derive(Debug)] -pub(crate) struct L1BatchParamsProvider { - snapshot: Option, -} - -impl L1BatchParamsProvider { - pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result { - let snapshot = storage - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await?; - Ok(Self { snapshot }) - } - - /// Returns state root hash and timestamp of an L1 batch with the specified number waiting for the hash to be computed - /// if necessary. - pub async fn wait_for_l1_batch_params( - &self, - storage: &mut StorageProcessor<'_>, - number: L1BatchNumber, - ) -> anyhow::Result<(H256, u64)> { - let first_l1_batch = if let Some(snapshot) = &self.snapshot { - // Special case: if we've recovered from a snapshot, we allow to wait for the snapshot L1 batch. - if number == snapshot.l1_batch_number { - return Ok((snapshot.l1_batch_root_hash, snapshot.l1_batch_timestamp)); - } - snapshot.l1_batch_number + 1 - } else { - L1BatchNumber(0) - }; - - anyhow::ensure!( - number >= first_l1_batch, - "Cannot wait a hash of a pruned L1 batch #{number} (first retained batch: {first_l1_batch})" - ); - Self::wait_for_l1_batch_params_unchecked(storage, number).await - } - - async fn wait_for_l1_batch_params_unchecked( - storage: &mut StorageProcessor<'_>, - number: L1BatchNumber, - ) -> anyhow::Result<(H256, u64)> { - // If the state root is not known yet, this duration will be used to back off in the while loops - const SAFE_STATE_ROOT_INTERVAL: Duration = Duration::from_millis(100); - - let stage_started_at: Instant = Instant::now(); - loop { - let data = storage - .blocks_dal() - .get_l1_batch_state_root_and_timestamp(number) - .await?; - if let Some((root_hash, timestamp)) = data { - tracing::trace!( - "Waiting for hash of L1 batch #{number} took {:?}", - stage_started_at.elapsed() - ); - return Ok((root_hash, timestamp)); - } - - tokio::time::sleep(SAFE_STATE_ROOT_INTERVAL).await; - } - } - - /// Returns a header of the first miniblock in the specified L1 batch regardless of whether the batch is sealed or not. - pub(crate) async fn load_first_miniblock_in_batch( - &self, - storage: &mut StorageProcessor<'_>, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - let miniblock_number = self - .load_number_of_first_miniblock_in_batch(storage, l1_batch_number) - .await - .context("failed getting first miniblock number")?; - Ok(match miniblock_number { - Some(number) => storage - .blocks_dal() - .get_miniblock_header(number) - .await - .context("failed getting miniblock header")? - .map(|header| FirstMiniblockInBatch { - header, - l1_batch_number, - }), - None => None, - }) - } - - async fn load_number_of_first_miniblock_in_batch( - &self, - storage: &mut StorageProcessor<'_>, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - if l1_batch_number == L1BatchNumber(0) { - return Ok(Some(MiniblockNumber(0))); - } - - if let Some(snapshot) = &self.snapshot { - anyhow::ensure!( - l1_batch_number > snapshot.l1_batch_number, - "Cannot load miniblocks for pruned L1 batch #{l1_batch_number} (first retained batch: {})", - snapshot.l1_batch_number + 1 - ); - if l1_batch_number == snapshot.l1_batch_number + 1 { - return Ok(Some(snapshot.miniblock_number + 1)); - } - } - - let prev_l1_batch = l1_batch_number - 1; - // At this point, we have ensured that `prev_l1_batch` is not pruned. 
- let Some((_, last_miniblock_in_prev_l1_batch)) = storage - .blocks_dal() - .get_miniblock_range_of_l1_batch(prev_l1_batch) - .await - .with_context(|| { - format!("failed getting miniblock range for L1 batch #{prev_l1_batch}") - })? - else { - return Ok(None); - }; - Ok(Some(last_miniblock_in_prev_l1_batch + 1)) - } - - /// Loads VM-related L1 batch parameters for the specified batch. - pub(crate) async fn load_l1_batch_params( - &self, - storage: &mut StorageProcessor<'_>, - first_miniblock_in_batch: &FirstMiniblockInBatch, - fee_account: Address, - validation_computational_gas_limit: u32, - chain_id: L2ChainId, - ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { - anyhow::ensure!( - first_miniblock_in_batch.l1_batch_number > L1BatchNumber(0), - "Loading params for genesis L1 batch not supported" - ); - // L1 batch timestamp is set to the timestamp of its first miniblock. - let l1_batch_timestamp = first_miniblock_in_batch.header.timestamp; - - let prev_l1_batch_number = first_miniblock_in_batch.l1_batch_number - 1; - tracing::info!("Getting previous L1 batch hash for batch #{prev_l1_batch_number}"); - let (prev_l1_batch_hash, prev_l1_batch_timestamp) = self - .wait_for_l1_batch_params(storage, prev_l1_batch_number) - .await - .context("failed getting hash for previous L1 batch")?; - tracing::info!("Got state root hash for previous L1 batch #{prev_l1_batch_number}: {prev_l1_batch_hash:?}"); - - anyhow::ensure!( - prev_l1_batch_timestamp < l1_batch_timestamp, - "Invalid params for L1 batch #{}: Timestamp of previous L1 batch ({}) >= provisional L1 batch timestamp ({}), \ - meaning that L1 batch will be rejected by the bootloader", - first_miniblock_in_batch.l1_batch_number, - extractors::display_timestamp(prev_l1_batch_timestamp), - extractors::display_timestamp(l1_batch_timestamp) - ); - - let prev_miniblock_number = first_miniblock_in_batch.header.number - 1; - tracing::info!("Getting previous miniblock hash for miniblock #{prev_miniblock_number}"); - - let prev_miniblock_hash = self.snapshot.as_ref().and_then(|snapshot| { - (snapshot.miniblock_number == prev_miniblock_number).then_some(snapshot.miniblock_hash) - }); - let prev_miniblock_hash = match prev_miniblock_hash { - Some(hash) => hash, - None => storage - .blocks_web3_dal() - .get_miniblock_hash(prev_miniblock_number) - .await - .context("failed getting hash for previous miniblock")? - .context("previous miniblock disappeared from storage")?, - }; - tracing::info!( - "Got hash for previous miniblock #{prev_miniblock_number}: {prev_miniblock_hash:?}" - ); - - let contract_hashes = first_miniblock_in_batch.header.base_system_contracts_hashes; - let base_system_contracts = storage - .storage_dal() - .get_base_system_contracts(contract_hashes.bootloader, contract_hashes.default_aa) - .await; - - Ok(l1_batch_params( - first_miniblock_in_batch.l1_batch_number, - fee_account, - l1_batch_timestamp, - prev_l1_batch_hash, - first_miniblock_in_batch.header.batch_fee_input, - first_miniblock_in_batch.header.number, - prev_miniblock_hash, - base_system_contracts, - validation_computational_gas_limit, - first_miniblock_in_batch - .header - .protocol_version - .context("`protocol_version` must be set for miniblock")?, - first_miniblock_in_batch.header.virtual_blocks, - chain_id, - )) - } - - /// Loads the pending L1 batch data from the database. - /// - /// # Errors - /// - /// Propagates DB errors. Also returns an error if `first_miniblock_in_batch` doesn't correspond to a pending L1 batch. 
- pub(crate) async fn load_pending_batch( - &self, - storage: &mut StorageProcessor<'_>, - first_miniblock_in_batch: &FirstMiniblockInBatch, - fee_account: Address, - validation_computational_gas_limit: u32, - chain_id: L2ChainId, - ) -> anyhow::Result { - let (system_env, l1_batch_env) = self - .load_l1_batch_params( - storage, - first_miniblock_in_batch, - fee_account, - validation_computational_gas_limit, - chain_id, - ) - .await - .context("failed loading L1 batch params")?; - - let pending_miniblocks = storage - .transactions_dal() - .get_miniblocks_to_reexecute() - .await - .context("failed loading miniblocks for re-execution")?; - let first_pending_miniblock = pending_miniblocks - .first() - .context("no pending miniblocks; was `first_miniblock_in_batch` loaded for a correct L1 batch number?")?; - anyhow::ensure!( - first_pending_miniblock.number == first_miniblock_in_batch.header.number, - "Invalid `first_miniblock_in_batch` supplied: its L1 batch #{} is not pending", - first_miniblock_in_batch.l1_batch_number - ); - Ok(PendingBatchData { - l1_batch_env, - system_env, - pending_miniblocks, - }) - } -} - -#[cfg(test)] -mod tests { - use std::{collections::HashMap, ops}; - - use futures::FutureExt; - use zksync_contracts::BaseSystemContractsHashes; - use zksync_dal::ConnectionPool; - use zksync_types::{ - block::{BlockGasCount, MiniblockHasher}, - fee::TransactionExecutionMetrics, - }; - - use super::*; - use crate::{ - genesis::{ensure_genesis_state, GenesisParams}, - utils::testonly::{ - create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction, - prepare_empty_recovery_snapshot, - }, - }; - - #[test] - #[rustfmt::skip] // One-line formatting looks better here. - fn test_poll_iters() { - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(0)), 1); - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(100)), 1); - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(101)), 2); - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(200)), 2); - assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(201)), 3); - } - - #[tokio::test] - async fn creating_io_cursor_with_genesis() { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) - .await - .unwrap(); - - let cursor = IoCursor::new(&mut storage).await.unwrap(); - assert_eq!(cursor.l1_batch, L1BatchNumber(1)); - assert_eq!(cursor.next_miniblock, MiniblockNumber(1)); - assert_eq!(cursor.prev_miniblock_timestamp, 0); - assert_eq!( - cursor.prev_miniblock_hash, - MiniblockHasher::legacy_hash(MiniblockNumber(0)) - ); - - let miniblock = create_miniblock(1); - storage - .blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - - let cursor = IoCursor::new(&mut storage).await.unwrap(); - assert_eq!(cursor.l1_batch, L1BatchNumber(1)); - assert_eq!(cursor.next_miniblock, MiniblockNumber(2)); - assert_eq!(cursor.prev_miniblock_timestamp, miniblock.timestamp); - assert_eq!(cursor.prev_miniblock_hash, miniblock.hash); - } - - #[tokio::test] - async fn creating_io_cursor_with_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; - - let cursor = IoCursor::new(&mut storage).await.unwrap(); - assert_eq!(cursor.l1_batch, 
L1BatchNumber(24)); - assert_eq!( - cursor.next_miniblock, - snapshot_recovery.miniblock_number + 1 - ); - assert_eq!( - cursor.prev_miniblock_timestamp, - snapshot_recovery.miniblock_timestamp - ); - assert_eq!(cursor.prev_miniblock_hash, snapshot_recovery.miniblock_hash); - - // Add a miniblock so that we have miniblocks (but not an L1 batch) in the storage. - let miniblock = create_miniblock(snapshot_recovery.miniblock_number.0 + 1); - storage - .blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); - - let cursor = IoCursor::new(&mut storage).await.unwrap(); - assert_eq!(cursor.l1_batch, L1BatchNumber(24)); - assert_eq!(cursor.next_miniblock, miniblock.number + 1); - assert_eq!(cursor.prev_miniblock_timestamp, miniblock.timestamp); - assert_eq!(cursor.prev_miniblock_hash, miniblock.hash); - } - - #[tokio::test] - async fn waiting_for_l1_batch_params_with_genesis() { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - let genesis_root_hash = - ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) - .await - .unwrap(); - - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - assert!(provider.snapshot.is_none()); - let (hash, timestamp) = provider - .wait_for_l1_batch_params(&mut storage, L1BatchNumber(0)) - .await - .unwrap(); - assert_eq!(hash, genesis_root_hash); - assert_eq!(timestamp, 0); - - let new_l1_batch = create_l1_batch(1); - storage - .blocks_dal() - .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) - .await - .unwrap(); - - let wait_future = provider.wait_for_l1_batch_params(&mut storage, L1BatchNumber(1)); - futures::pin_mut!(wait_future); - tokio::task::yield_now().await; - assert!((&mut wait_future).now_or_never().is_none()); - - let expected_hash = H256::repeat_byte(1); - let mut storage = pool.access_storage().await.unwrap(); - storage - .blocks_dal() - .set_l1_batch_hash(L1BatchNumber(1), expected_hash) - .await - .unwrap(); - let (hash, timestamp) = wait_future.await.unwrap(); - assert_eq!(hash, expected_hash); - assert_eq!(timestamp, new_l1_batch.timestamp); - } - - #[tokio::test] - async fn waiting_for_l1_batch_params_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; - - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - assert!(provider.snapshot.is_some()); - let (hash, timestamp) = provider - .wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number) - .await - .unwrap(); - assert_eq!(hash, snapshot_recovery.l1_batch_root_hash); - assert_eq!(timestamp, snapshot_recovery.l1_batch_timestamp); - - for pruned_l1_batch in [0, 1, snapshot_recovery.l1_batch_number.0 - 1] { - assert!(provider - .wait_for_l1_batch_params(&mut storage, L1BatchNumber(pruned_l1_batch)) - .await - .is_err()); - } - - let new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1); - storage - .blocks_dal() - .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) - .await - .unwrap(); - - let wait_future = - provider.wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number + 1); - futures::pin_mut!(wait_future); - tokio::task::yield_now().await; - assert!((&mut wait_future).now_or_never().is_none()); - - let expected_hash = H256::repeat_byte(1); - let mut storage = pool.access_storage().await.unwrap(); - 
storage - .blocks_dal() - .set_l1_batch_hash(new_l1_batch.number, expected_hash) - .await - .unwrap(); - let (hash, timestamp) = wait_future.await.unwrap(); - assert_eq!(hash, expected_hash); - assert_eq!(timestamp, new_l1_batch.timestamp); - } - - #[tokio::test] - async fn getting_first_miniblock_in_batch_with_genesis() { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) - .await - .unwrap(); - - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let mut batches_and_miniblocks = HashMap::from([ - (L1BatchNumber(0), Ok(Some(MiniblockNumber(0)))), - (L1BatchNumber(1), Ok(Some(MiniblockNumber(1)))), - (L1BatchNumber(2), Ok(None)), - (L1BatchNumber(100), Ok(None)), - ]); - assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; - - let new_miniblock = create_miniblock(1); - storage - .blocks_dal() - .insert_miniblock(&new_miniblock) - .await - .unwrap(); - let new_miniblock = create_miniblock(2); - storage - .blocks_dal() - .insert_miniblock(&new_miniblock) - .await - .unwrap(); - - assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; - - let new_l1_batch = create_l1_batch(1); - storage - .blocks_dal() - .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) - .await - .unwrap(); - storage - .blocks_dal() - .mark_miniblocks_as_executed_in_l1_batch(new_l1_batch.number) - .await - .unwrap(); - - batches_and_miniblocks.insert(L1BatchNumber(2), Ok(Some(MiniblockNumber(3)))); - assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; - } - - async fn assert_first_miniblock_numbers( - provider: &L1BatchParamsProvider, - storage: &mut StorageProcessor<'_>, - batches_and_miniblocks: &HashMap, ()>>, - ) { - for (&batch, &expected_miniblock) in batches_and_miniblocks { - let number = provider - .load_number_of_first_miniblock_in_batch(storage, batch) - .await; - match expected_miniblock { - Ok(expected) => { - assert_eq!( - number.unwrap(), - expected, - "load_number_of_first_miniblock_in_batch({batch})" - ); - } - Err(()) => { - number.unwrap_err(); - } - } - } - } - - #[tokio::test] - async fn getting_first_miniblock_in_batch_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; - - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let mut batches_and_miniblocks = HashMap::from([ - (L1BatchNumber(1), Err(())), - (snapshot_recovery.l1_batch_number, Err(())), - ( - snapshot_recovery.l1_batch_number + 1, - Ok(Some(snapshot_recovery.miniblock_number + 1)), - ), - (snapshot_recovery.l1_batch_number + 2, Ok(None)), - (L1BatchNumber(100), Ok(None)), - ]); - assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; - - let new_miniblock = create_miniblock(snapshot_recovery.miniblock_number.0 + 1); - storage - .blocks_dal() - .insert_miniblock(&new_miniblock) - .await - .unwrap(); - - assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; - - let new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1); - storage - .blocks_dal() - .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) - .await - .unwrap(); - storage - .blocks_dal() - 
.mark_miniblocks_as_executed_in_l1_batch(new_l1_batch.number) - .await - .unwrap(); - - batches_and_miniblocks.insert( - snapshot_recovery.l1_batch_number + 2, - Ok(Some(new_miniblock.number + 1)), - ); - assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await; - } - - #[tokio::test] - async fn loading_pending_batch_with_genesis() { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - let genesis_params = GenesisParams::mock(); - ensure_genesis_state(&mut storage, L2ChainId::default(), &genesis_params) - .await - .unwrap(); - store_pending_miniblocks( - &mut storage, - 1..=2, - genesis_params.base_system_contracts.hashes(), - ) - .await; - - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let first_miniblock_in_batch = provider - .load_first_miniblock_in_batch(&mut storage, L1BatchNumber(1)) - .await - .unwrap() - .expect("no first miniblock"); - assert_eq!(first_miniblock_in_batch.number(), MiniblockNumber(1)); - - let pending_batch = provider - .load_pending_batch( - &mut storage, - &first_miniblock_in_batch, - Address::zero(), - u32::MAX, - L2ChainId::default(), - ) - .await - .unwrap(); - - assert_eq!(pending_batch.pending_miniblocks.len(), 2); - assert_eq!(pending_batch.l1_batch_env.number, L1BatchNumber(1)); - assert_eq!(pending_batch.l1_batch_env.timestamp, 1); - assert_eq!(pending_batch.l1_batch_env.first_l2_block.number, 1); - assert_eq!(pending_batch.l1_batch_env.first_l2_block.timestamp, 1); - assert_eq!( - pending_batch.l1_batch_env.first_l2_block.prev_block_hash, - MiniblockHasher::legacy_hash(MiniblockNumber(0)) - ); - } - - async fn store_pending_miniblocks( - storage: &mut StorageProcessor<'_>, - numbers: ops::RangeInclusive, - contract_hashes: BaseSystemContractsHashes, - ) { - for miniblock_number in numbers { - let tx = create_l2_transaction(10, 100); - storage - .transactions_dal() - .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) - .await; - let mut new_miniblock = create_miniblock(miniblock_number); - new_miniblock.base_system_contracts_hashes = contract_hashes; - storage - .blocks_dal() - .insert_miniblock(&new_miniblock) - .await - .unwrap(); - let tx_result = execute_l2_transaction(tx); - storage - .transactions_dal() - .mark_txs_as_executed_in_miniblock(new_miniblock.number, &[tx_result], 1.into()) - .await; - } - } - - #[tokio::test] - async fn loading_pending_batch_after_snapshot_recovery() { - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; - let contracts = GenesisParams::mock().base_system_contracts; - let factory_deps = HashMap::from([ - ( - contracts.bootloader.hash, - zksync_utils::be_words_to_bytes(&contracts.bootloader.code), - ), - ( - contracts.default_aa.hash, - zksync_utils::be_words_to_bytes(&contracts.default_aa.code), - ), - ]); - - let starting_miniblock_number = snapshot_recovery.miniblock_number.0 + 1; - store_pending_miniblocks( - &mut storage, - starting_miniblock_number..=starting_miniblock_number + 1, - contracts.hashes(), - ) - .await; - storage - .storage_dal() - .insert_factory_deps(snapshot_recovery.miniblock_number + 1, &factory_deps) - .await; - - let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let first_miniblock_in_batch = provider - .load_first_miniblock_in_batch(&mut storage, snapshot_recovery.l1_batch_number + 1) - .await - 
.unwrap() - .expect("no first miniblock"); - assert_eq!( - first_miniblock_in_batch.number(), - snapshot_recovery.miniblock_number + 1 - ); - - let pending_batch = provider - .load_pending_batch( - &mut storage, - &first_miniblock_in_batch, - Address::zero(), - u32::MAX, - L2ChainId::default(), - ) - .await - .unwrap(); - - let expected_timestamp = u64::from(snapshot_recovery.miniblock_number.0) + 1; - assert_eq!(pending_batch.pending_miniblocks.len(), 2); - assert_eq!( - pending_batch.l1_batch_env.number, - snapshot_recovery.l1_batch_number + 1 - ); - assert_eq!(pending_batch.l1_batch_env.timestamp, expected_timestamp); - assert_eq!( - pending_batch.l1_batch_env.first_l2_block.number, - snapshot_recovery.miniblock_number.0 + 1 - ); - assert_eq!( - pending_batch.l1_batch_env.first_l2_block.timestamp, - expected_timestamp - ); - assert_eq!( - pending_batch.l1_batch_env.first_l2_block.prev_block_hash, - snapshot_recovery.miniblock_hash - ); - } -} diff --git a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs new file mode 100644 index 000000000000..6985c3aa3fe9 --- /dev/null +++ b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs @@ -0,0 +1,405 @@ +use std::time::{Duration, Instant}; + +use anyhow::Context; +use multivm::{ + interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + vm_latest::constants::BLOCK_GAS_LIMIT, +}; +use zksync_contracts::BaseSystemContracts; +use zksync_dal::StorageProcessor; +use zksync_types::{ + block::MiniblockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address, + L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, +}; + +use super::PendingBatchData; +use crate::state_keeper::extractors; + +#[cfg(test)] +mod tests; + +/// Returns the parameters required to initialize the VM for the next L1 batch. +#[allow(clippy::too_many_arguments)] +pub(crate) fn l1_batch_params( + current_l1_batch_number: L1BatchNumber, + fee_account: Address, + l1_batch_timestamp: u64, + previous_batch_hash: H256, + fee_input: BatchFeeInput, + first_miniblock_number: MiniblockNumber, + prev_miniblock_hash: H256, + base_system_contracts: BaseSystemContracts, + validation_computational_gas_limit: u32, + protocol_version: ProtocolVersionId, + virtual_blocks: u32, + chain_id: L2ChainId, +) -> (SystemEnv, L1BatchEnv) { + ( + SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: protocol_version, + base_system_smart_contracts: base_system_contracts, + gas_limit: BLOCK_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: validation_computational_gas_limit, + chain_id, + }, + L1BatchEnv { + previous_batch_hash: Some(previous_batch_hash), + number: current_l1_batch_number, + timestamp: l1_batch_timestamp, + fee_input, + fee_account, + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: first_miniblock_number.0, + timestamp: l1_batch_timestamp, + prev_block_hash: prev_miniblock_hash, + max_virtual_blocks_to_create: virtual_blocks, + }, + }, + ) +} + +/// Returns the amount of iterations `delay_interval` fits into `max_wait`, rounding up. 
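+///
+/// An illustrative example, assuming a 100 ms delay interval: a 0 ms max wait still yields
+/// 1 iteration, 101 ms yields 2, and 201 ms yields 3, i.e. `ceil(max_wait / delay_interval)`
+/// clamped to at least 1 (cf. `test_poll_iters` in the tests module).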
+pub(crate) fn poll_iters(delay_interval: Duration, max_wait: Duration) -> usize {
+    let max_wait_millis = max_wait.as_millis() as u64;
+    let delay_interval_millis = delay_interval.as_millis() as u64;
+    assert!(delay_interval_millis > 0, "delay interval must be positive");
+
+    ((max_wait_millis + delay_interval_millis - 1) / delay_interval_millis).max(1) as usize
+}
+
+/// Cursor of the miniblock / L1 batch progress used by [`StateKeeperIO`](super::StateKeeperIO) implementations.
+#[derive(Debug)]
+pub(crate) struct IoCursor {
+    pub next_miniblock: MiniblockNumber,
+    pub prev_miniblock_hash: H256,
+    pub prev_miniblock_timestamp: u64,
+    pub l1_batch: L1BatchNumber,
+}
+
+impl IoCursor {
+    /// Loads the cursor from Postgres.
+    pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result<Self> {
+        let last_sealed_l1_batch_number = storage
+            .blocks_dal()
+            .get_sealed_l1_batch_number()
+            .await
+            .context("Failed getting sealed L1 batch number")?;
+        let last_miniblock_header = storage
+            .blocks_dal()
+            .get_last_sealed_miniblock_header()
+            .await
+            .context("Failed getting sealed miniblock header")?;
+
+        if let (Some(l1_batch_number), Some(miniblock_header)) =
+            (last_sealed_l1_batch_number, &last_miniblock_header)
+        {
+            Ok(Self {
+                next_miniblock: miniblock_header.number + 1,
+                prev_miniblock_hash: miniblock_header.hash,
+                prev_miniblock_timestamp: miniblock_header.timestamp,
+                l1_batch: l1_batch_number + 1,
+            })
+        } else {
+            let snapshot_recovery = storage
+                .snapshot_recovery_dal()
+                .get_applied_snapshot_status()
+                .await
+                .context("Failed getting snapshot recovery info")?
+                .context("Postgres contains neither blocks nor snapshot recovery info")?;
+            let l1_batch =
+                last_sealed_l1_batch_number.unwrap_or(snapshot_recovery.l1_batch_number) + 1;
+
+            let (next_miniblock, prev_miniblock_hash, prev_miniblock_timestamp);
+            if let Some(miniblock_header) = &last_miniblock_header {
+                next_miniblock = miniblock_header.number + 1;
+                prev_miniblock_hash = miniblock_header.hash;
+                prev_miniblock_timestamp = miniblock_header.timestamp;
+            } else {
+                next_miniblock = snapshot_recovery.miniblock_number + 1;
+                prev_miniblock_hash = snapshot_recovery.miniblock_hash;
+                prev_miniblock_timestamp = snapshot_recovery.miniblock_timestamp;
+            }
+
+            Ok(Self {
+                next_miniblock,
+                prev_miniblock_hash,
+                prev_miniblock_timestamp,
+                l1_batch,
+            })
+        }
+    }
+}
+
+/// Type-safe wrapper around [`MiniblockHeader`] returned by [`L1BatchParamsProvider`].
+#[derive(Debug)]
+pub(crate) struct FirstMiniblockInBatch {
+    header: MiniblockHeader,
+    l1_batch_number: L1BatchNumber,
+}
+
+impl FirstMiniblockInBatch {
+    pub fn number(&self) -> MiniblockNumber {
+        self.header.number
+    }
+
+    pub fn has_protocol_version(&self) -> bool {
+        self.header.protocol_version.is_some()
+    }
+
+    pub fn set_protocol_version(&mut self, version: ProtocolVersionId) {
+        assert!(
+            self.header.protocol_version.is_none(),
+            "Cannot redefine protocol version"
+        );
+        self.header.protocol_version = Some(version);
+    }
+}
+
+/// Provider of L1 batch parameters for state keeper I/O implementations. The provider is stateless; i.e., it doesn't
+/// enforce a particular order of method calls.
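+///
+/// It is snapshot-aware: if the node was recovered from a snapshot, parameters for the snapshot
+/// L1 batch are served from the recovery status, and requests for earlier (pruned) batches are
+/// rejected with an error.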
+#[derive(Debug)]
+pub(crate) struct L1BatchParamsProvider {
+    snapshot: Option<SnapshotRecoveryStatus>,
+}
+
+impl L1BatchParamsProvider {
+    pub async fn new(storage: &mut StorageProcessor<'_>) -> anyhow::Result<Self> {
+        let snapshot = storage
+            .snapshot_recovery_dal()
+            .get_applied_snapshot_status()
+            .await?;
+        Ok(Self { snapshot })
+    }
+
+    /// Returns the state root hash and timestamp of an L1 batch with the specified number, waiting for the hash
+    /// to be computed if necessary.
+    pub async fn wait_for_l1_batch_params(
+        &self,
+        storage: &mut StorageProcessor<'_>,
+        number: L1BatchNumber,
+    ) -> anyhow::Result<(H256, u64)> {
+        let first_l1_batch = if let Some(snapshot) = &self.snapshot {
+            // Special case: if we've recovered from a snapshot, we allow waiting for the snapshot L1 batch.
+            if number == snapshot.l1_batch_number {
+                return Ok((snapshot.l1_batch_root_hash, snapshot.l1_batch_timestamp));
+            }
+            snapshot.l1_batch_number + 1
+        } else {
+            L1BatchNumber(0)
+        };
+
+        anyhow::ensure!(
+            number >= first_l1_batch,
+            "Cannot wait for a hash of a pruned L1 batch #{number} (first retained batch: {first_l1_batch})"
+        );
+        Self::wait_for_l1_batch_params_unchecked(storage, number).await
+    }
+
+    async fn wait_for_l1_batch_params_unchecked(
+        storage: &mut StorageProcessor<'_>,
+        number: L1BatchNumber,
+    ) -> anyhow::Result<(H256, u64)> {
+        // If the state root is not known yet, this duration will be used to back off in the polling loop below
+        const SAFE_STATE_ROOT_INTERVAL: Duration = Duration::from_millis(100);
+
+        let stage_started_at: Instant = Instant::now();
+        loop {
+            let data = storage
+                .blocks_dal()
+                .get_l1_batch_state_root_and_timestamp(number)
+                .await?;
+            if let Some((root_hash, timestamp)) = data {
+                tracing::trace!(
+                    "Waiting for hash of L1 batch #{number} took {:?}",
+                    stage_started_at.elapsed()
+                );
+                return Ok((root_hash, timestamp));
+            }
+
+            tokio::time::sleep(SAFE_STATE_ROOT_INTERVAL).await;
+        }
+    }
+
+    /// Returns a header of the first miniblock in the specified L1 batch regardless of whether the batch is sealed or not.
+    pub(crate) async fn load_first_miniblock_in_batch(
+        &self,
+        storage: &mut StorageProcessor<'_>,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<Option<FirstMiniblockInBatch>> {
+        let miniblock_number = self
+            .load_number_of_first_miniblock_in_batch(storage, l1_batch_number)
+            .await
+            .context("failed getting first miniblock number")?;
+        Ok(match miniblock_number {
+            Some(number) => storage
+                .blocks_dal()
+                .get_miniblock_header(number)
+                .await
+                .context("failed getting miniblock header")?
+                .map(|header| FirstMiniblockInBatch {
+                    header,
+                    l1_batch_number,
+                }),
+            None => None,
+        })
+    }
+
+    async fn load_number_of_first_miniblock_in_batch(
+        &self,
+        storage: &mut StorageProcessor<'_>,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<Option<MiniblockNumber>> {
+        if l1_batch_number == L1BatchNumber(0) {
+            return Ok(Some(MiniblockNumber(0)));
+        }
+
+        if let Some(snapshot) = &self.snapshot {
+            anyhow::ensure!(
+                l1_batch_number > snapshot.l1_batch_number,
+                "Cannot load miniblocks for pruned L1 batch #{l1_batch_number} (first retained batch: {})",
+                snapshot.l1_batch_number + 1
+            );
+            if l1_batch_number == snapshot.l1_batch_number + 1 {
+                return Ok(Some(snapshot.miniblock_number + 1));
+            }
+        }
+
+        let prev_l1_batch = l1_batch_number - 1;
+        // At this point, we have ensured that `prev_l1_batch` is not pruned.
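+        // (Either there is no snapshot, or the checks above guarantee that
+        // `l1_batch_number >= snapshot.l1_batch_number + 2`, so `prev_l1_batch` is retained.)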
+ let Some((_, last_miniblock_in_prev_l1_batch)) = storage + .blocks_dal() + .get_miniblock_range_of_l1_batch(prev_l1_batch) + .await + .with_context(|| { + format!("failed getting miniblock range for L1 batch #{prev_l1_batch}") + })? + else { + return Ok(None); + }; + Ok(Some(last_miniblock_in_prev_l1_batch + 1)) + } + + /// Loads VM-related L1 batch parameters for the specified batch. + pub(crate) async fn load_l1_batch_params( + &self, + storage: &mut StorageProcessor<'_>, + first_miniblock_in_batch: &FirstMiniblockInBatch, + fee_account: Address, + validation_computational_gas_limit: u32, + chain_id: L2ChainId, + ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { + anyhow::ensure!( + first_miniblock_in_batch.l1_batch_number > L1BatchNumber(0), + "Loading params for genesis L1 batch not supported" + ); + // L1 batch timestamp is set to the timestamp of its first miniblock. + let l1_batch_timestamp = first_miniblock_in_batch.header.timestamp; + + let prev_l1_batch_number = first_miniblock_in_batch.l1_batch_number - 1; + tracing::info!("Getting previous L1 batch hash for batch #{prev_l1_batch_number}"); + let (prev_l1_batch_hash, prev_l1_batch_timestamp) = self + .wait_for_l1_batch_params(storage, prev_l1_batch_number) + .await + .context("failed getting hash for previous L1 batch")?; + tracing::info!("Got state root hash for previous L1 batch #{prev_l1_batch_number}: {prev_l1_batch_hash:?}"); + + anyhow::ensure!( + prev_l1_batch_timestamp < l1_batch_timestamp, + "Invalid params for L1 batch #{}: Timestamp of previous L1 batch ({}) >= provisional L1 batch timestamp ({}), \ + meaning that L1 batch will be rejected by the bootloader", + first_miniblock_in_batch.l1_batch_number, + extractors::display_timestamp(prev_l1_batch_timestamp), + extractors::display_timestamp(l1_batch_timestamp) + ); + + let prev_miniblock_number = first_miniblock_in_batch.header.number - 1; + tracing::info!("Getting previous miniblock hash for miniblock #{prev_miniblock_number}"); + + let prev_miniblock_hash = self.snapshot.as_ref().and_then(|snapshot| { + (snapshot.miniblock_number == prev_miniblock_number).then_some(snapshot.miniblock_hash) + }); + let prev_miniblock_hash = match prev_miniblock_hash { + Some(hash) => hash, + None => storage + .blocks_web3_dal() + .get_miniblock_hash(prev_miniblock_number) + .await + .context("failed getting hash for previous miniblock")? + .context("previous miniblock disappeared from storage")?, + }; + tracing::info!( + "Got hash for previous miniblock #{prev_miniblock_number}: {prev_miniblock_hash:?}" + ); + + let contract_hashes = first_miniblock_in_batch.header.base_system_contracts_hashes; + let base_system_contracts = storage + .storage_dal() + .get_base_system_contracts(contract_hashes.bootloader, contract_hashes.default_aa) + .await; + + Ok(l1_batch_params( + first_miniblock_in_batch.l1_batch_number, + fee_account, + l1_batch_timestamp, + prev_l1_batch_hash, + first_miniblock_in_batch.header.batch_fee_input, + first_miniblock_in_batch.header.number, + prev_miniblock_hash, + base_system_contracts, + validation_computational_gas_limit, + first_miniblock_in_batch + .header + .protocol_version + .context("`protocol_version` must be set for miniblock")?, + first_miniblock_in_batch.header.virtual_blocks, + chain_id, + )) + } + + /// Loads the pending L1 batch data from the database. + /// + /// # Errors + /// + /// Propagates DB errors. Also returns an error if `first_miniblock_in_batch` doesn't correspond to a pending L1 batch. 
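+    ///
+    /// Here, an L1 batch is considered pending if its miniblocks are persisted in Postgres (and thus
+    /// returned by `get_miniblocks_to_reexecute()`), while the batch itself has not been sealed yet.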
+    pub(crate) async fn load_pending_batch(
+        &self,
+        storage: &mut StorageProcessor<'_>,
+        first_miniblock_in_batch: &FirstMiniblockInBatch,
+        fee_account: Address,
+        validation_computational_gas_limit: u32,
+        chain_id: L2ChainId,
+    ) -> anyhow::Result<PendingBatchData> {
+        let (system_env, l1_batch_env) = self
+            .load_l1_batch_params(
+                storage,
+                first_miniblock_in_batch,
+                fee_account,
+                validation_computational_gas_limit,
+                chain_id,
+            )
+            .await
+            .context("failed loading L1 batch params")?;
+
+        let pending_miniblocks = storage
+            .transactions_dal()
+            .get_miniblocks_to_reexecute()
+            .await
+            .context("failed loading miniblocks for re-execution")?;
+        let first_pending_miniblock = pending_miniblocks
+            .first()
+            .context("no pending miniblocks; was `first_miniblock_in_batch` loaded for a correct L1 batch number?")?;
+        anyhow::ensure!(
+            first_pending_miniblock.number == first_miniblock_in_batch.header.number,
+            "Invalid `first_miniblock_in_batch` supplied: its L1 batch #{} is not pending",
+            first_miniblock_in_batch.l1_batch_number
+        );
+        Ok(PendingBatchData {
+            l1_batch_env,
+            system_env,
+            pending_miniblocks,
+        })
+    }
+}
diff --git a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs
new file mode 100644
index 000000000000..8920bbf17dcb
--- /dev/null
+++ b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs
@@ -0,0 +1,445 @@
+//! Tests for the common I/O utils.
+
+use std::{collections::HashMap, ops};
+
+use futures::FutureExt;
+use zksync_contracts::BaseSystemContractsHashes;
+use zksync_dal::ConnectionPool;
+use zksync_types::{
+    block::{BlockGasCount, MiniblockHasher},
+    fee::TransactionExecutionMetrics,
+};
+
+use super::*;
+use crate::{
+    genesis::{ensure_genesis_state, GenesisParams},
+    utils::testonly::{
+        create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction,
+        prepare_empty_recovery_snapshot,
+    },
+};
+
+#[test]
+#[rustfmt::skip] // One-line formatting looks better here.
+fn test_poll_iters() { + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(0)), 1); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(100)), 1); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(101)), 2); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(200)), 2); + assert_eq!(poll_iters(Duration::from_millis(100), Duration::from_millis(201)), 3); +} + +#[tokio::test] +async fn creating_io_cursor_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(1)); + assert_eq!(cursor.next_miniblock, MiniblockNumber(1)); + assert_eq!(cursor.prev_miniblock_timestamp, 0); + assert_eq!( + cursor.prev_miniblock_hash, + MiniblockHasher::legacy_hash(MiniblockNumber(0)) + ); + + let miniblock = create_miniblock(1); + storage + .blocks_dal() + .insert_miniblock(&miniblock) + .await + .unwrap(); + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(1)); + assert_eq!(cursor.next_miniblock, MiniblockNumber(2)); + assert_eq!(cursor.prev_miniblock_timestamp, miniblock.timestamp); + assert_eq!(cursor.prev_miniblock_hash, miniblock.hash); +} + +#[tokio::test] +async fn creating_io_cursor_with_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(24)); + assert_eq!( + cursor.next_miniblock, + snapshot_recovery.miniblock_number + 1 + ); + assert_eq!( + cursor.prev_miniblock_timestamp, + snapshot_recovery.miniblock_timestamp + ); + assert_eq!(cursor.prev_miniblock_hash, snapshot_recovery.miniblock_hash); + + // Add a miniblock so that we have miniblocks (but not an L1 batch) in the storage. 
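+    // The cursor should now be derived from this miniblock rather than from the snapshot,
+    // while the L1 batch number is still inferred from the snapshot.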
+ let miniblock = create_miniblock(snapshot_recovery.miniblock_number.0 + 1); + storage + .blocks_dal() + .insert_miniblock(&miniblock) + .await + .unwrap(); + + let cursor = IoCursor::new(&mut storage).await.unwrap(); + assert_eq!(cursor.l1_batch, L1BatchNumber(24)); + assert_eq!(cursor.next_miniblock, miniblock.number + 1); + assert_eq!(cursor.prev_miniblock_timestamp, miniblock.timestamp); + assert_eq!(cursor.prev_miniblock_hash, miniblock.hash); +} + +#[tokio::test] +async fn waiting_for_l1_batch_params_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let genesis_root_hash = + ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) + .await + .unwrap(); + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + assert!(provider.snapshot.is_none()); + let (hash, timestamp) = provider + .wait_for_l1_batch_params(&mut storage, L1BatchNumber(0)) + .await + .unwrap(); + assert_eq!(hash, genesis_root_hash); + assert_eq!(timestamp, 0); + + let new_l1_batch = create_l1_batch(1); + storage + .blocks_dal() + .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) + .await + .unwrap(); + + let wait_future = provider.wait_for_l1_batch_params(&mut storage, L1BatchNumber(1)); + futures::pin_mut!(wait_future); + tokio::task::yield_now().await; + assert!((&mut wait_future).now_or_never().is_none()); + + let expected_hash = H256::repeat_byte(1); + let mut storage = pool.access_storage().await.unwrap(); + storage + .blocks_dal() + .set_l1_batch_hash(L1BatchNumber(1), expected_hash) + .await + .unwrap(); + let (hash, timestamp) = wait_future.await.unwrap(); + assert_eq!(hash, expected_hash); + assert_eq!(timestamp, new_l1_batch.timestamp); +} + +#[tokio::test] +async fn waiting_for_l1_batch_params_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + assert!(provider.snapshot.is_some()); + let (hash, timestamp) = provider + .wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number) + .await + .unwrap(); + assert_eq!(hash, snapshot_recovery.l1_batch_root_hash); + assert_eq!(timestamp, snapshot_recovery.l1_batch_timestamp); + + for pruned_l1_batch in [0, 1, snapshot_recovery.l1_batch_number.0 - 1] { + assert!(provider + .wait_for_l1_batch_params(&mut storage, L1BatchNumber(pruned_l1_batch)) + .await + .is_err()); + } + + let new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1); + storage + .blocks_dal() + .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) + .await + .unwrap(); + + let wait_future = + provider.wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number + 1); + futures::pin_mut!(wait_future); + tokio::task::yield_now().await; + assert!((&mut wait_future).now_or_never().is_none()); + + let expected_hash = H256::repeat_byte(1); + let mut storage = pool.access_storage().await.unwrap(); + storage + .blocks_dal() + .set_l1_batch_hash(new_l1_batch.number, expected_hash) + .await + .unwrap(); + let (hash, timestamp) = wait_future.await.unwrap(); + assert_eq!(hash, expected_hash); + assert_eq!(timestamp, new_l1_batch.timestamp); +} + +#[tokio::test] +async fn getting_first_miniblock_in_batch_with_genesis() { + let pool = 
ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock())
+        .await
+        .unwrap();
+
+    let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap();
+    let mut batches_and_miniblocks = HashMap::from([
+        (L1BatchNumber(0), Ok(Some(MiniblockNumber(0)))),
+        (L1BatchNumber(1), Ok(Some(MiniblockNumber(1)))),
+        (L1BatchNumber(2), Ok(None)),
+        (L1BatchNumber(100), Ok(None)),
+    ]);
+    assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await;
+
+    let new_miniblock = create_miniblock(1);
+    storage
+        .blocks_dal()
+        .insert_miniblock(&new_miniblock)
+        .await
+        .unwrap();
+    let new_miniblock = create_miniblock(2);
+    storage
+        .blocks_dal()
+        .insert_miniblock(&new_miniblock)
+        .await
+        .unwrap();
+
+    assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await;
+
+    let new_l1_batch = create_l1_batch(1);
+    storage
+        .blocks_dal()
+        .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0)
+        .await
+        .unwrap();
+    storage
+        .blocks_dal()
+        .mark_miniblocks_as_executed_in_l1_batch(new_l1_batch.number)
+        .await
+        .unwrap();
+
+    batches_and_miniblocks.insert(L1BatchNumber(2), Ok(Some(MiniblockNumber(3))));
+    assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await;
+}
+
+async fn assert_first_miniblock_numbers(
+    provider: &L1BatchParamsProvider,
+    storage: &mut StorageProcessor<'_>,
+    batches_and_miniblocks: &HashMap<L1BatchNumber, Result<Option<MiniblockNumber>, ()>>,
+) {
+    for (&batch, &expected_miniblock) in batches_and_miniblocks {
+        let number = provider
+            .load_number_of_first_miniblock_in_batch(storage, batch)
+            .await;
+        match expected_miniblock {
+            Ok(expected) => {
+                assert_eq!(
+                    number.unwrap(),
+                    expected,
+                    "load_number_of_first_miniblock_in_batch({batch})"
+                );
+            }
+            Err(()) => {
+                number.unwrap_err();
+            }
+        }
+    }
+}
+
+#[tokio::test]
+async fn getting_first_miniblock_in_batch_after_snapshot_recovery() {
+    let pool = ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await;
+
+    let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap();
+    let mut batches_and_miniblocks = HashMap::from([
+        (L1BatchNumber(1), Err(())),
+        (snapshot_recovery.l1_batch_number, Err(())),
+        (
+            snapshot_recovery.l1_batch_number + 1,
+            Ok(Some(snapshot_recovery.miniblock_number + 1)),
+        ),
+        (snapshot_recovery.l1_batch_number + 2, Ok(None)),
+        (L1BatchNumber(100), Ok(None)),
+    ]);
+    assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await;
+
+    let new_miniblock = create_miniblock(snapshot_recovery.miniblock_number.0 + 1);
+    storage
+        .blocks_dal()
+        .insert_miniblock(&new_miniblock)
+        .await
+        .unwrap();
+
+    assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await;
+
+    let new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1);
+    storage
+        .blocks_dal()
+        .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0)
+        .await
+        .unwrap();
+    storage
+        .blocks_dal()
+        .mark_miniblocks_as_executed_in_l1_batch(new_l1_batch.number)
+        .await
+        .unwrap();
+
+    batches_and_miniblocks.insert(
+        snapshot_recovery.l1_batch_number + 2,
+        Ok(Some(new_miniblock.number + 1)),
+    );
+    assert_first_miniblock_numbers(&provider, &mut storage, &batches_and_miniblocks).await;
+}
+
+#[tokio::test]
+async fn loading_pending_batch_with_genesis() {
+    let pool = ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    let genesis_params = GenesisParams::mock();
+    ensure_genesis_state(&mut storage, L2ChainId::default(), &genesis_params)
+        .await
+        .unwrap();
+    store_pending_miniblocks(
+        &mut storage,
+        1..=2,
+        genesis_params.base_system_contracts.hashes(),
+    )
+    .await;
+
+    let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap();
+    let first_miniblock_in_batch = provider
+        .load_first_miniblock_in_batch(&mut storage, L1BatchNumber(1))
+        .await
+        .unwrap()
+        .expect("no first miniblock");
+    assert_eq!(first_miniblock_in_batch.number(), MiniblockNumber(1));
+
+    let pending_batch = provider
+        .load_pending_batch(
+            &mut storage,
+            &first_miniblock_in_batch,
+            Address::zero(),
+            u32::MAX,
+            L2ChainId::default(),
+        )
+        .await
+        .unwrap();
+
+    assert_eq!(pending_batch.pending_miniblocks.len(), 2);
+    assert_eq!(pending_batch.l1_batch_env.number, L1BatchNumber(1));
+    assert_eq!(pending_batch.l1_batch_env.timestamp, 1);
+    assert_eq!(pending_batch.l1_batch_env.first_l2_block.number, 1);
+    assert_eq!(pending_batch.l1_batch_env.first_l2_block.timestamp, 1);
+    assert_eq!(
+        pending_batch.l1_batch_env.first_l2_block.prev_block_hash,
+        MiniblockHasher::legacy_hash(MiniblockNumber(0))
+    );
+}
+
+async fn store_pending_miniblocks(
+    storage: &mut StorageProcessor<'_>,
+    numbers: ops::RangeInclusive<u32>,
+    contract_hashes: BaseSystemContractsHashes,
+) {
+    for miniblock_number in numbers {
+        let tx = create_l2_transaction(10, 100);
+        storage
+            .transactions_dal()
+            .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default())
+            .await;
+        let mut new_miniblock = create_miniblock(miniblock_number);
+        new_miniblock.base_system_contracts_hashes = contract_hashes;
+        storage
+            .blocks_dal()
+            .insert_miniblock(&new_miniblock)
+            .await
+            .unwrap();
+        let tx_result = execute_l2_transaction(tx);
+        storage
+            .transactions_dal()
+            .mark_txs_as_executed_in_miniblock(new_miniblock.number, &[tx_result], 1.into())
+            .await;
+    }
+}
+
+#[tokio::test]
+async fn loading_pending_batch_after_snapshot_recovery() {
+    let pool = ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await;
+    let contracts = GenesisParams::mock().base_system_contracts;
+    let factory_deps = HashMap::from([
+        (
+            contracts.bootloader.hash,
+            zksync_utils::be_words_to_bytes(&contracts.bootloader.code),
+        ),
+        (
+            contracts.default_aa.hash,
+            zksync_utils::be_words_to_bytes(&contracts.default_aa.code),
+        ),
+    ]);
+
+    let starting_miniblock_number = snapshot_recovery.miniblock_number.0 + 1;
+    store_pending_miniblocks(
+        &mut storage,
+        starting_miniblock_number..=starting_miniblock_number + 1,
+        contracts.hashes(),
+    )
+    .await;
+    storage
+        .storage_dal()
+        .insert_factory_deps(snapshot_recovery.miniblock_number + 1, &factory_deps)
+        .await;
+
+    let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap();
+    let first_miniblock_in_batch = provider
+        .load_first_miniblock_in_batch(&mut storage, snapshot_recovery.l1_batch_number + 1)
+        .await
+        .unwrap()
+        .expect("no first miniblock");
+    assert_eq!(
+        first_miniblock_in_batch.number(),
+        snapshot_recovery.miniblock_number + 1
+    );
+
+    let pending_batch = provider
+        .load_pending_batch(
+            &mut storage,
+            &first_miniblock_in_batch,
+            Address::zero(),
+            u32::MAX,
+            L2ChainId::default(),
+        )
+        .await
+        .unwrap();
+
+    let expected_timestamp =
u64::from(snapshot_recovery.miniblock_number.0) + 1;
+    assert_eq!(pending_batch.pending_miniblocks.len(), 2);
+    assert_eq!(
+        pending_batch.l1_batch_env.number,
+        snapshot_recovery.l1_batch_number + 1
+    );
+    assert_eq!(pending_batch.l1_batch_env.timestamp, expected_timestamp);
+    assert_eq!(
+        pending_batch.l1_batch_env.first_l2_block.number,
+        snapshot_recovery.miniblock_number.0 + 1
+    );
+    assert_eq!(
+        pending_batch.l1_batch_env.first_l2_block.timestamp,
+        expected_timestamp
+    );
+    assert_eq!(
+        pending_batch.l1_batch_env.first_l2_block.prev_block_hash,
+        snapshot_recovery.miniblock_hash
+    );
+}

From edb4ad0bee5a7ddc9d553293fcdb3fef14d015d9 Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Thu, 18 Jan 2024 15:22:49 +0200
Subject: [PATCH 09/27] Recreate DB state more accurately in SK tests

---
 .../src/state_keeper/io/common/mod.rs         |  3 +-
 .../src/state_keeper/io/tests/mod.rs          | 23 ++++++-----
 .../src/state_keeper/io/tests/tester.rs       | 38 ++++++++++++++++---
 3 files changed, 49 insertions(+), 15 deletions(-)

diff --git a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs
index 6985c3aa3fe9..0f4af1f58a9a 100644
--- a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs
@@ -393,7 +393,8 @@ impl L1BatchParamsProvider {
             .context("no pending miniblocks; was `first_miniblock_in_batch` loaded for a correct L1 batch number?")?;
         anyhow::ensure!(
             first_pending_miniblock.number == first_miniblock_in_batch.header.number,
-            "Invalid `first_miniblock_in_batch` supplied: its L1 batch #{} is not pending",
+            "Invalid `first_miniblock_in_batch` supplied: its L1 batch #{} is not pending; \
+             first pending miniblock: {first_pending_miniblock:?}, first miniblock in batch: {first_miniblock_in_batch:?}",
             first_miniblock_in_batch.l1_batch_number
         );
         Ok(PendingBatchData {
diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
index 0df8e693157a..4ffa6cedc963 100644
--- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
@@ -45,23 +45,24 @@ async fn test_filter_initialization() {
 #[tokio::test]
 async fn test_filter_with_pending_batch() {
     let connection_pool = ConnectionPool::test_pool().await;
-    let tester = Tester::new();
-
+    let mut tester = Tester::new();
     tester.genesis(&connection_pool).await;
 
     // Insert a sealed batch so there will be a `prev_l1_batch_state_root`.
     // These gas values are random and don't matter for filter calculation as there will be a
     // pending batch the filter will be based off of.
-    tester
+    let tx_result = tester
         .insert_miniblock(&connection_pool, 1, 5, BatchFeeInput::l1_pegged(55, 555))
         .await;
-    tester.insert_sealed_batch(&connection_pool, 1).await;
+    tester
+        .insert_sealed_batch(&connection_pool, 1, &[tx_result])
+        .await;
 
     // Inserting a pending miniblock that isn't included in a sealed batch means there is a pending batch.
-    // The gas values are randomly chosen but do affect the filter calculation.
+    // The gas values are randomly chosen but do affect the filter calculation.
     let fee_input = BatchFeeInput::l1_pegged(100, 1000);
-
+    tester.set_timestamp(2);
     tester
         .insert_miniblock(&connection_pool, 2, 10, fee_input)
         .await;
@@ -90,10 +91,12 @@ async fn test_filter_with_no_pending_batch() {
 
     // Insert a sealed batch so there will be a `prev_l1_batch_state_root`.
     // These gas values are random and don't matter for filter calculation.
- tester + let tx_result = tester .insert_miniblock(&connection_pool, 1, 5, BatchFeeInput::l1_pegged(55, 555)) .await; - tester.insert_sealed_batch(&connection_pool, 1).await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; // Create a copy of the tx filter that the mempool will use. let want_filter = l2_tx_filter( @@ -130,13 +133,15 @@ async fn test_timestamps_are_distinct( tester.genesis(&connection_pool).await; tester.set_timestamp(prev_miniblock_timestamp); - tester + let tx_result = tester .insert_miniblock(&connection_pool, 1, 5, BatchFeeInput::l1_pegged(55, 555)) .await; if delay_prev_miniblock_compared_to_batch { tester.set_timestamp(prev_miniblock_timestamp - 1); } - tester.insert_sealed_batch(&connection_pool, 1).await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; let (mut mempool, mut guard) = tester.create_test_mempool_io(connection_pool, 1).await; // Insert a transaction to trigger L1 batch creation. diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index 981e1feedd2a..407d540fbe14 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -1,6 +1,6 @@ //! Testing harness for the IO. -use std::{sync::Arc, time::Duration}; +use std::{slice, sync::Arc, time::Duration}; use multivm::vm_latest::constants::BLOCK_GAS_LIMIT; use zksync_config::{configs::chain::StateKeeperConfig, GasAdjusterConfig}; @@ -10,10 +10,12 @@ use zksync_eth_client::clients::MockEthereum; use zksync_object_store::ObjectStoreFactory; use zksync_types::{ block::MiniblockHeader, + fee::TransactionExecutionMetrics, fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV1}, protocol_version::L1VerifierConfig, system_contracts::get_system_smart_contracts, - Address, L2ChainId, PriorityOpId, ProtocolVersionId, H256, + tx::TransactionExecutionResult, + Address, L2ChainId, MiniblockNumber, PriorityOpId, ProtocolVersionId, H256, }; use crate::{ @@ -21,7 +23,9 @@ use crate::{ genesis::create_genesis_l1_batch, l1_gas_price::GasAdjuster, state_keeper::{io::MiniblockSealer, tests::create_transaction, MempoolGuard, MempoolIO}, - utils::testonly::{create_l1_batch, create_miniblock}, + utils::testonly::{ + create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction, + }, }; #[derive(Debug)] @@ -145,8 +149,13 @@ impl Tester { number: u32, base_fee_per_gas: u64, fee_input: BatchFeeInput, - ) { + ) -> TransactionExecutionResult { let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); + let tx = create_l2_transaction(10, 100); + storage + .transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; storage .blocks_dal() .insert_miniblock(&MiniblockHeader { @@ -158,9 +167,24 @@ impl Tester { }) .await .unwrap(); + let tx_result = execute_l2_transaction(tx.clone()); + storage + .transactions_dal() + .mark_txs_as_executed_in_miniblock( + MiniblockNumber(number), + slice::from_ref(&tx_result), + 1.into(), + ) + .await; + tx_result } - pub(super) async fn insert_sealed_batch(&self, pool: &ConnectionPool, number: u32) { + pub(super) async fn insert_sealed_batch( + &self, + pool: &ConnectionPool, + number: u32, + tx_results: &[TransactionExecutionResult], + ) { let batch_header = create_l1_batch(number); let mut storage = pool.access_storage_tagged("state_keeper").await.unwrap(); storage @@ -173,6 +197,10 @@ impl Tester { 
.mark_miniblocks_as_executed_in_l1_batch(batch_header.number) .await .unwrap(); + storage + .transactions_dal() + .mark_txs_as_executed_in_l1_batch(batch_header.number, tx_results) + .await; storage .blocks_dal() .set_l1_batch_hash(batch_header.number, H256::default()) From 07c693b6f06899aaaf0b47a4a5fae06adb6ed6b6 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 18 Jan 2024 17:44:09 +0200 Subject: [PATCH 10/27] Add (non-working) test for `MempoolIO` --- .../src/state_keeper/io/tests/mod.rs | 126 ++++++++++++++++-- 1 file changed, 118 insertions(+), 8 deletions(-) diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 4ffa6cedc963..0860152d22d2 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -6,8 +6,11 @@ use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; use zksync_types::{ - block::BlockGasCount, fee_model::BatchFeeInput, tx::ExecutionMetrics, AccountTreeId, Address, - L1BatchNumber, MiniblockNumber, ProtocolVersionId, StorageKey, VmEvent, H256, U256, + block::{BlockGasCount, MiniblockHasher}, + fee_model::BatchFeeInput, + tx::ExecutionMetrics, + AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, StorageKey, VmEvent, + H256, U256, }; use zksync_utils::time::seconds_since_epoch; @@ -22,7 +25,7 @@ use crate::{ }, updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, }, - utils::testonly::create_l1_batch_metadata, + utils::testonly::prepare_empty_recovery_snapshot, }; mod tester; @@ -349,14 +352,14 @@ async fn test_miniblock_and_l1_batch_processing( // Genesis is needed for proper mempool initialization. tester.genesis(&pool).await; - let mut conn = pool.access_storage_tagged("state_keeper").await.unwrap(); + let mut storage = pool.access_storage().await.unwrap(); // Save metadata for the genesis L1 batch so that we don't hang in `seal_l1_batch`. 
- let metadata = create_l1_batch_metadata(0); - conn.blocks_dal() - .save_l1_batch_metadata(L1BatchNumber(0), &metadata, H256::zero(), false) + storage + .blocks_dal() + .set_l1_batch_hash(L1BatchNumber(0), H256::zero()) .await .unwrap(); - drop(conn); + drop(storage); let (mut mempool, _) = tester .create_test_mempool_io(pool.clone(), miniblock_sealer_capacity) @@ -421,6 +424,113 @@ async fn miniblock_and_l1_batch_processing_with_sync_sealer() { test_miniblock_and_l1_batch_processing(pool, 0).await; } +// FIXME (PLA-589): Doesn't work because of missing system contracts, which cannot be added w/o a miniblock +#[ignore] +#[tokio::test] +async fn miniblock_processing_after_snapshot_recovery() { + let connection_pool = ConnectionPool::test_pool().await; + let mut storage = connection_pool.access_storage().await.unwrap(); + let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + let tester = Tester::new(); + + let (mut mempool, _) = tester + .create_test_mempool_io(connection_pool.clone(), 1) + .await; + assert_eq!( + mempool.current_miniblock_number(), + snapshot_recovery.miniblock_number + 1 + ); + assert_eq!( + mempool.current_l1_batch_number(), + snapshot_recovery.l1_batch_number + 1 + ); + assert!(mempool.load_pending_batch().await.is_none()); + + let (_, l1_batch_env) = mempool + .wait_for_new_batch_params(Duration::from_secs(10)) + .await + .unwrap(); + assert_eq!(l1_batch_env.number, snapshot_recovery.l1_batch_number + 1); + assert_eq!( + l1_batch_env.previous_batch_hash, + Some(snapshot_recovery.l1_batch_root_hash) + ); + assert_eq!( + l1_batch_env.first_l2_block.prev_block_hash, + snapshot_recovery.miniblock_hash + ); + + let mut updates = UpdatesManager::new( + l1_batch_env, + BaseSystemContractsHashes::default(), + ProtocolVersionId::latest(), + ); + + let tx = create_transaction(10, 100); + let tx_hash = tx.hash(); + updates.extend_from_executed_transaction( + tx, + create_execution_result(0, []), + vec![], + BlockGasCount::default(), + ExecutionMetrics::default(), + vec![], + ); + mempool.seal_miniblock(&updates).await; + + // Check that the miniblock is persisted and has correct data. + let persisted_miniblock = storage + .blocks_dal() + .get_miniblock_header(snapshot_recovery.miniblock_number + 1) + .await + .unwrap() + .expect("no miniblock persisted"); + assert_eq!( + persisted_miniblock.number, + snapshot_recovery.miniblock_number + 1 + ); + assert_eq!(persisted_miniblock.l2_tx_count, 1); + + let mut miniblock_hasher = MiniblockHasher::new( + persisted_miniblock.number, + persisted_miniblock.timestamp, + snapshot_recovery.miniblock_hash, + ); + miniblock_hasher.push_tx_hash(tx_hash); + assert_eq!( + persisted_miniblock.hash, + miniblock_hasher.finalize(ProtocolVersionId::latest()) + ); + + // Emulate node restart. 
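+    // The re-created I/O should resume after the newly sealed miniblock, while the L1 batch
+    // (which was never sealed) should be reported as pending again.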
+ let (mut mempool, _) = tester + .create_test_mempool_io(connection_pool.clone(), 1) + .await; + assert_eq!( + mempool.current_miniblock_number(), + snapshot_recovery.miniblock_number + 2 + ); + assert_eq!( + mempool.current_l1_batch_number(), + snapshot_recovery.l1_batch_number + 1 + ); + assert!(mempool.load_pending_batch().await.is_some()); + + let (_, l1_batch_env) = mempool + .wait_for_new_batch_params(Duration::from_secs(10)) + .await + .unwrap(); + assert_eq!(l1_batch_env.number, snapshot_recovery.l1_batch_number + 1); + assert_eq!( + l1_batch_env.previous_batch_hash, + Some(snapshot_recovery.l1_batch_root_hash) + ); + assert_eq!( + l1_batch_env.first_l2_block.prev_block_hash, + snapshot_recovery.miniblock_hash + ); +} + #[tokio::test] async fn miniblock_sealer_handle_blocking() { let pool = ConnectionPool::test_pool().await; From d9914ce820220f759a2fa2f8f8bf68deae25d853 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 19 Jan 2024 10:45:17 +0200 Subject: [PATCH 11/27] Adapt `load_previous_batch_version_id()` --- .../src/state_keeper/io/common/mod.rs | 23 +++++ .../src/state_keeper/io/common/tests.rs | 87 +++++++++++++++++++ .../src/state_keeper/io/mempool.rs | 15 +++- .../zksync_core/src/state_keeper/keeper.rs | 7 +- .../zksync_core/src/sync_layer/external_io.rs | 11 ++- 5 files changed, 133 insertions(+), 10 deletions(-) diff --git a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs index 0f4af1f58a9a..db9fc402bdae 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/mod.rs @@ -223,6 +223,29 @@ impl L1BatchParamsProvider { } } + pub(crate) async fn load_l1_batch_protocol_version( + &self, + storage: &mut StorageProcessor<'_>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result> { + if let Some(snapshot) = &self.snapshot { + if l1_batch_number == snapshot.l1_batch_number { + return Ok(Some(snapshot.protocol_version)); + } + anyhow::ensure!( + l1_batch_number > snapshot.l1_batch_number, + "Requested protocol version for pruned L1 batch #{l1_batch_number}; first retained batch is #{}", + snapshot.l1_batch_number + 1 + ); + } + + storage + .blocks_dal() + .get_batch_protocol_version_id(l1_batch_number) + .await + .map_err(Into::into) + } + /// Returns a header of the first miniblock in the specified L1 batch regardless of whether the batch is sealed or not. 
pub(crate) async fn load_first_miniblock_in_batch( &self, diff --git a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs index 8920bbf17dcb..ca838d7b33bc 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs @@ -8,6 +8,7 @@ use zksync_dal::ConnectionPool; use zksync_types::{ block::{BlockGasCount, MiniblockHasher}, fee::TransactionExecutionMetrics, + ProtocolVersion, }; use super::*; @@ -443,3 +444,89 @@ async fn loading_pending_batch_after_snapshot_recovery() { snapshot_recovery.miniblock_hash ); } + +#[tokio::test] +async fn getting_batch_version_with_genesis() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let mut genesis_params = GenesisParams::mock(); + genesis_params.protocol_version = ProtocolVersionId::Version5; + ensure_genesis_state(&mut storage, L2ChainId::default(), &genesis_params) + .await + .unwrap(); + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let version = provider + .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(0)) + .await + .unwrap(); + assert_eq!(version, Some(genesis_params.protocol_version)); + + assert!(provider + .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(1)) + .await + .unwrap() + .is_none()); + + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion::default()) + .await; + let new_l1_batch = create_l1_batch(1); + storage + .blocks_dal() + .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) + .await + .unwrap(); + + let version = provider + .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(1)) + .await + .unwrap(); + assert_eq!(version, new_l1_batch.protocol_version); +} + +#[tokio::test] +async fn getting_batch_version_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut storage = pool.access_storage().await.unwrap(); + let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); + let version = provider + .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number) + .await + .unwrap(); + assert_eq!(version, Some(snapshot_recovery.protocol_version)); + + assert!(provider + .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(1)) + .await + .is_err()); + assert!(provider + .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number + 1) + .await + .unwrap() + .is_none()); + + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion { + id: ProtocolVersionId::next(), + ..ProtocolVersion::default() + }) + .await; + let mut new_l1_batch = create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1); + new_l1_batch.protocol_version = Some(ProtocolVersionId::next()); + storage + .blocks_dal() + .insert_l1_batch(&new_l1_batch, &[], BlockGasCount::default(), &[], &[], 0) + .await + .unwrap(); + + let version = provider + .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number + 1) + .await + .unwrap(); + assert_eq!(version, new_l1_batch.protocol_version); +} diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index 61f1584ec99f..23b35231ace5 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ 
b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -342,11 +342,18 @@ impl StateKeeperIO for MempoolIO { } async fn load_previous_batch_version_id(&mut self) -> Option { - let mut storage = self.pool.access_storage().await.unwrap(); - storage - .blocks_dal() - .get_batch_protocol_version_id(self.current_l1_batch_number - 1) + let mut storage = self + .pool + .access_storage_tagged("state_keeper") .await + .unwrap(); + let prev_l1_batch_number = self.current_l1_batch_number - 1; + self.l1_batch_params_provider + .load_l1_batch_protocol_version(&mut storage, prev_l1_batch_number) + .await + .with_context(|| { + format!("failed loading protocol version for L1 batch #{prev_l1_batch_number}") + }) .unwrap() } diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs index 209809d33f95..995ca87910f0 100644 --- a/core/lib/zksync_core/src/state_keeper/keeper.rs +++ b/core/lib/zksync_core/src/state_keeper/keeper.rs @@ -134,8 +134,11 @@ impl ZkSyncStateKeeper { protocol_version, ); - let previous_batch_protocol_version = - self.io.load_previous_batch_version_id().await.unwrap(); + let previous_batch_protocol_version = self + .io + .load_previous_batch_version_id() + .await + .expect("No protocol version defined for previous L1 batch"); let version_changed = protocol_version != previous_batch_protocol_version; let mut protocol_upgrade_tx = if pending_miniblocks.is_empty() && version_changed { diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 7719e295ac65..c3938c40e12f 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -514,11 +514,14 @@ impl StateKeeperIO for ExternalIO { } async fn load_previous_batch_version_id(&mut self) -> Option { - let mut storage = self.pool.access_storage().await.unwrap(); - storage - .blocks_dal() - .get_batch_protocol_version_id(self.current_l1_batch_number - 1) + let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); + let prev_l1_batch_number = self.current_l1_batch_number - 1; + self.l1_batch_params_provider + .load_l1_batch_protocol_version(&mut storage, prev_l1_batch_number) .await + .with_context(|| { + format!("failed loading protocol version for L1 batch #{prev_l1_batch_number}") + }) .unwrap() } From 100d49a0064c198e3c3ffe9129c5964bca158cdc Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 25 Jan 2024 17:47:23 +0200 Subject: [PATCH 12/27] Refactor `prepare_recovery_snapshot()` util --- .../src/api_server/execution_sandbox/tests.rs | 4 +- .../src/api_server/web3/tests/mod.rs | 5 +- .../src/api_server/web3/tests/ws.rs | 8 ++- .../src/state_keeper/io/common/tests.rs | 30 +++------- .../src/state_keeper/io/tests/mod.rs | 4 +- .../sync_layer/batch_status_updater/tests.rs | 8 +-- core/lib/zksync_core/src/utils/testonly.rs | 57 +++++++------------ 7 files changed, 42 insertions(+), 74 deletions(-) diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs index d18bf5dafd01..62c872688b79 100644 --- a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs +++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs @@ -5,7 +5,7 @@ use assert_matches::assert_matches; use super::*; use crate::{ genesis::{ensure_genesis_state, GenesisParams}, - utils::testonly::{create_miniblock, prepare_empty_recovery_snapshot}, + 
utils::testonly::{create_miniblock, prepare_recovery_snapshot}, }; #[tokio::test] @@ -67,7 +67,7 @@ async fn creating_block_args() { async fn creating_block_args_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); assert_eq!( diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index b80f4bc5020f..85a02f3feb70 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -39,7 +39,7 @@ use crate::{ genesis::{ensure_genesis_state, GenesisParams}, utils::testonly::{ create_l1_batch, create_l1_batch_metadata, create_l2_transaction, create_miniblock, - prepare_empty_recovery_snapshot, prepare_recovery_snapshot, + prepare_recovery_snapshot, }, }; @@ -216,9 +216,6 @@ impl StorageInitialization { .await?; } } - Self::Recovery { logs, factory_deps } if logs.is_empty() && factory_deps.is_empty() => { - prepare_empty_recovery_snapshot(storage, Self::SNAPSHOT_RECOVERY_BLOCK).await; - } Self::Recovery { logs, factory_deps } => { prepare_recovery_snapshot(storage, Self::SNAPSHOT_RECOVERY_BLOCK, logs).await; storage diff --git a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs index a368854f9e75..7a0bd57f2ef6 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs @@ -98,8 +98,12 @@ async fn wait_for_notifier_miniblock( async fn notifiers_start_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - prepare_empty_recovery_snapshot(&mut storage, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK) - .await; + prepare_recovery_snapshot( + &mut storage, + StorageInitialization::SNAPSHOT_RECOVERY_BLOCK, + &[], + ) + .await; let (stop_sender, stop_receiver) = watch::channel(false); let (events_sender, mut events_receiver) = mpsc::unbounded_channel(); diff --git a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs index c7aff32fb680..f033ddda9d33 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs @@ -20,7 +20,7 @@ use crate::{ genesis::{ensure_genesis_state, GenesisParams}, utils::testonly::{ create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction, - prepare_empty_recovery_snapshot, + prepare_recovery_snapshot, }, }; @@ -69,7 +69,7 @@ async fn creating_io_cursor_with_genesis() { async fn creating_io_cursor_with_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; let cursor = IoCursor::new(&mut storage).await.unwrap(); assert_eq!(cursor.l1_batch, L1BatchNumber(24)); @@ -143,7 +143,7 @@ async fn waiting_for_l1_batch_params_with_genesis() { async fn waiting_for_l1_batch_params_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = 
pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (hash, timestamp) = provider @@ -261,7 +261,7 @@ async fn assert_first_miniblock_numbers( async fn getting_first_miniblock_in_batch_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let mut batches_and_miniblocks = HashMap::from([ @@ -382,31 +382,15 @@ async fn store_pending_miniblocks( async fn loading_pending_batch_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; - let contracts = GenesisParams::mock().base_system_contracts; - let factory_deps = HashMap::from([ - ( - contracts.bootloader.hash, - zksync_utils::be_words_to_bytes(&contracts.bootloader.code), - ), - ( - contracts.default_aa.hash, - zksync_utils::be_words_to_bytes(&contracts.default_aa.code), - ), - ]); + let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; let starting_miniblock_number = snapshot_recovery.miniblock_number.0 + 1; store_pending_miniblocks( &mut storage, starting_miniblock_number..=starting_miniblock_number + 1, - contracts.hashes(), + GenesisParams::mock().base_system_contracts.hashes(), ) .await; - storage - .storage_dal() - .insert_factory_deps(snapshot_recovery.miniblock_number + 1, &factory_deps) - .await - .unwrap(); let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let first_miniblock_in_batch = provider @@ -499,7 +483,7 @@ async fn getting_batch_version_with_genesis() { async fn getting_batch_version_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let version = provider diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index f4908e676246..b651bf347337 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -25,7 +25,7 @@ use crate::{ }, updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, }, - utils::testonly::prepare_empty_recovery_snapshot, + utils::testonly::prepare_recovery_snapshot, }; mod tester; @@ -443,7 +443,7 @@ async fn miniblock_and_l1_batch_processing_with_sync_sealer() { async fn miniblock_processing_after_snapshot_recovery() { let connection_pool = ConnectionPool::test_pool().await; let mut storage = connection_pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_empty_recovery_snapshot(&mut storage, 23).await; + let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; let tester = Tester::new(); let (mut mempool, _) = tester diff --git 
a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs index 7ca6e73c37cb..c3a98814f6db 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs @@ -12,7 +12,7 @@ use super::*; use crate::{ genesis::{ensure_genesis_state, GenesisParams}, sync_layer::metrics::L1BatchStage, - utils::testonly::{create_l1_batch, create_miniblock, prepare_empty_recovery_snapshot}, + utils::testonly::{create_l1_batch, create_miniblock, prepare_recovery_snapshot}, }; async fn seal_l1_batch(storage: &mut StorageProcessor<'_>, number: L1BatchNumber) { @@ -261,7 +261,7 @@ async fn updater_cursor_for_storage_with_genesis_block() { async fn updater_cursor_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - prepare_empty_recovery_snapshot(&mut storage, 23).await; + prepare_recovery_snapshot(&mut storage, 23, &[]).await; let cursor = UpdaterCursor::new(&mut storage).await.unwrap(); assert_eq!(cursor.last_committed_l1_batch, L1BatchNumber(23)); @@ -275,7 +275,7 @@ async fn normal_updater_operation(snapshot_recovery: bool, async_batches: bool) let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let first_batch_number = if snapshot_recovery { - prepare_empty_recovery_snapshot(&mut storage, 23).await; + prepare_recovery_snapshot(&mut storage, 23, &[]).await; L1BatchNumber(24) } else { ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) @@ -347,7 +347,7 @@ async fn updater_with_gradual_main_node_updates(snapshot_recovery: bool) { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let first_batch_number = if snapshot_recovery { - prepare_empty_recovery_snapshot(&mut storage, 23).await; + prepare_recovery_snapshot(&mut storage, 23, &[]).await; L1BatchNumber(24) } else { ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index 09491add9f41..cb5c4bbbe335 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -1,5 +1,7 @@ //! Test utils. +use std::collections::HashMap; + use multivm::utils::get_max_gas_per_pubdata_byte; use zksync_contracts::BaseSystemContractsHashes; use zksync_dal::StorageProcessor; @@ -18,7 +20,7 @@ use zksync_types::{ StorageLog, H256, U256, }; -use crate::l1_gas_price::L1GasPriceProvider; +use crate::{genesis::GenesisParams, l1_gas_price::L1GasPriceProvider}; /// Creates a miniblock header with the specified number and deterministic contents. pub(crate) fn create_miniblock(number: u32) -> MiniblockHeader { @@ -136,17 +138,26 @@ pub(crate) async fn prepare_recovery_snapshot( .protocol_versions_dal() .save_protocol_version_with_tx(ProtocolVersion::default()) .await; - // TODO (PLA-596): Don't insert L1 batches / miniblocks once the relevant foreign keys are removed + let miniblock = create_miniblock(l1_batch_number); - storage - .blocks_dal() - .insert_miniblock(&miniblock) - .await - .unwrap(); let l1_batch = create_l1_batch(l1_batch_number); + // Miniblock and L1 batch are intentionally **not** inserted into the storage. + + // Store factory deps for the base system contracts. 
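// Why these deps must be stored: after snapshot recovery there is no genesis
// miniblock carrying the bootloader / default AA bytecodes, yet batch
// initialization resolves both by hash before the VM can run. A hedged sketch
// of that resolution path (`get_factory_dep` is an assumed accessor name):
async fn load_base_contract(
    storage: &mut StorageProcessor<'_>,
    hash: H256,
) -> SystemContractCode {
    let bytecode = storage
        .storage_dal()
        .get_factory_dep(hash)
        .await
        .unwrap_or_else(|| panic!("no factory dep for hash {hash:?}"));
    SystemContractCode {
        code: zksync_utils::bytes_to_be_words(bytecode),
        hash,
    }
}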
+ let contracts = GenesisParams::mock().base_system_contracts; + let factory_deps = HashMap::from([ + ( + contracts.bootloader.hash, + zksync_utils::be_words_to_bytes(&contracts.bootloader.code), + ), + ( + contracts.default_aa.hash, + zksync_utils::be_words_to_bytes(&contracts.default_aa.code), + ), + ]); storage - .blocks_dal() - .insert_l1_batch(&l1_batch, &[], Default::default(), &[], &[], 0) + .storage_dal() + .insert_factory_deps(miniblock.number, &factory_deps) .await .unwrap(); @@ -182,34 +193,6 @@ pub(crate) async fn prepare_recovery_snapshot( snapshot_recovery } -// TODO (PLA-596): Replace with `prepare_recovery_snapshot(.., &[])` -pub(crate) async fn prepare_empty_recovery_snapshot( - storage: &mut StorageProcessor<'_>, - l1_batch_number: u32, -) -> SnapshotRecoveryStatus { - storage - .protocol_versions_dal() - .save_protocol_version_with_tx(ProtocolVersion::default()) - .await; - - let snapshot_recovery = SnapshotRecoveryStatus { - l1_batch_number: l1_batch_number.into(), - l1_batch_timestamp: l1_batch_number.into(), - l1_batch_root_hash: H256::zero(), - miniblock_number: l1_batch_number.into(), - miniblock_timestamp: l1_batch_number.into(), - miniblock_hash: H256::zero(), // not used - protocol_version: ProtocolVersionId::latest(), - storage_logs_chunks_processed: vec![true; 100], - }; - storage - .snapshot_recovery_dal() - .insert_initial_recovery_status(&snapshot_recovery) - .await - .unwrap(); - snapshot_recovery -} - /// Mock [`L1GasPriceProvider`] that returns a constant value. #[derive(Debug)] pub(crate) struct MockL1GasPriceProvider(pub u64); From 65c791d792a48b920171e553895fffce6aa9409e Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 26 Jan 2024 11:02:36 +0200 Subject: [PATCH 13/27] Test miniblock processing after snapshot recovery --- .../src/state_keeper/io/mempool.rs | 1 + .../src/state_keeper/io/tests/mod.rs | 78 ++++++++++++------- .../src/state_keeper/io/tests/tester.rs | 10 ++- .../zksync_core/src/state_keeper/keeper.rs | 12 +-- .../zksync_core/src/state_keeper/tests/mod.rs | 8 +- .../src/state_keeper/updates/mod.rs | 13 ++-- core/lib/zksync_core/src/utils/testonly.rs | 12 +-- 7 files changed, 74 insertions(+), 60 deletions(-) diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index b8390115e148..74a21e80af79 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -161,6 +161,7 @@ impl StateKeeperIO for MempoolIO { max_wait: Duration, ) -> Option<(SystemEnv, L1BatchEnv)> { let deadline = Instant::now() + max_wait; + // FIXME: why do we wait for hash immediately and not below? (changed in #809) let prev_l1_batch_hash = self.load_previous_l1_batch_hash().await; // Block until at least one transaction in the mempool can match the filter (or timeout happens). 
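// The blocking referenced above, as a self-contained hedged sketch (the real
// loop also refreshes the fee filter between polls):
use std::time::{Duration, Instant};

async fn wait_for_matching_tx(
    mempool: &mut MempoolGuard,
    filter: &L2TxFilter,
    deadline: Instant,
    poll_interval: Duration,
) -> bool {
    while !mempool.has_next(filter) {
        if Instant::now() > deadline {
            return false; // timed out; no batch params are produced this round
        }
        tokio::time::sleep(poll_interval).await;
    }
    true
}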
diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index b651bf347337..40e7d7d89441 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -7,6 +7,7 @@ use zksync_dal::ConnectionPool; use zksync_mempool::L2TxFilter; use zksync_types::{ block::{BlockGasCount, MiniblockHasher}, + fee::TransactionExecutionMetrics, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, tx::ExecutionMetrics, AccountTreeId, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, StorageKey, VmEvent, @@ -21,7 +22,7 @@ use crate::{ mempool_actor::l2_tx_filter, tests::{ create_execution_result, create_transaction, create_updates_manager, - default_l1_batch_env, default_vm_block_result, Query, + default_l1_batch_env, default_system_env, default_vm_block_result, Query, }, updates::{MiniblockSealCommand, MiniblockUpdates, UpdatesManager}, }, @@ -379,11 +380,7 @@ async fn test_miniblock_and_l1_batch_processing( .await; let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); - let mut updates = UpdatesManager::new( - l1_batch_env.clone(), - BaseSystemContractsHashes::default(), - ProtocolVersionId::latest(), - ); + let mut updates = UpdatesManager::new(&l1_batch_env, &default_system_env()); let tx = create_transaction(10, 100); updates.extend_from_executed_transaction( @@ -437,8 +434,6 @@ async fn miniblock_and_l1_batch_processing_with_sync_sealer() { test_miniblock_and_l1_batch_processing(pool, 0).await; } -// FIXME (PLA-589): Doesn't work because of missing system contracts, which cannot be added w/o a miniblock -#[ignore] #[tokio::test] async fn miniblock_processing_after_snapshot_recovery() { let connection_pool = ConnectionPool::test_pool().await; @@ -446,8 +441,8 @@ async fn miniblock_processing_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; let tester = Tester::new(); - let (mut mempool, _) = tester - .create_test_mempool_io(connection_pool.clone(), 1) + let (mut mempool, mut mempool_guard) = tester + .create_test_mempool_io(connection_pool.clone(), 0) .await; assert_eq!( mempool.current_miniblock_number(), @@ -459,7 +454,23 @@ async fn miniblock_processing_after_snapshot_recovery() { ); assert!(mempool.load_pending_batch().await.is_none()); - let (_, l1_batch_env) = mempool + // Insert a transaction into the mempool in order to open a new batch. 
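// Why the insert below is needed: `wait_for_new_batch_params` only returns
// once the mempool holds a transaction that satisfies the current fee filter.
// A simplified, hedged sketch of that predicate:
fn tx_matches_filter(tx: &L2Tx, filter: &L2TxFilter) -> bool {
    tx.common_data.fee.max_fee_per_gas >= filter.fee_per_gas.into()
        && tx.common_data.fee.gas_per_pubdata_limit >= filter.gas_per_pubdata.into()
}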
+ let tx_filter = l2_tx_filter( + &tester.create_batch_fee_input_provider().await, + ProtocolVersionId::latest().into(), + ) + .await; + let tx = tester.insert_tx( + &mut mempool_guard, + tx_filter.fee_per_gas, + tx_filter.gas_per_pubdata, + ); + storage + .transactions_dal() + .insert_transaction_l2(tx.clone(), TransactionExecutionMetrics::default()) + .await; + + let (system_env, l1_batch_env) = mempool .wait_for_new_batch_params(Duration::from_secs(10)) .await .unwrap(); @@ -473,16 +484,11 @@ async fn miniblock_processing_after_snapshot_recovery() { snapshot_recovery.miniblock_hash ); - let mut updates = UpdatesManager::new( - l1_batch_env, - BaseSystemContractsHashes::default(), - ProtocolVersionId::latest(), - ); + let mut updates = UpdatesManager::new(&l1_batch_env, &system_env); - let tx = create_transaction(10, 100); let tx_hash = tx.hash(); updates.extend_from_executed_transaction( - tx, + tx.into(), create_execution_result(0, []), vec![], BlockGasCount::default(), @@ -515,9 +521,17 @@ async fn miniblock_processing_after_snapshot_recovery() { miniblock_hasher.finalize(ProtocolVersionId::latest()) ); + let miniblock_transactions = storage + .transactions_web3_dal() + .get_raw_miniblock_transactions(persisted_miniblock.number) + .await + .unwrap(); + assert_eq!(miniblock_transactions.len(), 1); + assert_eq!(miniblock_transactions[0].hash(), tx_hash); + // Emulate node restart. let (mut mempool, _) = tester - .create_test_mempool_io(connection_pool.clone(), 1) + .create_test_mempool_io(connection_pool.clone(), 0) .await; assert_eq!( mempool.current_miniblock_number(), @@ -527,21 +541,31 @@ async fn miniblock_processing_after_snapshot_recovery() { mempool.current_l1_batch_number(), snapshot_recovery.l1_batch_number + 1 ); - assert!(mempool.load_pending_batch().await.is_some()); - let (_, l1_batch_env) = mempool - .wait_for_new_batch_params(Duration::from_secs(10)) - .await - .unwrap(); - assert_eq!(l1_batch_env.number, snapshot_recovery.l1_batch_number + 1); + let pending_batch = mempool.load_pending_batch().await.unwrap(); assert_eq!( - l1_batch_env.previous_batch_hash, + pending_batch.l1_batch_env.number, + snapshot_recovery.l1_batch_number + 1 + ); + assert_eq!( + pending_batch.l1_batch_env.previous_batch_hash, Some(snapshot_recovery.l1_batch_root_hash) ); assert_eq!( - l1_batch_env.first_l2_block.prev_block_hash, + pending_batch.l1_batch_env.first_l2_block.prev_block_hash, + snapshot_recovery.miniblock_hash + ); + assert_eq!(pending_batch.pending_miniblocks.len(), 1); + assert_eq!( + pending_batch.pending_miniblocks[0].number, + snapshot_recovery.miniblock_number + 1 + ); + assert_eq!( + pending_batch.pending_miniblocks[0].prev_block_hash, snapshot_recovery.miniblock_hash ); + assert_eq!(pending_batch.pending_miniblocks[0].txs.len(), 1); + assert_eq!(pending_batch.pending_miniblocks[0].txs[0].hash(), tx_hash); } #[tokio::test] diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs index d15907173c35..3d681d065d06 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/tester.rs @@ -12,6 +12,7 @@ use zksync_types::{ block::MiniblockHeader, fee::TransactionExecutionMetrics, fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV1}, + l2::L2Tx, protocol_version::L1VerifierConfig, system_contracts::get_system_smart_contracts, tx::TransactionExecutionResult, @@ -22,7 +23,7 @@ use crate::{ fee_model::MainNodeFeeInputProvider, 
genesis::create_genesis_l1_batch, l1_gas_price::GasAdjuster, - state_keeper::{io::MiniblockSealer, tests::create_transaction, MempoolGuard, MempoolIO}, + state_keeper::{io::MiniblockSealer, MempoolGuard, MempoolIO}, utils::testonly::{ create_l1_batch, create_l2_transaction, create_miniblock, execute_l2_transaction, }, @@ -213,8 +214,9 @@ impl Tester { guard: &mut MempoolGuard, fee_per_gas: u64, gas_per_pubdata: u32, - ) { - let tx = create_transaction(fee_per_gas, gas_per_pubdata); - guard.insert(vec![tx], Default::default()); + ) -> L2Tx { + let tx = create_l2_transaction(fee_per_gas, gas_per_pubdata); + guard.insert(vec![tx.clone().into()], Default::default()); + tx } } diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs index 70d100dc599f..4bd0857337b4 100644 --- a/core/lib/zksync_core/src/state_keeper/keeper.rs +++ b/core/lib/zksync_core/src/state_keeper/keeper.rs @@ -128,11 +128,7 @@ impl ZkSyncStateKeeper { }; let protocol_version = system_env.version; - let mut updates_manager = UpdatesManager::new( - l1_batch_env.clone(), - system_env.base_system_smart_contracts.hashes(), - protocol_version, - ); + let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); let previous_batch_protocol_version = self .io @@ -211,11 +207,7 @@ impl ZkSyncStateKeeper { // Start the new batch. (system_env, l1_batch_env) = self.wait_for_new_batch_params().await?; - updates_manager = UpdatesManager::new( - l1_batch_env.clone(), - system_env.base_system_smart_contracts.hashes(), - system_env.version, - ); + updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); batch_executor = self .batch_executor_base .init_batch( diff --git a/core/lib/zksync_core/src/state_keeper/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/tests/mod.rs index d3cf1335bdc5..2fccf1c7d793 100644 --- a/core/lib/zksync_core/src/state_keeper/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/tests/mod.rs @@ -15,7 +15,7 @@ use multivm::{ }; use once_cell::sync::Lazy; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{ aggregated_operations::AggregatedActionType, @@ -115,11 +115,7 @@ pub(super) fn default_vm_block_result() -> FinishedL1Batch { pub(super) fn create_updates_manager() -> UpdatesManager { let l1_batch_env = default_l1_batch_env(1, 1, Address::default()); - UpdatesManager::new( - l1_batch_env, - BaseSystemContractsHashes::default(), - ProtocolVersionId::latest(), - ) + UpdatesManager::new(&l1_batch_env, &default_system_env()) } pub(super) fn create_transaction(fee_per_gas: u64, gas_per_pubdata: u32) -> Transaction { diff --git a/core/lib/zksync_core/src/state_keeper/updates/mod.rs b/core/lib/zksync_core/src/state_keeper/updates/mod.rs index 7718882af283..f6a9f9b15f0b 100644 --- a/core/lib/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/updates/mod.rs @@ -1,5 +1,5 @@ use multivm::{ - interface::{L1BatchEnv, VmExecutionResultAndLogs}, + interface::{L1BatchEnv, SystemEnv, VmExecutionResultAndLogs}, utils::get_batch_base_fee, }; use zksync_contracts::BaseSystemContractsHashes; @@ -36,17 +36,14 @@ pub struct UpdatesManager { } impl UpdatesManager { - pub(crate) fn new( - l1_batch_env: L1BatchEnv, - base_system_contract_hashes: BaseSystemContractsHashes, - protocol_version: ProtocolVersionId, - ) -> Self { + 
pub(crate) fn new(l1_batch_env: &L1BatchEnv, system_env: &SystemEnv) -> Self { + let protocol_version = system_env.version; Self { batch_timestamp: l1_batch_env.timestamp, batch_fee_input: l1_batch_env.fee_input, - base_fee_per_gas: get_batch_base_fee(&l1_batch_env, protocol_version.into()), + base_fee_per_gas: get_batch_base_fee(l1_batch_env, protocol_version.into()), protocol_version, - base_system_contract_hashes, + base_system_contract_hashes: system_env.base_system_smart_contracts.hashes(), l1_batch: L1BatchUpdates::new(), miniblock: MiniblockUpdates::new( l1_batch_env.first_l2_block.timestamp, diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index cb5c4bbbe335..64a893eb9906 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -134,17 +134,19 @@ pub(crate) async fn prepare_recovery_snapshot( .collect(); let l1_batch_root_hash = ZkSyncTree::process_genesis_batch(&tree_instructions).root_hash; - storage - .protocol_versions_dal() - .save_protocol_version_with_tx(ProtocolVersion::default()) - .await; - let miniblock = create_miniblock(l1_batch_number); let l1_batch = create_l1_batch(l1_batch_number); // Miniblock and L1 batch are intentionally **not** inserted into the storage. // Store factory deps for the base system contracts. let contracts = GenesisParams::mock().base_system_contracts; + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion { + base_system_contracts_hashes: contracts.hashes(), + ..ProtocolVersion::default() + }) + .await; let factory_deps = HashMap::from([ ( contracts.bootloader.hash, From 8f8de9b8d265cd68d673ef5eccbbe3d359aa8d7a Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 26 Jan 2024 13:01:55 +0200 Subject: [PATCH 14/27] Fix `sync_block()` DB query --- ...bdbe73cb6fd88165b4d75edb4e37603ccbff.json} | 4 +- core/lib/dal/src/blocks_web3_dal.rs | 17 ++----- core/lib/dal/src/storage_web3_dal.rs | 17 ++----- core/lib/dal/src/sync_dal.rs | 50 ++++++++++++++++++- core/lib/dal/src/tests/mod.rs | 18 ++++++- 5 files changed, 77 insertions(+), 29 deletions(-) rename core/lib/dal/.sqlx/{query-5880a85667ccc26d392ff6272e317afe4e38bcfe5ce93bf229d68622066ab8a1.json => query-074fa007cca1761ed99f139e614ebdbe73cb6fd88165b4d75edb4e37603ccbff.json} (59%) diff --git a/core/lib/dal/.sqlx/query-5880a85667ccc26d392ff6272e317afe4e38bcfe5ce93bf229d68622066ab8a1.json b/core/lib/dal/.sqlx/query-074fa007cca1761ed99f139e614ebdbe73cb6fd88165b4d75edb4e37603ccbff.json similarity index 59% rename from core/lib/dal/.sqlx/query-5880a85667ccc26d392ff6272e317afe4e38bcfe5ce93bf229d68622066ab8a1.json rename to core/lib/dal/.sqlx/query-074fa007cca1761ed99f139e614ebdbe73cb6fd88165b4d75edb4e37603ccbff.json index 31ce6f31993f..a121f197d0a6 100644 --- a/core/lib/dal/.sqlx/query-5880a85667ccc26d392ff6272e317afe4e38bcfe5ce93bf229d68622066ab8a1.json +++ b/core/lib/dal/.sqlx/query-074fa007cca1761ed99f139e614ebdbe73cb6fd88165b4d75edb4e37603ccbff.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n (\n SELECT\n MAX(m2.number)\n FROM\n miniblocks m2\n WHERE\n miniblocks.l1_batch_number = m2.l1_batch_number\n ) AS \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n 
miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n l1_batches.fee_account_address AS \"fee_account_address?\"\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (\n SELECT\n MAX(m2.number)\n FROM\n miniblocks m2\n WHERE\n miniblocks.l1_batch_number = m2.l1_batch_number\n ) AS \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n l1_batches.fee_account_address AS \"fee_account_address?\"\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -90,5 +90,5 @@ false ] }, - "hash": "5880a85667ccc26d392ff6272e317afe4e38bcfe5ce93bf229d68622066ab8a1" + "hash": "074fa007cca1761ed99f139e614ebdbe73cb6fd88165b4d75edb4e37603ccbff" } diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index bb91f4129d08..ff7e110dedd6 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -597,13 +597,15 @@ mod tests { use zksync_types::{ block::{MiniblockHasher, MiniblockHeader}, fee::TransactionExecutionMetrics, - snapshots::SnapshotRecoveryStatus, MiniblockNumber, ProtocolVersion, ProtocolVersionId, }; use super::*; use crate::{ - tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, + tests::{ + create_miniblock_header, create_snapshot_recovery, mock_execution_result, + mock_l2_transaction, + }, ConnectionPool, }; @@ -762,16 +764,7 @@ mod tests { async fn resolving_pending_block_id_for_snapshot_recovery() { let connection_pool = ConnectionPool::test_pool().await; let mut conn = connection_pool.access_storage().await.unwrap(); - let snapshot_recovery = SnapshotRecoveryStatus { - l1_batch_number: L1BatchNumber(23), - l1_batch_timestamp: 23, - l1_batch_root_hash: H256::zero(), - miniblock_number: MiniblockNumber(42), - miniblock_timestamp: 42, - miniblock_hash: H256::zero(), - protocol_version: ProtocolVersionId::latest(), - storage_logs_chunks_processed: vec![true; 100], - }; + let snapshot_recovery = create_snapshot_recovery(); conn.snapshot_recovery_dal() .insert_initial_recovery_status(&snapshot_recovery) .await diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index ae8e4c5b7505..e5b83169c9e1 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -259,12 +259,14 @@ impl StorageWeb3Dal<'_, '_> { mod tests { use zksync_types::{ block::{BlockGasCount, L1BatchHeader}, - snapshots::SnapshotRecoveryStatus, ProtocolVersion, ProtocolVersionId, }; use super::*; - use crate::{tests::create_miniblock_header, ConnectionPool}; + use crate::{ + tests::{create_miniblock_header, create_snapshot_recovery}, + ConnectionPool, + }; #[tokio::test] async fn resolving_l1_batch_number_of_miniblock() { @@ -341,16 +343,7 @@ mod tests { conn.protocol_versions_dal() 
.save_protocol_version_with_tx(ProtocolVersion::default()) .await; - let snapshot_recovery = SnapshotRecoveryStatus { - l1_batch_number: L1BatchNumber(23), - l1_batch_timestamp: 23, - l1_batch_root_hash: H256::zero(), - miniblock_number: MiniblockNumber(42), - miniblock_timestamp: 42, - miniblock_hash: H256::zero(), - protocol_version: ProtocolVersionId::latest(), - storage_logs_chunks_processed: vec![true; 100], - }; + let snapshot_recovery = create_snapshot_recovery(); conn.snapshot_recovery_dal() .insert_initial_recovery_status(&snapshot_recovery) .await diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index 53310603d2ce..2c158590044e 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -30,6 +30,12 @@ impl SyncDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + ), + ( + SELECT + MAX(l1_batch_number) + 1 + FROM + snapshot_recovery ) ) AS "l1_batch_number!", ( @@ -102,7 +108,10 @@ mod tests { use super::*; use crate::{ - tests::{create_miniblock_header, mock_execution_result, mock_l2_transaction}, + tests::{ + create_miniblock_header, create_snapshot_recovery, mock_execution_result, + mock_l2_transaction, + }, ConnectionPool, }; @@ -220,4 +229,43 @@ mod tests { assert!(block.last_in_batch); assert_eq!(block.operator_address, l1_batch_header.fee_account_address); } + + #[tokio::test] + async fn sync_block_after_snapshot_recovery() { + let pool = ConnectionPool::test_pool().await; + let mut conn = pool.access_storage().await.unwrap(); + + // Simulate snapshot recovery. + conn.protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion::default()) + .await; + let snapshot_recovery = create_snapshot_recovery(); + conn.snapshot_recovery_dal() + .insert_initial_recovery_status(&snapshot_recovery) + .await + .unwrap(); + + assert!(conn + .sync_dal() + .sync_block(snapshot_recovery.miniblock_number, Address::zero(), false) + .await + .unwrap() + .is_none()); + + let miniblock_header = create_miniblock_header(snapshot_recovery.miniblock_number.0 + 1); + conn.blocks_dal() + .insert_miniblock(&miniblock_header) + .await + .unwrap(); + + let block = conn + .sync_dal() + .sync_block(miniblock_header.number, Address::zero(), false) + .await + .unwrap() + .expect("No new miniblock"); + assert_eq!(block.number, miniblock_header.number); + assert_eq!(block.timestamp, miniblock_header.timestamp); + assert_eq!(block.l1_batch_number, snapshot_recovery.l1_batch_number + 1); + } } diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index 5b285ff04f8b..393288558b07 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -8,9 +8,10 @@ use zksync_types::{ helpers::unix_timestamp_ms, l1::{L1Tx, OpProcessingType, PriorityQueueType}, l2::L2Tx, + snapshots::SnapshotRecoveryStatus, tx::{tx_execution_info::TxExecutionStatus, ExecutionMetrics, TransactionExecutionResult}, - Address, Execute, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber, PriorityOpId, - ProtocolVersionId, H160, H256, U256, + Address, Execute, L1BatchNumber, L1BlockNumber, L1TxCommonData, L2ChainId, MiniblockNumber, + PriorityOpId, ProtocolVersionId, H160, H256, U256, }; use crate::{ @@ -117,6 +118,19 @@ pub(crate) fn mock_execution_result(transaction: L2Tx) -> TransactionExecutionRe } } +pub(crate) fn create_snapshot_recovery() -> SnapshotRecoveryStatus { + SnapshotRecoveryStatus { + l1_batch_number: L1BatchNumber(23), + l1_batch_timestamp: 23, + l1_batch_root_hash: H256::zero(), + miniblock_number: MiniblockNumber(42), + 
miniblock_timestamp: 42, + miniblock_hash: H256::zero(), + protocol_version: ProtocolVersionId::latest(), + storage_logs_chunks_processed: vec![true; 100], + } +} + #[tokio::test] async fn workflow_with_submit_tx_equal_hashes() { let connection_pool = ConnectionPool::test_pool().await; From 2bf0b58f1583cad8ccfac602f5ab32a3f6600277 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 26 Jan 2024 13:02:22 +0200 Subject: [PATCH 15/27] Remove unused file --- .../src/sync_layer/gossip/conversions.rs | 38 ------------------- 1 file changed, 38 deletions(-) delete mode 100644 core/lib/zksync_core/src/sync_layer/gossip/conversions.rs diff --git a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs deleted file mode 100644 index de9f00093fa9..000000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs +++ /dev/null @@ -1,38 +0,0 @@ -//! Conversion logic between server and consensus types. -use anyhow::Context as _; -use zksync_consensus_roles::validator::FinalBlock; -use zksync_dal::blocks_dal::ConsensusBlockFields; -use zksync_types::MiniblockNumber; - -use crate::{consensus, sync_layer::fetcher::FetchedBlock}; - -impl FetchedBlock { - pub(super) fn from_gossip_block( - block: &FinalBlock, - last_in_batch: bool, - ) -> anyhow::Result { - let number = u32::try_from(block.header.number.0) - .context("Integer overflow converting block number")?; - let payload = consensus::Payload::decode(&block.payload) - .context("Failed deserializing block payload")?; - - Ok(Self { - number: MiniblockNumber(number), - l1_batch_number: payload.l1_batch_number, - last_in_batch, - protocol_version: payload.protocol_version, - timestamp: payload.timestamp, - reference_hash: Some(payload.hash), - l1_gas_price: payload.l1_gas_price, - l2_fair_gas_price: payload.l2_fair_gas_price, - fair_pubdata_price: payload.fair_pubdata_price, - virtual_blocks: payload.virtual_blocks, - operator_address: payload.operator_address, - transactions: payload.transactions, - consensus: Some(ConsensusBlockFields { - parent: block.header.parent, - justification: block.justification.clone(), - }), - }) - } -} From bc6de9715f55d21636a624ec50bcb4436fe50c66 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 26 Jan 2024 13:03:35 +0200 Subject: [PATCH 16/27] Fix fee account retrieval after snapshot recovery --- .../zksync_core/src/sync_layer/external_io.rs | 43 +++++++++++++------ 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 458d2ef6fe40..7cf3f719f032 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -250,19 +250,6 @@ impl StateKeeperIO for ExternalIO { async fn load_pending_batch(&mut self) -> Option { let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap(); - // TODO (BFT-99): Do not assume that fee account is the same as in previous batch. 
- let fee_account = storage - .blocks_dal() - .get_l1_batch_header(self.current_l1_batch_number - 1) - .await - .unwrap() - .unwrap_or_else(|| { - panic!( - "No block header for batch {}", - self.current_l1_batch_number - 1 - ) - }) - .fee_account_address; let mut pending_miniblock_header = self .l1_batch_params_provider .load_first_miniblock_in_batch(&mut storage, self.current_l1_batch_number) @@ -293,6 +280,36 @@ impl StateKeeperIO for ExternalIO { pending_miniblock_header.set_protocol_version(sync_block.protocol_version); } + // TODO: this workaround won't be necessary once fee address is moved to miniblock entity. + let fee_account = storage + .blocks_dal() + .get_fee_address_for_l1_batch(self.current_l1_batch_number - 1) + .await + .expect("Failed getting fee address for previous L1 batch"); + let fee_account = match fee_account { + Some(account) => account, + None => { + let last_miniblock_number_in_prev_batch = pending_miniblock_header.number() - 1; + tracing::info!( + "Fee address for L1 batch #{} is not available locally; fetching from main node \ + (miniblock #{last_miniblock_number_in_prev_batch})", + self.current_l1_batch_number - 1 + ); + let prev_block = self + .main_node_client + .fetch_l2_block(last_miniblock_number_in_prev_batch, false) + .await + .unwrap() + .context("failed fetching block in previous L1 batch from main node") + .unwrap(); + assert_eq!( + prev_block.l1_batch_number, self.current_l1_batch_number - 1, + "Miniblock {prev_block:?} fetched to fill fee address has unexpected L1 batch number" + ); + prev_block.operator_address + } + }; + let (system_env, l1_batch_env) = self .l1_batch_params_provider .load_l1_batch_params( From a2375f7797ddaa8d4c62ca66c5b16ff1cba190b7 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 26 Jan 2024 13:03:57 +0200 Subject: [PATCH 17/27] Test `ExternalIO` after snapshot recovery --- .../lib/zksync_core/src/consensus/testonly.rs | 36 +++- .../zksync_core/src/sync_layer/external_io.rs | 4 +- core/lib/zksync_core/src/sync_layer/tests.rs | 191 ++++++++++++------ 3 files changed, 167 insertions(+), 64 deletions(-) diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index d6444dcd32f7..8e63b1048bd4 100644 --- a/core/lib/zksync_core/src/consensus/testonly.rs +++ b/core/lib/zksync_core/src/consensus/testonly.rs @@ -1,4 +1,5 @@ //! Utilities for testing the consensus module. 
+ use anyhow::Context as _; use rand::Rng; use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time}; @@ -6,8 +7,8 @@ use zksync_consensus_roles::validator; use zksync_contracts::{BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::ConnectionPool; use zksync_types::{ - api, block::MiniblockHasher, Address, L1BatchNumber, L2ChainId, MiniblockNumber, - ProtocolVersionId, H256, + api, block::MiniblockHasher, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, + L2ChainId, MiniblockNumber, ProtocolVersionId, H256, }; use crate::{ @@ -31,9 +32,35 @@ use crate::{ pub(crate) struct MockMainNodeClient { prev_miniblock_hash: H256, l2_blocks: Vec, + block_number_offset: u32, } impl MockMainNodeClient { + pub fn for_snapshot_recovery(snapshot: &SnapshotRecoveryStatus) -> Self { + // This block may be requested during node initialization + let last_miniblock_in_snapshot_batch = api::en::SyncBlock { + number: snapshot.miniblock_number, + l1_batch_number: snapshot.l1_batch_number, + last_in_batch: true, + timestamp: snapshot.miniblock_timestamp, + l1_gas_price: 2, + l2_fair_gas_price: 3, + fair_pubdata_price: Some(24), + base_system_contracts_hashes: BaseSystemContractsHashes::default(), + operator_address: Address::repeat_byte(2), + transactions: Some(vec![]), + virtual_blocks: Some(0), + hash: Some(snapshot.miniblock_hash), + protocol_version: ProtocolVersionId::latest(), + }; + + Self { + prev_miniblock_hash: snapshot.miniblock_hash, + l2_blocks: vec![last_miniblock_in_snapshot_batch], + block_number_offset: snapshot.miniblock_number.0, + } + } + /// `miniblock_count` doesn't include a fictive miniblock. Returns hashes of generated transactions. pub fn push_l1_batch(&mut self, miniblock_count: u32) -> Vec { let l1_batch_number = self @@ -129,7 +156,10 @@ impl MainNodeClient for MockMainNodeClient { number: MiniblockNumber, with_transactions: bool, ) -> anyhow::Result> { - let Some(mut block) = self.l2_blocks.get(number.0 as usize).cloned() else { + let Some(block_index) = number.0.checked_sub(self.block_number_offset) else { + return Ok(None); + }; + let Some(mut block) = self.l2_blocks.get(block_index as usize).cloned() else { return Ok(None); }; if !with_transactions { diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 7cf3f719f032..66c22020ac69 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -280,7 +280,7 @@ impl StateKeeperIO for ExternalIO { pending_miniblock_header.set_protocol_version(sync_block.protocol_version); } - // TODO: this workaround won't be necessary once fee address is moved to miniblock entity. + // TODO (PLA-674): this workaround won't be necessary once fee address is moved to miniblock entity. 
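// The workaround below in miniature, as a hedged standalone sketch: prefer the
// locally stored fee address, and only fall back to fetching the last
// miniblock of the previous batch from the main node (assumes
// `use anyhow::Context as _;` in scope):
async fn resolve_fee_account(
    storage: &mut StorageProcessor<'_>,
    client: &dyn MainNodeClient,
    prev_l1_batch: L1BatchNumber,
    last_miniblock_in_prev_batch: MiniblockNumber,
) -> anyhow::Result<Address> {
    if let Some(address) = storage
        .blocks_dal()
        .get_fee_address_for_l1_batch(prev_l1_batch)
        .await?
    {
        return Ok(address);
    }
    let block = client
        .fetch_l2_block(last_miniblock_in_prev_batch, false)
        .await?
        .context("main node has no miniblock for the previous L1 batch")?;
    anyhow::ensure!(
        block.l1_batch_number == prev_l1_batch,
        "fetched miniblock belongs to an unexpected L1 batch"
    );
    Ok(block.operator_address)
}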
let fee_account = storage .blocks_dal() .get_fee_address_for_l1_batch(self.current_l1_batch_number - 1) @@ -292,7 +292,7 @@ impl StateKeeperIO for ExternalIO { let last_miniblock_number_in_prev_batch = pending_miniblock_header.number() - 1; tracing::info!( "Fee address for L1 batch #{} is not available locally; fetching from main node \ - (miniblock #{last_miniblock_number_in_prev_batch})", + (in miniblock #{last_miniblock_number_in_prev_batch})", self.current_l1_batch_number - 1 ); let prev_block = self diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 618518dd5685..64d0bfbc6726 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -6,11 +6,14 @@ use std::{ time::{Duration, Instant}, }; +use test_casing::test_casing; use tokio::{sync::watch, task::JoinHandle}; use zksync_config::configs::chain::NetworkConfig; use zksync_dal::{ConnectionPool, StorageProcessor}; use zksync_types::{ + block::MiniblockHasher, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, + snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2ChainId, MiniblockNumber, ProtocolVersionId, Transaction, H256, }; @@ -24,7 +27,7 @@ use crate::{ MiniblockSealer, ZkSyncStateKeeper, }, sync_layer::{client::CachingMainNodeClient, fetcher::MainNodeFetcher}, - utils::testonly::{create_l1_batch_metadata, create_l2_transaction}, + utils::testonly::{create_l1_batch_metadata, create_l2_transaction, prepare_recovery_snapshot}, }; const TEST_TIMEOUT: Duration = Duration::from_secs(10); @@ -53,12 +56,15 @@ pub(super) struct StateKeeperHandles { impl StateKeeperHandles { /// `tx_hashes` are grouped by the L1 batch. - pub async fn new(pool: ConnectionPool, actions: ActionQueue, tx_hashes: &[&[H256]]) -> Self { + pub async fn new( + pool: ConnectionPool, + main_node_client: MockMainNodeClient, + actions: ActionQueue, + tx_hashes: &[&[H256]], + ) -> Self { assert!(!tx_hashes.is_empty()); assert!(tx_hashes.iter().all(|tx_hashes| !tx_hashes.is_empty())); - ensure_genesis(&mut pool.access_storage().await.unwrap()).await; - let sync_state = SyncState::new(); let (miniblock_sealer, miniblock_sealer_handle) = MiniblockSealer::new(pool.clone(), 5); tokio::spawn(miniblock_sealer.run()); @@ -68,7 +74,7 @@ impl StateKeeperHandles { pool, actions, sync_state.clone(), - Box::::default(), + Box::new(main_node_client), OPERATOR_ADDRESS, u32::MAX, L2ChainId::default(), @@ -142,33 +148,65 @@ fn extract_tx_hashes<'a>(actions: impl IntoIterator) -> V .collect() } +/// Returns a mock snapshot recovery status equivalent to "recovering" from the genesis block. 
+fn genesis_snapshot_recovery_status() -> SnapshotRecoveryStatus { + SnapshotRecoveryStatus { + l1_batch_number: L1BatchNumber(0), + l1_batch_root_hash: H256::zero(), // unused + l1_batch_timestamp: 0, + miniblock_number: MiniblockNumber(0), + miniblock_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), + miniblock_timestamp: 0, + protocol_version: ProtocolVersionId::default(), + storage_logs_chunks_processed: vec![], + } +} + +#[test_casing(2, [false, true])] #[tokio::test] -async fn external_io_basics() { +async fn external_io_basics(snapshot_recovery: bool) { let pool = ConnectionPool::test_pool().await; - let open_l1_batch = open_l1_batch(1, 1, 1); + let mut storage = pool.access_storage().await.unwrap(); + let snapshot = if snapshot_recovery { + prepare_recovery_snapshot(&mut storage, 23, &[]).await + } else { + ensure_genesis(&mut storage).await; + genesis_snapshot_recovery_status() + }; + + let open_l1_batch = open_l1_batch( + snapshot.l1_batch_number.0 + 1, + snapshot.miniblock_timestamp + 1, + snapshot.miniblock_number.0 + 1, + ); let tx = create_l2_transaction(10, 100); let tx_hash = tx.hash(); let tx = SyncAction::Tx(Box::new(tx.into())); let actions = vec![open_l1_batch, tx, SyncAction::SealMiniblock]; let (actions_sender, action_queue) = ActionQueue::new(); - let state_keeper = - StateKeeperHandles::new(pool.clone(), action_queue, &[&extract_tx_hashes(&actions)]).await; + let client = MockMainNodeClient::default(); + let state_keeper = StateKeeperHandles::new( + pool.clone(), + client, + action_queue, + &[&extract_tx_hashes(&actions)], + ) + .await; actions_sender.push_actions(actions).await; // Wait until the miniblock is sealed. state_keeper - .wait(|state| state.get_local_block() == MiniblockNumber(1)) + .wait(|state| state.get_local_block() == snapshot.miniblock_number + 1) .await; // Check that the miniblock is persisted. 
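// How the wait above works, sketched with assumptions: `SyncState` tracks the
// last locally sealed miniblock, and the helper polls it under the module-wide
// `TEST_TIMEOUT` (`POLL_INTERVAL` is an assumed small constant):
async fn wait_for_local_block(sync_state: &SyncState, target: MiniblockNumber) {
    tokio::time::timeout(TEST_TIMEOUT, async {
        while sync_state.get_local_block() < target {
            tokio::time::sleep(POLL_INTERVAL).await;
        }
    })
    .await
    .expect("state keeper did not seal the expected miniblock in time");
}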
- let mut storage = pool.access_storage().await.unwrap(); let miniblock = storage .blocks_dal() - .get_miniblock_header(MiniblockNumber(1)) + .get_miniblock_header(snapshot.miniblock_number + 1) .await .unwrap() - .expect("Miniblock #1 is not persisted"); - assert_eq!(miniblock.timestamp, 1); + .expect("New miniblock is not persisted"); + assert_eq!(miniblock.timestamp, snapshot.miniblock_timestamp + 1); let expected_fee_input = BatchFeeInput::PubdataIndependent(PubdataIndependentBatchFeeModelInput { @@ -187,12 +225,31 @@ async fn external_io_basics() { .await .unwrap() .expect("Transaction not persisted"); - assert_eq!(tx_receipt.block_number, 1.into()); + assert_eq!( + tx_receipt.block_number, + (snapshot.miniblock_number.0 + 1).into() + ); assert_eq!(tx_receipt.transaction_index, 0.into()); } -pub(super) async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPool) -> Vec { - let open_l1_batch = open_l1_batch(1, 1, 1); +pub(super) async fn run_state_keeper_with_multiple_miniblocks( + pool: ConnectionPool, + snapshot_recovery: bool, +) -> (SnapshotRecoveryStatus, Vec) { + let mut storage = pool.access_storage().await.unwrap(); + let snapshot = if snapshot_recovery { + prepare_recovery_snapshot(&mut storage, 23, &[]).await + } else { + ensure_genesis(&mut storage).await; + genesis_snapshot_recovery_status() + }; + drop(storage); + + let open_l1_batch = open_l1_batch( + snapshot.l1_batch_number.0 + 1, + snapshot.miniblock_timestamp + 1, + snapshot.miniblock_number.0 + 1, + ); let txs = (0..5).map(|_| { let tx = create_l2_transaction(10, 100); SyncAction::Tx(Box::new(tx.into())) @@ -203,8 +260,8 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPo .collect(); let open_miniblock = SyncAction::Miniblock { - number: MiniblockNumber(2), - timestamp: 2, + number: snapshot.miniblock_number + 2, + timestamp: snapshot.miniblock_timestamp + 2, virtual_blocks: 1, }; let more_txs = (0..3).map(|_| { @@ -222,41 +279,47 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPo .chain(&second_miniblock_actions), ); let (actions_sender, action_queue) = ActionQueue::new(); - let state_keeper = StateKeeperHandles::new(pool, action_queue, &[&tx_hashes]).await; + let client = MockMainNodeClient::default(); + let state_keeper = StateKeeperHandles::new(pool, client, action_queue, &[&tx_hashes]).await; actions_sender.push_actions(first_miniblock_actions).await; actions_sender.push_actions(second_miniblock_actions).await; // Wait until both miniblocks are sealed. state_keeper - .wait(|state| state.get_local_block() == MiniblockNumber(2)) + .wait(|state| state.get_local_block() == snapshot.miniblock_number + 2) .await; - tx_hashes + (snapshot, tx_hashes) } +#[test_casing(2, [false, true])] #[tokio::test] -async fn external_io_with_multiple_miniblocks() { +async fn external_io_with_multiple_miniblocks(snapshot_recovery: bool) { let pool = ConnectionPool::test_pool().await; - let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await; + let (snapshot, tx_hashes) = + run_state_keeper_with_multiple_miniblocks(pool.clone(), snapshot_recovery).await; assert_eq!(tx_hashes.len(), 8); // Check that both miniblocks are persisted. 
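// For reference, how the expected hashes of these miniblocks are derived,
// using the `MiniblockHasher` API shown earlier in this series: each hash
// commits to the block number, timestamp, parent hash, and ordered tx hashes.
fn expected_miniblock_hash(
    number: MiniblockNumber,
    timestamp: u64,
    prev_hash: H256,
    tx_hashes: &[H256],
) -> H256 {
    let mut hasher = MiniblockHasher::new(number, timestamp, prev_hash);
    for &tx_hash in tx_hashes {
        hasher.push_tx_hash(tx_hash);
    }
    hasher.finalize(ProtocolVersionId::latest())
}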
- let tx_hashes_by_miniblock = [(1, &tx_hashes[..5]), (2, &tx_hashes[5..])]; + let tx_hashes_by_miniblock = [ + (snapshot.miniblock_number + 1, &tx_hashes[..5]), + (snapshot.miniblock_number + 2, &tx_hashes[5..]), + ]; let mut storage = pool.access_storage().await.unwrap(); for (number, expected_tx_hashes) in tx_hashes_by_miniblock { let miniblock = storage .blocks_dal() - .get_miniblock_header(MiniblockNumber(number)) + .get_miniblock_header(number) .await .unwrap() - .unwrap_or_else(|| panic!("Miniblock #{} is not persisted", number)); + .unwrap_or_else(|| panic!("Miniblock #{number} is not persisted")); assert_eq!(miniblock.l2_tx_count, expected_tx_hashes.len() as u16); - assert_eq!(miniblock.timestamp, u64::from(number)); + assert_eq!(miniblock.timestamp, u64::from(number.0)); let sync_block = storage .sync_dal() - .sync_block(MiniblockNumber(number), OPERATOR_ADDRESS, true) + .sync_block(number, OPERATOR_ADDRESS, true) .await .unwrap() - .unwrap_or_else(|| panic!("Sync block #{} is not persisted", number)); + .unwrap_or_else(|| panic!("Sync block #{number} is not persisted")); let transactions = sync_block.transactions.unwrap(); assert_eq!(transactions.len(), expected_tx_hashes.len()); @@ -265,43 +328,54 @@ async fn external_io_with_multiple_miniblocks() { } drop(storage); - test_external_io_recovery(pool, tx_hashes).await; + test_external_io_recovery(pool, &snapshot, tx_hashes).await; } -async fn test_external_io_recovery(pool: ConnectionPool, mut tx_hashes: Vec) { +async fn test_external_io_recovery( + pool: ConnectionPool, + snapshot: &SnapshotRecoveryStatus, + mut tx_hashes: Vec, +) { let new_tx = create_l2_transaction(10, 100); tx_hashes.push(new_tx.hash()); let new_tx = SyncAction::Tx(Box::new(new_tx.into())); let (actions_sender, action_queue) = ActionQueue::new(); - let state_keeper = StateKeeperHandles::new(pool.clone(), action_queue, &[&tx_hashes]).await; + let client = if snapshot.l1_batch_number > L1BatchNumber(0) { + MockMainNodeClient::for_snapshot_recovery(snapshot) + } else { + MockMainNodeClient::default() + }; + + let state_keeper = + StateKeeperHandles::new(pool.clone(), client, action_queue, &[&tx_hashes]).await; // Check that the state keeper state is restored. assert_eq!( state_keeper.sync_state.get_local_block(), - MiniblockNumber(2) + snapshot.miniblock_number + 2 ); // Send new actions and wait until the new miniblock is sealed. 
let open_miniblock = SyncAction::Miniblock { - number: MiniblockNumber(3), - timestamp: 3, + number: snapshot.miniblock_number + 3, + timestamp: snapshot.miniblock_timestamp + 3, virtual_blocks: 1, }; let actions = vec![open_miniblock, new_tx, SyncAction::SealMiniblock]; actions_sender.push_actions(actions).await; state_keeper - .wait(|state| state.get_local_block() == MiniblockNumber(3)) + .wait(|state| state.get_local_block() == snapshot.miniblock_number + 3) .await; let mut storage = pool.access_storage().await.unwrap(); let miniblock = storage .blocks_dal() - .get_miniblock_header(MiniblockNumber(3)) + .get_miniblock_header(snapshot.miniblock_number + 3) .await .unwrap() - .expect("Miniblock #3 is not persisted"); + .expect("New miniblock is not persisted"); assert_eq!(miniblock.l2_tx_count, 1); - assert_eq!(miniblock.timestamp, 3); + assert_eq!(miniblock.timestamp, snapshot.miniblock_timestamp + 3); } pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: u32) { @@ -331,6 +405,8 @@ pub(super) async fn mock_l1_batch_hash_computation(pool: ConnectionPool, number: pub(super) async fn run_state_keeper_with_multiple_l1_batches( pool: ConnectionPool, ) -> Vec> { + ensure_genesis(&mut pool.access_storage().await.unwrap()).await; + let l1_batch = open_l1_batch(1, 1, 1); let first_tx = create_l2_transaction(10, 100); let first_tx_hash = first_tx.hash(); @@ -354,6 +430,7 @@ pub(super) async fn run_state_keeper_with_multiple_l1_batches( let (actions_sender, action_queue) = ActionQueue::new(); let state_keeper = StateKeeperHandles::new( pool.clone(), + MockMainNodeClient::default(), action_queue, &[&[first_tx_hash], &[second_tx_hash]], ) @@ -484,21 +561,14 @@ async fn fetcher_basics() { fetcher_task.await.unwrap().unwrap(); } +#[test_casing(2, [false, true])] #[tokio::test] -async fn fetcher_with_real_server() { +async fn fetcher_with_real_server(snapshot_recovery: bool) { let pool = ConnectionPool::test_pool().await; // Fill in transactions grouped in multiple miniblocks in the storage. - let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await; + let (snapshot, tx_hashes) = + run_state_keeper_with_multiple_miniblocks(pool.clone(), snapshot_recovery).await; let mut tx_hashes = VecDeque::from(tx_hashes); - let mut connection = pool.access_storage().await.unwrap(); - let genesis_miniblock_hash = connection - .blocks_dal() - .get_miniblock_header(MiniblockNumber(0)) - .await - .unwrap() - .expect("No genesis miniblock") - .hash; - drop(connection); // Start the API server. let network_config = NetworkConfig::for_tests(); @@ -520,10 +590,10 @@ async fn fetcher_with_real_server() { let fetcher = MainNodeFetcher { client: CachingMainNodeClient::new(Box::new(client)), cursor: IoCursor { - next_miniblock: MiniblockNumber(1), - prev_miniblock_hash: genesis_miniblock_hash, - prev_miniblock_timestamp: 0, - l1_batch: L1BatchNumber(0), + next_miniblock: snapshot.miniblock_number + 1, + prev_miniblock_hash: snapshot.miniblock_hash, + prev_miniblock_timestamp: snapshot.miniblock_timestamp, + l1_batch: snapshot.l1_batch_number, }, actions: actions_sender, sync_state: sync_state.clone(), @@ -532,9 +602,12 @@ async fn fetcher_with_real_server() { let fetcher_task = tokio::spawn(fetcher.run()); // Check generated actions. 
-    let mut current_miniblock_number = MiniblockNumber(0);
+    let mut current_miniblock_number = snapshot.miniblock_number;
     let mut tx_count_in_miniblock = 0;
-    let miniblock_number_to_tx_count = HashMap::from([(1, 5), (2, 3)]);
+    let miniblock_number_to_tx_count = HashMap::from([
+        (snapshot.miniblock_number + 1, 5),
+        (snapshot.miniblock_number + 2, 3),
+    ]);
     let started_at = Instant::now();
     let deadline = started_at + TEST_TIMEOUT;
     loop {
@@ -547,7 +620,7 @@
                 first_miniblock_info,
                 ..
             } => {
-                assert_eq!(number, L1BatchNumber(1));
+                assert_eq!(number, snapshot.l1_batch_number + 1);
                 current_miniblock_number += 1; // First miniblock is implicitly opened
                 tx_count_in_miniblock = 0;
                 assert_eq!(first_miniblock_info.0, current_miniblock_number);
@@ -567,7 +640,7 @@
                     tx_count_in_miniblock,
                     miniblock_number_to_tx_count[&current_miniblock_number]
                 );
-                if current_miniblock_number == MiniblockNumber(2) {
+                if current_miniblock_number == snapshot.miniblock_number + 2 {
                     break;
                 }
             }

From b694b3955db019ce799f3134f3b42bd72ce90a70 Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Fri, 26 Jan 2024 18:12:56 +0200
Subject: [PATCH 18/27] Generalize `prepare_recovery_snapshot()`

---
 .../src/api_server/execution_sandbox/tests.rs |  3 +-
 .../src/api_server/tx_sender/tests.rs         | 16 ++--
 .../src/api_server/web3/tests/debug.rs        |  5 +-
 .../src/api_server/web3/tests/filters.rs      |  8 +-
 .../src/api_server/web3/tests/mod.rs          | 80 ++++++++++---------
 .../src/api_server/web3/tests/vm.rs           |  6 +-
 .../src/api_server/web3/tests/ws.rs           | 11 +--
 .../src/metadata_calculator/recovery/tests.rs |  8 +-
 .../src/state_keeper/io/common/tests.rs       | 15 ++--
 .../src/state_keeper/io/tests/mod.rs          |  3 +-
 .../sync_layer/batch_status_updater/tests.rs  |  6 +-
 core/lib/zksync_core/src/sync_layer/tests.rs  |  4 +-
 core/lib/zksync_core/src/utils/testonly.rs    |  7 +-
 13 files changed, 98 insertions(+), 74 deletions(-)

diff --git a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs
index 62c872688b79..525b2a26b431 100644
--- a/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs
+++ b/core/lib/zksync_core/src/api_server/execution_sandbox/tests.rs
@@ -67,7 +67,8 @@ async fn creating_block_args() {
 async fn creating_block_args_after_snapshot_recovery() {
     let pool = ConnectionPool::test_pool().await;
     let mut storage = pool.access_storage().await.unwrap();
-    let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await;
+    let snapshot_recovery =
+        prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await;

     let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap();
     assert_eq!(
diff --git a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs
index 55c6852cd4ab..8cb37e426332 100644
--- a/core/lib/zksync_core/src/api_server/tx_sender/tests.rs
+++ b/core/lib/zksync_core/src/api_server/tx_sender/tests.rs
@@ -1,6 +1,6 @@
 //! Tests for the transaction sender.
-use zksync_types::{get_nonce_key, StorageLog}; +use zksync_types::{get_nonce_key, L1BatchNumber, StorageLog}; use super::*; use crate::{ @@ -89,7 +89,7 @@ async fn getting_nonce_for_account() { #[tokio::test] async fn getting_nonce_for_account_after_snapshot_recovery() { - const SNAPSHOT_MINIBLOCK_NUMBER: u32 = 42; + const SNAPSHOT_MINIBLOCK_NUMBER: MiniblockNumber = MiniblockNumber(42); let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); @@ -99,7 +99,13 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)), StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)), ]; - prepare_recovery_snapshot(&mut storage, SNAPSHOT_MINIBLOCK_NUMBER, &nonce_logs).await; + prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(23), + SNAPSHOT_MINIBLOCK_NUMBER, + &nonce_logs, + ) + .await; let l2_chain_id = L2ChainId::default(); let tx_executor = MockTransactionExecutor::default().into(); @@ -115,7 +121,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { storage .blocks_dal() - .insert_miniblock(&create_miniblock(SNAPSHOT_MINIBLOCK_NUMBER + 1)) + .insert_miniblock(&create_miniblock(SNAPSHOT_MINIBLOCK_NUMBER.0 + 1)) .await .unwrap(); let new_nonce_logs = vec![StorageLog::new_write_log( @@ -125,7 +131,7 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { storage .storage_logs_dal() .insert_storage_logs( - MiniblockNumber(SNAPSHOT_MINIBLOCK_NUMBER + 1), + SNAPSHOT_MINIBLOCK_NUMBER + 1, &[(H256::default(), new_nonce_logs)], ) .await; diff --git a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs index bf929469b441..e8821d03e696 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/debug.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/debug.rs @@ -137,8 +137,7 @@ impl HttpTest for TraceBlockTestWithSnapshotRecovery { } async fn test(&self, client: &HttpClient, pool: &ConnectionPool) -> anyhow::Result<()> { - let snapshot_miniblock_number = - MiniblockNumber(StorageInitialization::SNAPSHOT_RECOVERY_BLOCK); + let snapshot_miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK; let missing_miniblock_numbers = [ MiniblockNumber(0), snapshot_miniblock_number - 1, @@ -150,7 +149,7 @@ impl HttpTest for TraceBlockTestWithSnapshotRecovery { .trace_block_by_number(number.0.into(), None) .await .unwrap_err(); - assert_pruned_block_error(&error, 24); + assert_pruned_block_error(&error, snapshot_miniblock_number + 1); } TraceBlockTest(snapshot_miniblock_number + 1) diff --git a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs index 913437b5e199..2b202be8c028 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/filters.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/filters.rs @@ -30,11 +30,11 @@ impl HttpTest for BasicFilterChangesTest { let new_tx_hash = tx_result.hash; let new_miniblock = store_miniblock( &mut pool.access_storage().await?, - MiniblockNumber(if self.snapshot_recovery { + if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1 } else { - 1 - }), + MiniblockNumber(1) + }, &[tx_result], ) .await?; @@ -116,7 +116,7 @@ impl HttpTest for LogFilterChangesTest { let mut storage = pool.access_storage().await?; let first_local_miniblock = if self.snapshot_recovery { - StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1 + 
StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0 + 1 } else { 1 }; diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs index 85a02f3feb70..a6554b3a072c 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs @@ -191,7 +191,8 @@ enum StorageInitialization { } impl StorageInitialization { - const SNAPSHOT_RECOVERY_BLOCK: u32 = 23; + const SNAPSHOT_RECOVERY_BATCH: L1BatchNumber = L1BatchNumber(23); + const SNAPSHOT_RECOVERY_BLOCK: MiniblockNumber = MiniblockNumber(23); fn empty_recovery() -> Self { Self::Recovery { @@ -217,13 +218,16 @@ impl StorageInitialization { } } Self::Recovery { logs, factory_deps } => { - prepare_recovery_snapshot(storage, Self::SNAPSHOT_RECOVERY_BLOCK, logs).await; + prepare_recovery_snapshot( + storage, + Self::SNAPSHOT_RECOVERY_BATCH, + Self::SNAPSHOT_RECOVERY_BLOCK, + logs, + ) + .await; storage .storage_dal() - .insert_factory_deps( - MiniblockNumber(Self::SNAPSHOT_RECOVERY_BLOCK), - factory_deps, - ) + .insert_factory_deps(Self::SNAPSHOT_RECOVERY_BLOCK, factory_deps) .await?; } } @@ -456,17 +460,17 @@ impl HttpTest for BlockMethodsWithSnapshotRecovery { let block_number = client.get_block_number().await?; let expected_block_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - assert_eq!(block_number, expected_block_number.into()); + assert_eq!(block_number, expected_block_number.0.into()); - for block_number in [api::BlockNumber::Latest, expected_block_number.into()] { + for block_number in [api::BlockNumber::Latest, expected_block_number.0.into()] { let block = client .get_block_by_number(block_number, false) .await? .context("no latest block")?; - assert_eq!(block.number, expected_block_number.into()); + assert_eq!(block.number, expected_block_number.0.into()); } - for number in [0, 1, expected_block_number - 1] { + for number in [0, 1, expected_block_number.0 - 1] { let error = client .get_block_details(MiniblockNumber(number)) .await @@ -494,7 +498,7 @@ impl HttpTest for BlockMethodsWithSnapshotRecovery { } } -fn assert_pruned_block_error(error: &ClientError, first_retained_block: u32) { +fn assert_pruned_block_error(error: &ClientError, first_retained_block: MiniblockNumber) { if let ClientError::Call(error) = error { assert_eq!(error.code(), ErrorCode::InvalidParams.code()); assert!( @@ -533,58 +537,58 @@ impl HttpTest for L1BatchMethodsWithSnapshotRecovery { let mut storage = pool.access_storage().await?; let miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - store_miniblock(&mut storage, MiniblockNumber(miniblock_number), &[]).await?; - seal_l1_batch(&mut storage, L1BatchNumber(miniblock_number)).await?; + let l1_batch_number = StorageInitialization::SNAPSHOT_RECOVERY_BATCH + 1; + store_miniblock(&mut storage, miniblock_number, &[]).await?; + seal_l1_batch(&mut storage, l1_batch_number).await?; drop(storage); - let l1_batch_number = client.get_l1_batch_number().await?; - assert_eq!(l1_batch_number, miniblock_number.into()); + assert_eq!( + client.get_l1_batch_number().await?, + l1_batch_number.0.into() + ); // `get_miniblock_range` method let miniblock_range = client - .get_miniblock_range(L1BatchNumber(miniblock_number)) + .get_miniblock_range(l1_batch_number) .await? 
.context("no range for sealed L1 batch")?; - assert_eq!(miniblock_range.0, miniblock_number.into()); - assert_eq!(miniblock_range.1, miniblock_number.into()); + assert_eq!(miniblock_range.0, miniblock_number.0.into()); + assert_eq!(miniblock_range.1, miniblock_number.0.into()); - let miniblock_range_for_future_batch = client - .get_miniblock_range(L1BatchNumber(miniblock_number) + 1) - .await?; + let miniblock_range_for_future_batch = + client.get_miniblock_range(l1_batch_number + 1).await?; assert_eq!(miniblock_range_for_future_batch, None); let error = client - .get_miniblock_range(L1BatchNumber(miniblock_number) - 1) + .get_miniblock_range(l1_batch_number - 1) .await .unwrap_err(); - assert_pruned_l1_batch_error(&error, miniblock_number); + assert_pruned_l1_batch_error(&error, l1_batch_number); // `get_l1_batch_details` method let details = client - .get_l1_batch_details(L1BatchNumber(miniblock_number)) + .get_l1_batch_details(l1_batch_number) .await? .context("no details for sealed L1 batch")?; - assert_eq!(details.number, L1BatchNumber(miniblock_number)); + assert_eq!(details.number, l1_batch_number); - let details_for_future_batch = client - .get_l1_batch_details(L1BatchNumber(miniblock_number) + 1) - .await?; + let details_for_future_batch = client.get_l1_batch_details(l1_batch_number + 1).await?; assert!( details_for_future_batch.is_none(), "{details_for_future_batch:?}" ); let error = client - .get_l1_batch_details(L1BatchNumber(miniblock_number) - 1) + .get_l1_batch_details(l1_batch_number - 1) .await .unwrap_err(); - assert_pruned_l1_batch_error(&error, miniblock_number); + assert_pruned_l1_batch_error(&error, l1_batch_number); Ok(()) } } -fn assert_pruned_l1_batch_error(error: &ClientError, first_retained_l1_batch: u32) { +fn assert_pruned_l1_batch_error(error: &ClientError, first_retained_l1_batch: L1BatchNumber) { if let ClientError::Call(error) = error { assert_eq!(error.code(), ErrorCode::InvalidParams.code()); assert!( @@ -631,7 +635,7 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { let address = Address::repeat_byte(1); let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - for number in [0, 1, first_local_miniblock - 1] { + for number in [0, 1, first_local_miniblock.0 - 1] { let number = api::BlockIdVariant::BlockNumber(number.into()); let error = client.get_code(address, Some(number)).await.unwrap_err(); assert_pruned_block_error(&error, first_local_miniblock); @@ -641,13 +645,13 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { .get_storage_at(address, 0.into(), Some(number)) .await .unwrap_err(); - assert_pruned_block_error(&error, 24); + assert_pruned_block_error(&error, first_local_miniblock); } - store_miniblock(&mut storage, MiniblockNumber(first_local_miniblock), &[]).await?; + store_miniblock(&mut storage, first_local_miniblock, &[]).await?; drop(storage); - for number in [api::BlockNumber::Latest, first_local_miniblock.into()] { + for number in [api::BlockNumber::Latest, first_local_miniblock.0.into()] { let number = api::BlockIdVariant::BlockNumber(number); let code = client.get_code(address, Some(number)).await?; assert_eq!(code.0, b"code"); @@ -789,7 +793,7 @@ impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { let pruned_block_numbers = [ api::BlockNumber::Earliest, 0.into(), - StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.into(), + StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0.into(), ]; for number in pruned_block_numbers { let number = api::BlockIdVariant::BlockNumber(number); @@ -801,9 +805,9 @@ 
impl HttpTest for TransactionCountAfterSnapshotRecoveryTest { } let latest_miniblock_number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - store_miniblock(&mut storage, MiniblockNumber(latest_miniblock_number), &[]).await?; + store_miniblock(&mut storage, latest_miniblock_number, &[]).await?; - let latest_block_numbers = [api::BlockNumber::Latest, latest_miniblock_number.into()]; + let latest_block_numbers = [api::BlockNumber::Latest, latest_miniblock_number.0.into()]; for number in latest_block_numbers { let number = api::BlockIdVariant::BlockNumber(number); let latest_count = client diff --git a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs index ba5ca2ead005..a35ea22cf2a3 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/vm.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/vm.rs @@ -96,7 +96,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { assert_eq!(call_result.0, b"output"); let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - let first_miniblock_numbers = [api::BlockNumber::Latest, first_local_miniblock.into()]; + let first_miniblock_numbers = [api::BlockNumber::Latest, first_local_miniblock.0.into()]; for number in first_miniblock_numbers { let number = api::BlockIdVariant::BlockNumber(number); let error = client @@ -110,7 +110,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { } } - let pruned_block_numbers = [0, 1, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK]; + let pruned_block_numbers = [0, 1, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0]; for number in pruned_block_numbers { let number = api::BlockIdVariant::BlockNumber(number.into()); let error = client @@ -121,7 +121,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { } let mut storage = pool.access_storage().await?; - store_miniblock(&mut storage, MiniblockNumber(first_local_miniblock), &[]).await?; + store_miniblock(&mut storage, first_local_miniblock, &[]).await?; drop(storage); for number in first_miniblock_numbers { diff --git a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs index 7a0bd57f2ef6..818a3d34564c 100644 --- a/core/lib/zksync_core/src/api_server/web3/tests/ws.rs +++ b/core/lib/zksync_core/src/api_server/web3/tests/ws.rs @@ -100,6 +100,7 @@ async fn notifiers_start_after_snapshot_recovery() { let mut storage = pool.access_storage().await.unwrap(); prepare_recovery_snapshot( &mut storage, + StorageInitialization::SNAPSHOT_RECOVERY_BATCH, StorageInitialization::SNAPSHOT_RECOVERY_BLOCK, &[], ) @@ -120,7 +121,7 @@ async fn notifiers_start_after_snapshot_recovery() { } // Emulate creating the first miniblock; check that notifiers react to it. 
- let first_local_miniblock = MiniblockNumber(StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1); + let first_local_miniblock = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; store_miniblock(&mut storage, first_local_miniblock, &[]) .await .unwrap(); @@ -265,11 +266,11 @@ impl WsTest for BasicSubscriptionsTest { let mut storage = pool.access_storage().await?; let tx_result = execute_l2_transaction(create_l2_transaction(1, 2)); let new_tx_hash = tx_result.hash; - let miniblock_number = MiniblockNumber(if self.snapshot_recovery { + let miniblock_number = if self.snapshot_recovery { StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1 } else { - 1 - }); + MiniblockNumber(1) + }; let new_miniblock = store_miniblock(&mut storage, miniblock_number, &[tx_result]).await?; drop(storage); @@ -389,7 +390,7 @@ impl WsTest for LogSubscriptionsTest { let mut storage = pool.access_storage().await?; let miniblock_number = if self.snapshot_recovery { - StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1 + StorageInitialization::SNAPSHOT_RECOVERY_BLOCK.0 + 1 } else { 1 }; diff --git a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs index 1d353277d8df..f9a2537db570 100644 --- a/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs +++ b/core/lib/zksync_core/src/metadata_calculator/recovery/tests.rs @@ -242,7 +242,13 @@ async fn entire_recovery_workflow(case: RecoveryWorkflowCase) { // Emulate the recovered view of Postgres. Unlike with previous tests, we don't perform genesis. let snapshot_logs = gen_storage_logs(100..300, 1).pop().unwrap(); let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &snapshot_logs).await; + let snapshot_recovery = prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(23), + MiniblockNumber(42), + &snapshot_logs, + ) + .await; let temp_dir = TempDir::new().expect("failed get temporary directory for RocksDB"); let merkle_tree_config = MerkleTreeConfig { diff --git a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs index f033ddda9d33..34480d37cef9 100644 --- a/core/lib/zksync_core/src/state_keeper/io/common/tests.rs +++ b/core/lib/zksync_core/src/state_keeper/io/common/tests.rs @@ -69,7 +69,8 @@ async fn creating_io_cursor_with_genesis() { async fn creating_io_cursor_with_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let cursor = IoCursor::new(&mut storage).await.unwrap(); assert_eq!(cursor.l1_batch, L1BatchNumber(24)); @@ -143,7 +144,8 @@ async fn waiting_for_l1_batch_params_with_genesis() { async fn waiting_for_l1_batch_params_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (hash, timestamp) = provider @@ -261,7 +263,8 @@ async fn assert_first_miniblock_numbers( async fn 
getting_first_miniblock_in_batch_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let mut batches_and_miniblocks = HashMap::from([ @@ -382,7 +385,8 @@ async fn store_pending_miniblocks( async fn loading_pending_batch_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let starting_miniblock_number = snapshot_recovery.miniblock_number.0 + 1; store_pending_miniblocks( @@ -483,7 +487,8 @@ async fn getting_batch_version_with_genesis() { async fn getting_batch_version_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let version = provider diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 40e7d7d89441..cbbd454c940d 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -438,7 +438,8 @@ async fn miniblock_and_l1_batch_processing_with_sync_sealer() { async fn miniblock_processing_after_snapshot_recovery() { let connection_pool = ConnectionPool::test_pool().await; let mut storage = connection_pool.access_storage().await.unwrap(); - let snapshot_recovery = prepare_recovery_snapshot(&mut storage, 23, &[]).await; + let snapshot_recovery = + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let tester = Tester::new(); let (mut mempool, mut mempool_guard) = tester diff --git a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs index c3a98814f6db..ff623beebe1e 100644 --- a/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/batch_status_updater/tests.rs @@ -261,7 +261,7 @@ async fn updater_cursor_for_storage_with_genesis_block() { async fn updater_cursor_after_snapshot_recovery() { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); - prepare_recovery_snapshot(&mut storage, 23, &[]).await; + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; let cursor = UpdaterCursor::new(&mut storage).await.unwrap(); assert_eq!(cursor.last_committed_l1_batch, L1BatchNumber(23)); @@ -275,7 +275,7 @@ async fn normal_updater_operation(snapshot_recovery: bool, async_batches: bool) let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let first_batch_number = if snapshot_recovery { - prepare_recovery_snapshot(&mut storage, 23, &[]).await; + prepare_recovery_snapshot(&mut storage, 
L1BatchNumber(23), MiniblockNumber(42), &[]).await; L1BatchNumber(24) } else { ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) @@ -347,7 +347,7 @@ async fn updater_with_gradual_main_node_updates(snapshot_recovery: bool) { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let first_batch_number = if snapshot_recovery { - prepare_recovery_snapshot(&mut storage, 23, &[]).await; + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await; L1BatchNumber(24) } else { ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 64d0bfbc6726..a3af9bda9308 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -168,7 +168,7 @@ async fn external_io_basics(snapshot_recovery: bool) { let pool = ConnectionPool::test_pool().await; let mut storage = pool.access_storage().await.unwrap(); let snapshot = if snapshot_recovery { - prepare_recovery_snapshot(&mut storage, 23, &[]).await + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await } else { ensure_genesis(&mut storage).await; genesis_snapshot_recovery_status() @@ -238,7 +238,7 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks( ) -> (SnapshotRecoveryStatus, Vec) { let mut storage = pool.access_storage().await.unwrap(); let snapshot = if snapshot_recovery { - prepare_recovery_snapshot(&mut storage, 23, &[]).await + prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await } else { ensure_genesis(&mut storage).await; genesis_snapshot_recovery_status() diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index 64a893eb9906..62e5cd38fe2e 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -121,7 +121,8 @@ pub(crate) fn execute_l2_transaction(transaction: L2Tx) -> TransactionExecutionR /// Prepares a recovery snapshot without performing genesis. pub(crate) async fn prepare_recovery_snapshot( storage: &mut StorageProcessor<'_>, - l1_batch_number: u32, + l1_batch_number: L1BatchNumber, + miniblock_number: MiniblockNumber, snapshot_logs: &[StorageLog], ) -> SnapshotRecoveryStatus { let mut storage = storage.start_transaction().await.unwrap(); @@ -134,8 +135,8 @@ pub(crate) async fn prepare_recovery_snapshot( .collect(); let l1_batch_root_hash = ZkSyncTree::process_genesis_batch(&tree_instructions).root_hash; - let miniblock = create_miniblock(l1_batch_number); - let l1_batch = create_l1_batch(l1_batch_number); + let miniblock = create_miniblock(miniblock_number.0); + let l1_batch = create_l1_batch(l1_batch_number.0); // Miniblock and L1 batch are intentionally **not** inserted into the storage. // Store factory deps for the base system contracts. 
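
Note: after this patch, the snapshot L1 batch number and miniblock number are passed to
`prepare_recovery_snapshot()` independently, so tests can model the realistic case where
the two diverge (e.g., batch #23 sealed at miniblock #42). A minimal call-site sketch
under the same assumptions as the tests above (test-only connection pool, no snapshot
storage logs; the concrete numbers are illustrative):

    let pool = ConnectionPool::test_pool().await;
    let mut storage = pool.access_storage().await.unwrap();
    // Batch and miniblock numbers are typed and decoupled; previously a single
    // raw `u32` was reused for both.
    let snapshot = prepare_recovery_snapshot(
        &mut storage,
        L1BatchNumber(23),
        MiniblockNumber(42),
        &[], // no snapshot storage logs
    )
    .await;
    assert_eq!(snapshot.l1_batch_number, L1BatchNumber(23));
    assert_eq!(snapshot.miniblock_number, MiniblockNumber(42));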
From fd319c4f7b59456bfec796db6b548315bbafa83a Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 29 Jan 2024 11:50:39 +0200 Subject: [PATCH 19/27] Set real hash for `Execute` transactions --- core/tests/test_account/src/lib.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index ec3c1b7a7b0f..0f063fca48de 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -8,6 +8,7 @@ use zksync_system_constants::{ REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, }; use zksync_types::{ + api, fee::Fee, l1::{OpProcessingType, PriorityQueueType}, l2::L2Tx, @@ -86,7 +87,10 @@ impl Account { ) .expect("should create a signed execute transaction"); - tx.set_input(H256::random().0.to_vec(), H256::random()); + // Set the real transaction hash, which is necessary for transaction execution in VM to function properly. + let tx_request = api::TransactionRequest::from(tx.clone()); + let tx_hash = tx_request.get_tx_hash(L2ChainId::default()).unwrap(); + tx.set_input(H256::random().0.to_vec(), tx_hash); tx.into() } From b748885862baa1f5b215258709d549894aa2c895 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 29 Jan 2024 12:09:31 +0200 Subject: [PATCH 20/27] Test snapshot recovery for L1 batch executor --- .../state_keeper/batch_executor/tests/mod.rs | 60 +++++- .../batch_executor/tests/tester.rs | 186 ++++++++++++++++-- core/lib/zksync_core/src/utils/testonly.rs | 23 ++- 3 files changed, 242 insertions(+), 27 deletions(-) diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs index 362afe20437d..9e7caa6f575d 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/mod.rs @@ -1,11 +1,11 @@ use assert_matches::assert_matches; +use test_casing::test_casing; use zksync_dal::ConnectionPool; use zksync_test_account::Account; -use zksync_types::PriorityOpId; +use zksync_types::{get_nonce_key, utils::storage_key_for_eth_balance, PriorityOpId}; -use self::tester::Tester; +use self::tester::{AccountLoadNextExecutable, StorageSnapshot, TestConfig, Tester}; use super::TxExecutionResult; -use crate::state_keeper::batch_executor::tests::tester::{AccountLoadNextExecutable, TestConfig}; mod tester; @@ -34,9 +34,7 @@ fn assert_reverted(execution_result: &TxExecutionResult) { async fn execute_l2_tx() { let connection_pool = ConnectionPool::test_pool().await; let mut alice = Account::random(); - let tester = Tester::new(connection_pool); - tester.genesis().await; tester.fund(&[alice.address()]).await; let executor = tester.create_batch_executor().await; @@ -46,6 +44,58 @@ async fn execute_l2_tx() { executor.finish_batch().await; } +#[derive(Debug, Clone, Copy)] +enum SnapshotRecoveryMutation { + RemoveNonce, + RemoveBalance, +} + +impl SnapshotRecoveryMutation { + const ALL: [Option; 3] = [None, Some(Self::RemoveNonce), Some(Self::RemoveBalance)]; + + fn mutate_snapshot(self, storage_snapshot: &mut StorageSnapshot, alice: &Account) { + match self { + Self::RemoveNonce => { + let nonce_key = get_nonce_key(&alice.address()); + let nonce_value = storage_snapshot.storage_logs.remove(&nonce_key); + assert!(nonce_value.is_some()); + } + Self::RemoveBalance => { + let balance_key = storage_key_for_eth_balance(&alice.address()); + let balance_value = storage_snapshot.storage_logs.remove(&balance_key); + 
assert!(balance_value.is_some()); + } + } + } +} + +/// Tests that we can continue executing account transactions after emulating snapshot recovery. +/// Test cases with a set `mutation` ensure that the VM executor correctly detects missing data (e.g., dropped account nonce). +#[test_casing(3, SnapshotRecoveryMutation::ALL)] +#[tokio::test] +async fn execute_l2_tx_after_snapshot_recovery(mutation: Option) { + let mut alice = Account::random(); + let connection_pool = ConnectionPool::test_pool().await; + + let mut storage_snapshot = StorageSnapshot::new(&connection_pool, &mut alice, 10).await; + assert!(storage_snapshot.storage_logs.len() > 10); // sanity check + assert!(!storage_snapshot.factory_deps.is_empty()); + if let Some(mutation) = mutation { + mutation.mutate_snapshot(&mut storage_snapshot, &alice); + } + let snapshot = storage_snapshot.recover(&connection_pool).await; + + let tester = Tester::new(connection_pool); + let executor = tester.recover_batch_executor(&snapshot).await; + let res = executor.execute_tx(alice.execute()).await; + if mutation.is_none() { + assert_executed(&res); + executor.finish_batch().await; + } else { + assert_rejected(&res); + } +} + /// Checks that we can successfully execute a single L1 tx in batch executor. #[tokio::test] async fn execute_l1_tx() { diff --git a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs index dc7f9d0d9794..f406e9935271 100644 --- a/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs +++ b/core/lib/zksync_core/src/state_keeper/batch_executor/tests/tester.rs @@ -1,8 +1,10 @@ //! Testing harness for the batch executor. //! Contains helper functionality to initialize test context and perform tests without too much boilerplate. +use std::collections::HashMap; + use multivm::{ - interface::{L1BatchEnv, SystemEnv}, + interface::{L1BatchEnv, L2BlockEnv, SystemEnv}, vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use tempfile::TempDir; @@ -12,20 +14,23 @@ use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractEx use zksync_dal::ConnectionPool; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ - ethabi::Token, fee::Fee, system_contracts::get_system_smart_contracts, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, L1BatchNumber, - L2ChainId, MiniblockNumber, PriorityOpId, ProtocolVersionId, StorageLog, Transaction, H256, - L2_ETH_TOKEN_ADDRESS, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, + block::MiniblockHasher, ethabi::Token, fee::Fee, snapshots::SnapshotRecoveryStatus, + storage_writes_deduplicator::StorageWritesDeduplicator, + system_contracts::get_system_smart_contracts, utils::storage_key_for_standard_token_balance, + AccountTreeId, Address, Execute, L1BatchNumber, L2ChainId, MiniblockNumber, PriorityOpId, + ProtocolVersionId, StorageKey, StorageLog, Transaction, H256, L2_ETH_TOKEN_ADDRESS, + SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, }; use zksync_utils::u256_to_h256; use crate::{ genesis::create_genesis_l1_batch, state_keeper::{ - batch_executor::BatchExecutorHandle, + batch_executor::{BatchExecutorHandle, TxExecutionResult}, tests::{default_l1_batch_env, default_system_env, BASE_SYSTEM_CONTRACTS}, L1BatchExecutorBuilder, MainBatchExecutorBuilder, }, + utils::testonly::prepare_recovery_snapshot, }; const DEFAULT_GAS_PER_PUBDATA: u32 = 10000; @@ -87,6 +92,17 @@ impl Tester { /// Creates a batch executor instance. 
/// This function intentionally uses sensible defaults to not introduce boilerplate. pub(super) async fn create_batch_executor(&self) -> BatchExecutorHandle { + // Not really important for the batch executor - it operates over a single batch. + let (l1_batch_env, system_env) = self.batch_params(L1BatchNumber(1), 100); + self.create_batch_executor_inner(l1_batch_env, system_env) + .await + } + + async fn create_batch_executor_inner( + &self, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + ) -> BatchExecutorHandle { let mut builder = MainBatchExecutorBuilder::new( self.db_dir.path().to_str().unwrap().to_owned(), self.pool.clone(), @@ -96,13 +112,6 @@ impl Tester { 100, false, ); - - // Not really important for the batch executor - it operates over a single batch. - let (l1_batch_env, system_env) = self.batch_params( - L1BatchNumber(1), - 100, - self.config.validation_computational_gas_limit, - ); let (_stop_sender, stop_receiver) = watch::channel(false); builder .init_batch(l1_batch_env, system_env, &stop_receiver) @@ -110,19 +119,37 @@ impl Tester { .expect("Batch executor was interrupted") } + pub(super) async fn recover_batch_executor( + &self, + snapshot: &SnapshotRecoveryStatus, + ) -> BatchExecutorHandle { + let current_timestamp = snapshot.miniblock_timestamp + 1; + let (mut l1_batch_env, system_env) = + self.batch_params(snapshot.l1_batch_number + 1, current_timestamp); + l1_batch_env.previous_batch_hash = Some(snapshot.l1_batch_root_hash); + l1_batch_env.first_l2_block = L2BlockEnv { + number: snapshot.miniblock_number.0 + 1, + timestamp: current_timestamp, + prev_block_hash: snapshot.miniblock_hash, + max_virtual_blocks_to_create: 1, + }; + + self.create_batch_executor_inner(l1_batch_env, system_env) + .await + } + /// Creates test batch params that can be fed into the VM. fn batch_params( &self, l1_batch_number: L1BatchNumber, timestamp: u64, - validation_computational_gas_limit: u32, ) -> (L1BatchEnv, SystemEnv) { let mut system_params = default_system_env(); if let Some(vm_gas_limit) = self.config.vm_gas_limit { system_params.gas_limit = vm_gas_limit; } system_params.default_validation_computational_gas_limit = - validation_computational_gas_limit; + self.config.validation_computational_gas_limit; let mut batch_params = default_l1_batch_env(l1_batch_number.0, timestamp, self.fee_account); batch_params.previous_batch_hash = Some(H256::zero()); // Not important in this context. (batch_params, system_params) @@ -330,11 +357,136 @@ fn fee(gas_limit: u32) -> Fee { pub fn mock_loadnext_gas_burn_calldata(gas: u32) -> Vec { let loadnext_contract = get_loadnext_contract(); - let contract_function = loadnext_contract.contract.function("burnGas").unwrap(); - let params = vec![Token::Uint(U256::from(gas))]; contract_function .encode_input(¶ms) .expect("failed to encode parameters") } + +/// Concise representation of a storage snapshot for testing recovery. +#[derive(Debug)] +pub(super) struct StorageSnapshot { + pub miniblock_number: MiniblockNumber, + pub miniblock_hash: H256, + pub miniblock_timestamp: u64, + pub storage_logs: HashMap, + pub factory_deps: HashMap>, +} + +impl StorageSnapshot { + /// Generates a new snapshot by executing the specified number of transactions, each in a separate miniblock. 
+ pub async fn new( + connection_pool: &ConnectionPool, + alice: &mut Account, + transaction_count: u32, + ) -> Self { + let tester = Tester::new(connection_pool.clone()); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + + let mut storage = connection_pool.access_storage().await.unwrap(); + let all_logs = storage + .snapshots_creator_dal() + .get_storage_logs_chunk(MiniblockNumber(0), H256::zero()..=H256::repeat_byte(0xff)) + .await + .unwrap(); + let factory_deps = storage + .snapshots_creator_dal() + .get_all_factory_deps(MiniblockNumber(0)) + .await + .unwrap(); + let mut all_logs: HashMap<_, _> = all_logs + .into_iter() + .map(|log| (log.key, log.value)) + .collect(); + + let executor = tester.create_batch_executor().await; + let mut l2_block_env = L2BlockEnv { + number: 1, + prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), + timestamp: 100, + max_virtual_blocks_to_create: 1, + }; + let mut storage_writes_deduplicator = StorageWritesDeduplicator::new(); + + for _ in 0..transaction_count { + let tx = alice.execute(); + let tx_hash = tx.hash(); // probably incorrect + let res = executor.execute_tx(tx).await; + if let TxExecutionResult::Success { tx_result, .. } = res { + let storage_logs = &tx_result.logs.storage_logs; + storage_writes_deduplicator + .apply(storage_logs.iter().filter(|log| log.log_query.rw_flag)); + } else { + panic!("Unexpected tx execution result: {res:?}"); + }; + + let mut hasher = MiniblockHasher::new( + MiniblockNumber(l2_block_env.number), + l2_block_env.timestamp, + l2_block_env.prev_block_hash, + ); + hasher.push_tx_hash(tx_hash); + + l2_block_env.number += 1; + l2_block_env.timestamp += 1; + l2_block_env.prev_block_hash = hasher.finalize(ProtocolVersionId::latest()); + executor.start_next_miniblock(l2_block_env).await; + } + + let (finished_batch, _) = executor.finish_batch().await; + let storage_logs = &finished_batch.block_tip_execution_result.logs.storage_logs; + storage_writes_deduplicator.apply(storage_logs.iter().filter(|log| log.log_query.rw_flag)); + let modified_entries = storage_writes_deduplicator.into_modified_key_values(); + all_logs.extend( + modified_entries + .into_iter() + .map(|(key, slot)| (key, u256_to_h256(slot.value))), + ); + + // Compute the hash of the last (fictive) miniblock in the batch. + let miniblock_hash = MiniblockHasher::new( + MiniblockNumber(l2_block_env.number), + l2_block_env.timestamp, + l2_block_env.prev_block_hash, + ) + .finalize(ProtocolVersionId::latest()); + + storage.blocks_dal().delete_genesis().await.unwrap(); + Self { + miniblock_number: MiniblockNumber(l2_block_env.number), + miniblock_timestamp: l2_block_env.timestamp, + miniblock_hash, + storage_logs: all_logs, + factory_deps: factory_deps.into_iter().collect(), + } + } + + /// Recovers storage from this snapshot. 
+ pub async fn recover(self, connection_pool: &ConnectionPool) -> SnapshotRecoveryStatus { + let snapshot_logs: Vec<_> = self + .storage_logs + .into_iter() + .map(|(key, value)| StorageLog::new_write_log(key, value)) + .collect(); + let mut storage = connection_pool.access_storage().await.unwrap(); + let mut snapshot = prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(1), + self.miniblock_number, + &snapshot_logs, + ) + .await; + + snapshot.miniblock_hash = self.miniblock_hash; + snapshot.miniblock_timestamp = self.miniblock_timestamp; + + storage + .storage_dal() + .insert_factory_deps(snapshot.miniblock_number, &self.factory_deps) + .await + .unwrap(); + snapshot + } +} diff --git a/core/lib/zksync_core/src/utils/testonly.rs b/core/lib/zksync_core/src/utils/testonly.rs index 62e5cd38fe2e..c474f2e5eb3f 100644 --- a/core/lib/zksync_core/src/utils/testonly.rs +++ b/core/lib/zksync_core/src/utils/testonly.rs @@ -141,13 +141,26 @@ pub(crate) async fn prepare_recovery_snapshot( // Store factory deps for the base system contracts. let contracts = GenesisParams::mock().base_system_contracts; - storage + + let protocol_version = storage .protocol_versions_dal() - .save_protocol_version_with_tx(ProtocolVersion { - base_system_contracts_hashes: contracts.hashes(), - ..ProtocolVersion::default() - }) + .get_protocol_version(ProtocolVersionId::latest()) .await; + if let Some(protocol_version) = protocol_version { + assert_eq!( + protocol_version.base_system_contracts_hashes, + contracts.hashes(), + "Protocol version set up with incorrect base system contracts" + ); + } else { + storage + .protocol_versions_dal() + .save_protocol_version_with_tx(ProtocolVersion { + base_system_contracts_hashes: contracts.hashes(), + ..ProtocolVersion::default() + }) + .await; + } let factory_deps = HashMap::from([ ( contracts.bootloader.hash, From ff3066063484e0f44c1291f728247c12dac39eb3 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 29 Jan 2024 12:32:17 +0200 Subject: [PATCH 21/27] Refactor fetcher initialization for EN --- core/bin/external_node/src/main.rs | 87 +++++++++++++++--------------- 1 file changed, 42 insertions(+), 45 deletions(-) diff --git a/core/bin/external_node/src/main.rs b/core/bin/external_node/src/main.rs index 69b1b679ac4b..fdd1ce8accd0 100644 --- a/core/bin/external_node/src/main.rs +++ b/core/bin/external_node/src/main.rs @@ -174,53 +174,50 @@ async fn init_tasks( .context("Failed creating JSON-RPC client for main node")?; let singleton_pool_builder = ConnectionPool::singleton(&config.postgres.database_url); - let fetcher_handle = match config.consensus.clone() { - None => { - let pool = singleton_pool_builder - .build() - .await - .context("failed to build a connection pool for `MainNodeFetcher`")?; - let mut storage = pool.access_storage_tagged("sync_layer").await?; - let fetcher = MainNodeFetcher::new( - &mut storage, - Box::new(main_node_client), - action_queue_sender, - sync_state.clone(), - stop_receiver.clone(), - ) - .await - .context("failed initializing main node fetcher")?; - tokio::spawn(fetcher.run()) - } - Some(cfg) => { - let pool = connection_pool.clone(); - let mut stop_receiver = stop_receiver.clone(); - let sync_state = sync_state.clone(); - #[allow(clippy::redundant_locals)] - tokio::spawn(async move { - let sync_state = sync_state; - let main_node_client = main_node_client; - scope::run!(&ctx::root(), |ctx, s| async { - s.spawn_bg(async { - let res = cfg.run(ctx, pool, action_queue_sender).await; - tracing::info!("Consensus actor stopped"); - res - }); 
- // TODO: information about the head block of the validators - // (currently just the main node) - // should also be provided over the gossip network. - s.spawn_bg(async { - consensus::run_main_node_state_fetcher(ctx, &main_node_client, &sync_state) - .await?; - Ok(()) - }); - ctx.wait(stop_receiver.wait_for(|stop| *stop)).await??; + let fetcher_handle = if let Some(cfg) = config.consensus.clone() { + let pool = connection_pool.clone(); + let mut stop_receiver = stop_receiver.clone(); + let sync_state = sync_state.clone(); + + #[allow(clippy::redundant_locals)] + tokio::spawn(async move { + let sync_state = sync_state; + let main_node_client = main_node_client; + scope::run!(&ctx::root(), |ctx, s| async { + s.spawn_bg(async { + let res = cfg.run(ctx, pool, action_queue_sender).await; + tracing::info!("Consensus actor stopped"); + res + }); + // TODO: information about the head block of the validators (currently just the main node) + // should also be provided over the gossip network. + s.spawn_bg(async { + consensus::run_main_node_state_fetcher(ctx, &main_node_client, &sync_state) + .await?; Ok(()) - }) - .await - .context("consensus actor") + }); + ctx.wait(stop_receiver.wait_for(|stop| *stop)).await??; + Ok(()) }) - } + .await + .context("consensus actor") + }) + } else { + let pool = singleton_pool_builder + .build() + .await + .context("failed to build a connection pool for `MainNodeFetcher`")?; + let mut storage = pool.access_storage_tagged("sync_layer").await?; + let fetcher = MainNodeFetcher::new( + &mut storage, + Box::new(main_node_client), + action_queue_sender, + sync_state.clone(), + stop_receiver.clone(), + ) + .await + .context("failed initializing main node fetcher")?; + tokio::spawn(fetcher.run()) }; let metadata_calculator_config = MetadataCalculatorConfig { From 763accb109940c49fc19348209107a674eda170d Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 30 Jan 2024 15:10:12 +0200 Subject: [PATCH 22/27] Make DB migration backward-compatible --- ...151508_add_extra_fields_to_snapshot_recovery.up.sql | 6 ------ ...508_add_extra_fields_to_snapshot_recovery.down.sql} | 3 +-- ...151508_add_extra_fields_to_snapshot_recovery.up.sql | 10 ++++++++++ 3 files changed, 11 insertions(+), 8 deletions(-) delete mode 100644 core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.up.sql rename core/lib/dal/migrations/{20240117151508_add_extra_fields_to_snapshot_recovery.down.sql => 20240130151508_add_extra_fields_to_snapshot_recovery.down.sql} (61%) create mode 100644 core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.up.sql diff --git a/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.up.sql b/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.up.sql deleted file mode 100644 index f7e67076387c..000000000000 --- a/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.up.sql +++ /dev/null @@ -1,6 +0,0 @@ -ALTER TABLE snapshot_recovery - RENAME COLUMN miniblock_root_hash TO miniblock_hash; -ALTER TABLE snapshot_recovery - ADD COLUMN l1_batch_timestamp BIGINT NOT NULL, - ADD COLUMN miniblock_timestamp BIGINT NOT NULL, - ADD COLUMN protocol_version INT NOT NULL; diff --git a/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.down.sql b/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.down.sql similarity index 61% rename from core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.down.sql 
rename to core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.down.sql index f108c21a4ee9..fbe98139ec8d 100644 --- a/core/lib/dal/migrations/20240117151508_add_extra_fields_to_snapshot_recovery.down.sql +++ b/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.down.sql @@ -1,6 +1,5 @@ ALTER TABLE snapshot_recovery - RENAME COLUMN miniblock_hash TO miniblock_root_hash; -ALTER TABLE snapshot_recovery + DROP COLUMN miniblock_hash, DROP COLUMN l1_batch_timestamp, DROP COLUMN miniblock_timestamp, DROP COLUMN protocol_version; diff --git a/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.up.sql b/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.up.sql new file mode 100644 index 000000000000..b34e199a2ae7 --- /dev/null +++ b/core/lib/dal/migrations/20240130151508_add_extra_fields_to_snapshot_recovery.up.sql @@ -0,0 +1,10 @@ +ALTER TABLE snapshot_recovery + ADD COLUMN miniblock_hash BYTEA NOT NULL, + ADD COLUMN l1_batch_timestamp BIGINT NOT NULL, + ADD COLUMN miniblock_timestamp BIGINT NOT NULL, + ADD COLUMN protocol_version INT NOT NULL; +-- `miniblock_root_hash` should be renamed to `miniblock_hash`, but we cannot do it straightforwardly +-- because of backward compatibility. Instead, we create a new column and set a dummy default value +-- for the old one, so that INSERTs not referencing `miniblock_root_hash` don't fail. +ALTER TABLE snapshot_recovery + ALTER COLUMN miniblock_root_hash SET DEFAULT '\x0000000000000000000000000000000000000000000000000000000000000000'::bytea; From 915a87264d94100c389a9c1b6c11271bce8ecc89 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 1 Feb 2024 11:42:16 +0200 Subject: [PATCH 23/27] Test missing protocol version in `ExternalIO` --- .../lib/zksync_core/src/consensus/testonly.rs | 32 +++++-- .../zksync_core/src/sync_layer/external_io.rs | 11 ++- core/lib/zksync_core/src/sync_layer/tests.rs | 83 +++++++++++++++++++ 3 files changed, 118 insertions(+), 8 deletions(-) diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index edf28d22d8c5..bcdcf553d692 100644 --- a/core/lib/zksync_core/src/consensus/testonly.rs +++ b/core/lib/zksync_core/src/consensus/testonly.rs @@ -1,5 +1,7 @@ //! Utilities for testing the consensus module. 
+use std::collections::HashMap; + use anyhow::Context as _; use rand::{ distributions::{Distribution, Standard}, @@ -11,7 +13,7 @@ use zksync_contracts::{BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::ConnectionPool; use zksync_types::{ api, block::MiniblockHasher, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, - L2ChainId, MiniblockNumber, ProtocolVersionId, H256, + L2ChainId, MiniblockNumber, ProtocolVersionId, H256, U256, }; use crate::{ @@ -61,6 +63,8 @@ pub(crate) struct MockMainNodeClient { prev_miniblock_hash: H256, l2_blocks: Vec, block_number_offset: u32, + protocol_versions: HashMap, + system_contracts: HashMap>, } impl MockMainNodeClient { @@ -86,6 +90,7 @@ impl MockMainNodeClient { prev_miniblock_hash: snapshot.miniblock_hash, l2_blocks: vec![last_miniblock_in_snapshot_batch], block_number_offset: snapshot.miniblock_number.0, + ..Self::default() } } @@ -142,15 +147,28 @@ impl MockMainNodeClient { self.l2_blocks.extend(l2_blocks); tx_hashes } + + pub fn insert_protocol_version(&mut self, version: api::ProtocolVersion) { + self.system_contracts + .insert(version.base_system_contracts.bootloader, vec![]); + self.system_contracts + .insert(version.base_system_contracts.default_aa, vec![]); + self.protocol_versions.insert(version.version_id, version); + } } #[async_trait::async_trait] impl MainNodeClient for MockMainNodeClient { async fn fetch_system_contract_by_hash( &self, - _hash: H256, + hash: H256, ) -> anyhow::Result { - anyhow::bail!("Not implemented"); + let code = self + .system_contracts + .get(&hash) + .cloned() + .with_context(|| format!("requested unexpected system contract {hash:?}"))?; + Ok(SystemContractCode { hash, code }) } async fn fetch_genesis_contract_bytecode( @@ -162,9 +180,13 @@ impl MainNodeClient for MockMainNodeClient { async fn fetch_protocol_version( &self, - _protocol_version: ProtocolVersionId, + protocol_version: ProtocolVersionId, ) -> anyhow::Result { - anyhow::bail!("Not implemented"); + let protocol_version = protocol_version as u16; + self.protocol_versions + .get(&protocol_version) + .cloned() + .with_context(|| format!("requested unexpected protocol version {protocol_version}")) } async fn fetch_genesis_l1_batch_hash(&self) -> anyhow::Result { diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index a9f47989a809..7e25b121a1b2 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, convert::TryInto, iter::FromIterator, time::Duration}; +use std::{collections::HashMap, time::Duration}; use anyhow::Context as _; use async_trait::async_trait; @@ -154,6 +154,8 @@ impl ExternalIO { match base_system_contracts { Some(version) => version, None => { + tracing::info!("Fetching protocol version {id:?} from the main node"); + let protocol_version = self .main_node_client .fetch_protocol_version(id) @@ -205,7 +207,10 @@ impl ExternalIO { hash, }, None => { - tracing::info!("Fetching base system contract bytecode from the main node"); + tracing::info!( + "Fetching base system contract bytecode with hash {hash:?} from the main node" + ); + let contract = self .main_node_client .fetch_system_contract_by_hash(hash) @@ -218,7 +223,7 @@ impl ExternalIO { .storage_dal() .insert_factory_deps( self.current_miniblock_number, - &HashMap::from_iter([(contract.hash, be_words_to_bytes(&contract.code))]), + &HashMap::from([(contract.hash, 
diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs
index f666cc9512c5..fc78b7d9678b 100644
--- a/core/lib/zksync_core/src/sync_layer/tests.rs
+++ b/core/lib/zksync_core/src/sync_layer/tests.rs
@@ -9,8 +9,10 @@ use std::{
 use test_casing::test_casing;
 use tokio::{sync::watch, task::JoinHandle};
 use zksync_config::configs::chain::NetworkConfig;
+use zksync_contracts::BaseSystemContractsHashes;
 use zksync_dal::{ConnectionPool, StorageProcessor};
 use zksync_types::{
+    api,
     block::MiniblockHasher,
     fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput},
     snapshots::SnapshotRecoveryStatus,
@@ -234,6 +236,87 @@ async fn external_io_basics(snapshot_recovery: bool) {
     assert_eq!(tx_receipt.transaction_index, 0.into());
 }
 
+#[test_casing(2, [false, true])]
+#[tokio::test]
+async fn external_io_works_without_local_protocol_version(snapshot_recovery: bool) {
+    let pool = ConnectionPool::test_pool().await;
+    let mut storage = pool.access_storage().await.unwrap();
+    let snapshot = if snapshot_recovery {
+        prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), MiniblockNumber(42), &[]).await
+    } else {
+        ensure_genesis(&mut storage).await;
+        genesis_snapshot_recovery_status()
+    };
+
+    let mut open_l1_batch = open_l1_batch(
+        snapshot.l1_batch_number.0 + 1,
+        snapshot.miniblock_timestamp + 1,
+        snapshot.miniblock_number.0 + 1,
+    );
+    if let SyncAction::OpenBatch {
+        protocol_version, ..
+    } = &mut open_l1_batch
+    {
+        *protocol_version = ProtocolVersionId::next();
+    } else {
+        unreachable!();
+    };
+
+    let tx = create_l2_transaction(10, 100);
+    let tx = SyncAction::Tx(Box::new(tx.into()));
+    let actions = vec![open_l1_batch, tx, SyncAction::SealMiniblock];
+
+    let (actions_sender, action_queue) = ActionQueue::new();
+    let mut client = MockMainNodeClient::default();
+    let next_protocol_version = api::ProtocolVersion {
+        version_id: ProtocolVersionId::next() as u16,
+        timestamp: snapshot.miniblock_timestamp + 1,
+        base_system_contracts: BaseSystemContractsHashes {
+            bootloader: H256::repeat_byte(1),
+            default_aa: H256::repeat_byte(2),
+        },
+        ..api::ProtocolVersion::default()
+    };
+    client.insert_protocol_version(next_protocol_version.clone());
+
+    let state_keeper = StateKeeperHandles::new(
+        pool.clone(),
+        client,
+        action_queue,
+        &[&extract_tx_hashes(&actions)],
+    )
+    .await;
+    actions_sender.push_actions(actions).await;
+    // Wait until the miniblock is sealed.
+    state_keeper
+        .wait(|state| state.get_local_block() == snapshot.miniblock_number + 1)
+        .await;
+
+    // Check that the miniblock and the protocol version for it are persisted.
+    let persisted_protocol_version = storage
+        .protocol_versions_dal()
+        .get_protocol_version(ProtocolVersionId::next())
+        .await
+        .expect("next protocol version not persisted");
+    assert_eq!(
+        persisted_protocol_version.timestamp,
+        next_protocol_version.timestamp
+    );
+    assert_eq!(
+        persisted_protocol_version.base_system_contracts_hashes,
+        next_protocol_version.base_system_contracts
+    );
+
+    let miniblock = storage
+        .blocks_dal()
+        .get_miniblock_header(snapshot.miniblock_number + 1)
+        .await
+        .unwrap()
+        .expect("New miniblock is not persisted");
+    assert_eq!(miniblock.timestamp, snapshot.miniblock_timestamp + 1);
+    assert_eq!(miniblock.protocol_version, Some(ProtocolVersionId::next()));
+}
+
 pub(super) async fn run_state_keeper_with_multiple_miniblocks(
     pool: ConnectionPool,
     snapshot_recovery: bool,
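A note on `#[test_casing(2, [false, true])]` in the new test: the attribute comes from the `test_casing` crate and stamps out one test per listed argument, so both the genesis and the snapshot-recovery startup paths are exercised. A rough hand-written equivalent (a sketch; the names actually generated by the macro differ):

    // Sketch of what the macro expands to; generated test names are illustrative.
    #[tokio::test]
    async fn without_local_protocol_version_from_genesis() {
        external_io_works_without_local_protocol_version(false).await;
    }

    #[tokio::test]
    async fn without_local_protocol_version_from_snapshot() {
        external_io_works_without_local_protocol_version(true).await;
    }
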
From 849b76b882e527dcbc17a68d075aa958d0b9eb5f Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Thu, 1 Feb 2024 11:49:35 +0200
Subject: [PATCH 24/27] Rename `wait_for_previous_l1_batch_hash()` methods

---
 .../src/state_keeper/io/mempool.rs            |  4 ++--
 .../zksync_core/src/sync_layer/external_io.rs | 19 ++++++++++---------
 2 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
index b6aadd8533fc..63fb83fcb8a7 100644
--- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
@@ -162,7 +162,7 @@ impl StateKeeperIO for MempoolIO {
     ) -> Option<(SystemEnv, L1BatchEnv)> {
         let deadline = Instant::now() + max_wait;
         // FIXME: why do we wait for hash immediately and not below? (changed in #809)
-        let prev_l1_batch_hash = self.load_previous_l1_batch_hash().await;
+        let prev_l1_batch_hash = self.wait_for_previous_l1_batch_hash().await;
 
         // Block until at least one transaction in the mempool can match the filter (or timeout happens).
         // This is needed to ensure that block timestamp is not too old.
@@ -495,7 +495,7 @@ impl MempoolIO {
         self.prev_miniblock_timestamp = miniblock.timestamp;
     }
 
-    async fn load_previous_l1_batch_hash(&self) -> H256 {
+    async fn wait_for_previous_l1_batch_hash(&self) -> H256 {
         tracing::info!(
             "Getting previous L1 batch hash for L1 batch #{}",
             self.current_l1_batch_number
diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs
index 7e25b121a1b2..af37079c0db5 100644
--- a/core/lib/zksync_core/src/sync_layer/external_io.rs
+++ b/core/lib/zksync_core/src/sync_layer/external_io.rs
@@ -122,7 +122,11 @@ impl ExternalIO {
         self.prev_miniblock_hash = miniblock.get_miniblock_hash();
     }
 
-    async fn load_previous_l1_batch_hash(&self) -> H256 {
+    async fn wait_for_previous_l1_batch_hash(&self) -> H256 {
+        tracing::info!(
+            "Getting previous L1 batch hash for L1 batch #{}",
+            self.current_l1_batch_number
+        );
         let mut storage = self.pool.access_storage_tagged("sync_layer").await.unwrap();
         let wait_latency = KEEPER_METRICS.wait_for_prev_hash_time.start();
         let prev_l1_batch_number = self.current_l1_batch_number - 1;
@@ -338,10 +342,9 @@ impl StateKeeperIO for ExternalIO {
             number, self.current_l1_batch_number,
             "Batch number mismatch"
         );
-        tracing::info!("Getting previous L1 batch hash");
-        let previous_l1_batch_hash = self.load_previous_l1_batch_hash().await;
+        let previous_l1_batch_hash = self.wait_for_previous_l1_batch_hash().await;
         tracing::info!(
-            "Previous L1 batch hash: {previous_l1_batch_hash}, previous miniblock hash: {:?}",
+            "Previous L1 batch hash: {previous_l1_batch_hash:?}, previous miniblock hash: {:?}",
             self.prev_miniblock_hash
         );
 
@@ -369,7 +372,7 @@ impl StateKeeperIO for ExternalIO {
                 ));
             }
             Some(other) => {
-                panic!("Unexpected action in the action queue: {:?}", other);
+                panic!("Unexpected action in the action queue: {other:?}");
             }
             None => {
                 tokio::time::sleep(POLL_INTERVAL).await;
@@ -415,13 +418,11 @@ impl StateKeeperIO for ExternalIO {
             }
             Some(other) => {
                 panic!(
-                    "Unexpected action in the queue while waiting for the next miniblock {:?}",
-                    other
+                    "Unexpected action in the queue while waiting for the next miniblock: {other:?}"
                 );
             }
-            _ => {
+            None => {
                 tokio::time::sleep(POLL_INTERVAL).await;
-                continue;
             }
         }
     }
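The `load_` to `wait_for_` rename is more than cosmetic: the root hash of batch `N - 1` only becomes available once the metadata calculator has processed that batch, so the call can block for a long time (hence the `wait_for_prev_hash_time` metric around it). The underlying pattern is a plain poll; a sketch under assumed names (the DAL accessor and poll interval are illustrative, not taken from this patch):

    use std::time::Duration;

    use zksync_dal::StorageProcessor;
    use zksync_types::{L1BatchNumber, H256};

    // Sketch of the wait loop behind `wait_for_previous_l1_batch_hash()`.
    async fn wait_for_batch_hash(
        storage: &mut StorageProcessor<'_>,
        number: L1BatchNumber,
    ) -> H256 {
        const POLL_INTERVAL: Duration = Duration::from_millis(100);
        loop {
            let root_hash = storage
                .blocks_dal()
                .get_l1_batch_state_root(number) // assumed DAL method returning Option<H256>
                .await
                .unwrap();
            if let Some(hash) = root_hash {
                return hash; // the metadata calculator has caught up
            }
            tokio::time::sleep(POLL_INTERVAL).await;
        }
    }
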
From b947d02c522a30026c28fc915d7fda1b4a50ffac Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Thu, 1 Feb 2024 12:01:17 +0200
Subject: [PATCH 25/27] Simplify `wait_for_new_miniblock_params()` method

---
 .../zksync_core/src/state_keeper/io/mempool.rs    |  7 +++++--
 core/lib/zksync_core/src/state_keeper/io/mod.rs   |  1 -
 .../zksync_core/src/state_keeper/io/tests/mod.rs  | 11 +++++------
 core/lib/zksync_core/src/state_keeper/keeper.rs   | 15 +++++----------
 .../zksync_core/src/state_keeper/tests/tester.rs  |  1 -
 .../lib/zksync_core/src/sync_layer/external_io.rs |  1 -
 6 files changed, 15 insertions(+), 21 deletions(-)

diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
index 63fb83fcb8a7..650e0847cc45 100644
--- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
@@ -222,13 +222,12 @@ impl StateKeeperIO for MempoolIO {
     async fn wait_for_new_miniblock_params(
         &mut self,
         max_wait: Duration,
-        prev_miniblock_timestamp: u64,
     ) -> Option<MiniblockParams> {
         // We must provide different timestamps for each miniblock.
         // If the miniblock sealing interval is greater than 1 second, `sleep_past` won't actually sleep.
         let timestamp = tokio::time::timeout(
             max_wait,
-            sleep_past(prev_miniblock_timestamp, self.current_miniblock_number),
+            sleep_past(self.prev_miniblock_timestamp, self.current_miniblock_number),
         )
         .await
         .ok()?;
@@ -545,6 +544,10 @@ impl MempoolIO {
     pub(super) fn filter(&self) -> &L2TxFilter {
         &self.filter
     }
+
+    pub(super) fn set_prev_miniblock_timestamp(&mut self, timestamp: u64) {
+        self.prev_miniblock_timestamp = timestamp;
+    }
 }
diff --git a/core/lib/zksync_core/src/state_keeper/io/mod.rs b/core/lib/zksync_core/src/state_keeper/io/mod.rs
index 16cc15e03b01..a159e3644c7e 100644
--- a/core/lib/zksync_core/src/state_keeper/io/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/mod.rs
@@ -82,7 +82,6 @@ pub trait StateKeeperIO: 'static + Send + IoSealCriteria {
     async fn wait_for_new_miniblock_params(
         &mut self,
         max_wait: Duration,
-        prev_miniblock_timestamp: u64,
     ) -> Option<MiniblockParams>;
     /// Blocks for up to `max_wait` until the next transaction is available for execution.
     /// Returns `None` if no transaction became available until the timeout.
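The comment about distinct timestamps is the crux of this commit: miniblock timestamps must strictly increase, and `sleep_past` is what enforces that. A condensed sketch of its assumed behavior (the real helper is more careful about logging and clock skew):

    use tokio::time::{sleep, Duration};
    use zksync_types::MiniblockNumber;
    use zksync_utils::time::seconds_since_epoch;

    // Sketch: return a wall-clock timestamp strictly greater than `prev`,
    // sleeping only if the clock hasn't advanced past it yet.
    async fn sleep_past(prev: u64, miniblock: MiniblockNumber) -> u64 {
        let mut now = seconds_since_epoch();
        while now <= prev {
            tracing::debug!("Waiting to advance past timestamp {prev} for miniblock #{miniblock}");
            sleep(Duration::from_millis(100)).await;
            now = seconds_since_epoch();
        }
        now
    }

Moving `prev_miniblock_timestamp` into `MempoolIO` state (with `set_prev_miniblock_timestamp` as a test-only escape hatch) is what lets the parameter disappear from the trait method below.
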
diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
index deae69986dda..fa695f4774dd 100644
--- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs
@@ -663,12 +663,11 @@ async fn different_timestamp_for_miniblocks_in_same_batch() {
     tester.genesis(&connection_pool).await;
     let (mut mempool, _) = tester.create_test_mempool_io(connection_pool, 1).await;
     let current_timestamp = seconds_since_epoch();
-    let MiniblockParams {
-        timestamp: next_timestamp,
-        ..
-    } = mempool
-        .wait_for_new_miniblock_params(Duration::from_secs(10), current_timestamp)
+    mempool.set_prev_miniblock_timestamp(current_timestamp);
+
+    let miniblock_params = mempool
+        .wait_for_new_miniblock_params(Duration::from_secs(10))
         .await
         .unwrap();
-    assert!(next_timestamp > current_timestamp);
+    assert!(miniblock_params.timestamp > current_timestamp);
 }
diff --git a/core/lib/zksync_core/src/state_keeper/keeper.rs b/core/lib/zksync_core/src/state_keeper/keeper.rs
index c1d035d4538a..3992a1b61e8f 100644
--- a/core/lib/zksync_core/src/state_keeper/keeper.rs
+++ b/core/lib/zksync_core/src/state_keeper/keeper.rs
@@ -196,9 +196,7 @@ impl ZkSyncStateKeeper {
             self.io.seal_miniblock(&updates_manager).await;
             // We've sealed the miniblock that we had, but we still need to set up the timestamp
             // for the fictive miniblock.
-            let new_miniblock_params = self
-                .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp)
-                .await?;
+            let new_miniblock_params = self.wait_for_new_miniblock_params().await?;
             Self::start_next_miniblock(
                 new_miniblock_params,
                 &mut updates_manager,
@@ -259,14 +257,11 @@ impl ZkSyncStateKeeper {
         Err(Error::Canceled)
     }
 
-    async fn wait_for_new_miniblock_params(
-        &mut self,
-        prev_miniblock_timestamp: u64,
-    ) -> Result<MiniblockParams, Error> {
+    async fn wait_for_new_miniblock_params(&mut self) -> Result<MiniblockParams, Error> {
         while !self.is_canceled() {
             if let Some(params) = self
                 .io
-                .wait_for_new_miniblock_params(POLL_WAIT_DURATION, prev_miniblock_timestamp)
+                .wait_for_new_miniblock_params(POLL_WAIT_DURATION)
                 .await
             {
                 return Ok(params);
@@ -381,7 +376,7 @@ impl ZkSyncStateKeeper {
         // We've processed all the miniblocks, and right now we're initializing the next *actual* miniblock.
         let new_miniblock_params = self
-            .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp)
+            .wait_for_new_miniblock_params()
             .await
             .map_err(|e| e.context("wait_for_new_miniblock_params"))?;
         Self::start_next_miniblock(new_miniblock_params, updates_manager, batch_executor).await;
@@ -421,7 +416,7 @@ impl ZkSyncStateKeeper {
         self.io.seal_miniblock(updates_manager).await;
 
         let new_miniblock_params = self
-            .wait_for_new_miniblock_params(updates_manager.miniblock.timestamp)
+            .wait_for_new_miniblock_params()
             .await
             .map_err(|e| e.context("wait_for_new_miniblock_params"))?;
         tracing::debug!(
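For reference, the `MiniblockParams` value flowing through these methods is a small plain struct; its shape at this point in the series is assumed to be roughly the following (the exact field set may differ):

    // Assumed shape of `MiniblockParams`; a sketch for orientation only.
    #[derive(Debug, Clone, Copy)]
    pub struct MiniblockParams {
        /// Timestamp of the new miniblock; must strictly exceed the previous miniblock's timestamp.
        pub timestamp: u64,
        /// Number of virtual blocks to roll over in this miniblock.
        pub virtual_blocks: u32,
    }
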
diff --git a/core/lib/zksync_core/src/state_keeper/tests/tester.rs b/core/lib/zksync_core/src/state_keeper/tests/tester.rs
index ca65b1653260..fc5d595b0422 100644
--- a/core/lib/zksync_core/src/state_keeper/tests/tester.rs
+++ b/core/lib/zksync_core/src/state_keeper/tests/tester.rs
@@ -661,7 +661,6 @@ impl StateKeeperIO for TestIO {
     async fn wait_for_new_miniblock_params(
         &mut self,
         _max_wait: Duration,
-        _prev_miniblock_timestamp: u64,
     ) -> Option<MiniblockParams> {
         Some(MiniblockParams {
             timestamp: self.timestamp,
diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs
index af37079c0db5..4d175fe0b17f 100644
--- a/core/lib/zksync_core/src/sync_layer/external_io.rs
+++ b/core/lib/zksync_core/src/sync_layer/external_io.rs
@@ -385,7 +385,6 @@ impl StateKeeperIO for ExternalIO {
     async fn wait_for_new_miniblock_params(
         &mut self,
         max_wait: Duration,
-        _prev_miniblock_timestamp: u64,
     ) -> Option<MiniblockParams> {
         // Wait for the next miniblock to appear in the queue.
         let actions = &mut self.actions;

From 7d058466caa05cbfd84f7160b1cf3fbf6290cf9c Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Mon, 5 Feb 2024 15:02:44 +0200
Subject: [PATCH 26/27] Fix snapshot applier test

---
 core/lib/snapshots_applier/src/tests/mod.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/lib/snapshots_applier/src/tests/mod.rs b/core/lib/snapshots_applier/src/tests/mod.rs
index 764173775618..4ecc2f177127 100644
--- a/core/lib/snapshots_applier/src/tests/mod.rs
+++ b/core/lib/snapshots_applier/src/tests/mod.rs
@@ -93,10 +93,10 @@ async fn snapshots_creator_can_successfully_recover_db() {
     let expected_status = SnapshotRecoveryStatus {
         l1_batch_number,
         l1_batch_root_hash,
-        l1_batch_timestamp: l1_batch_number.0.into(),
+        l1_batch_timestamp: 0,
         miniblock_number,
         miniblock_hash,
-        miniblock_timestamp: miniblock_number.0.into(),
+        miniblock_timestamp: 0,
         protocol_version: ProtocolVersionId::default(),
         storage_logs_chunks_processed: vec![true, true],
     };
From fac593f36c597e3842168f15ada8df422098d611 Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Mon, 5 Feb 2024 19:50:53 +0200
Subject: [PATCH 27/27] Move `wait_for_previous_l1_batch_hash()` invocation

---
 .../zksync_core/src/state_keeper/io/mempool.rs | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
index 650e0847cc45..00d39502d645 100644
--- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs
+++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs
@@ -161,8 +161,6 @@ impl StateKeeperIO for MempoolIO {
         max_wait: Duration,
     ) -> Option<(SystemEnv, L1BatchEnv)> {
         let deadline = Instant::now() + max_wait;
-        // FIXME: why do we wait for hash immediately and not below? (changed in #809)
-        let prev_l1_batch_hash = self.wait_for_previous_l1_batch_hash().await;
 
         // Block until at least one transaction in the mempool can match the filter (or timeout happens).
         // This is needed to ensure that block timestamp is not too old.
@@ -181,11 +179,16 @@
             self.current_l1_batch_number.0,
             self.filter.fee_input
         );
-        let mut storage = self.pool.access_storage().await.unwrap();
+        let mut storage = self
+            .pool
+            .access_storage_tagged("state_keeper")
+            .await
+            .unwrap();
         let (base_system_contracts, protocol_version) = storage
             .protocol_versions_dal()
             .base_system_contracts_by_timestamp(current_timestamp)
             .await;
+        drop(storage);
 
         // We create a new filter each time, since parameters may change and a previously
         // ignored transaction in the mempool may be scheduled for execution.
@@ -194,12 +197,13 @@
             protocol_version.into(),
         )
         .await;
-
         if !self.mempool.has_next(&self.filter) {
             tokio::time::sleep(self.delay_interval).await;
             continue;
         }
 
+        // We only need to get the root hash when we're certain that we have a new transaction.
+        let prev_l1_batch_hash = self.wait_for_previous_l1_batch_hash().await;
         return Some(l1_batch_params(
             self.current_l1_batch_number,
             self.fee_account,
@@ -372,7 +376,11 @@ impl StateKeeperIO for MempoolIO {
         &mut self,
         version_id: ProtocolVersionId,
     ) -> Option<ProtocolUpgradeTx> {
-        let mut storage = self.pool.access_storage().await.unwrap();
+        let mut storage = self
+            .pool
+            .access_storage_tagged("state_keeper")
+            .await
+            .unwrap();
         storage
             .protocol_versions_dal()
             .get_protocol_upgrade_tx(version_id)