
feat: save writes needed for tree in state keeper #1965

Merged · 12 commits · May 28, 2024
8 changes: 5 additions & 3 deletions core/bin/external_node/src/main.rs
@@ -41,7 +41,7 @@ use zksync_reorg_detector::ReorgDetector;
use zksync_state::{PostgresStorageCaches, RocksdbStorageOptions};
use zksync_state_keeper::{
seal_criteria::NoopSealer, AsyncRocksdbCache, BatchExecutor, MainBatchExecutor, OutputHandler,
StateKeeperPersistence, ZkSyncStateKeeper,
StateKeeperPersistence, TreeWritesPersistence, ZkSyncStateKeeper,
};
use zksync_storage::RocksDB;
use zksync_types::L2ChainId;
@@ -228,9 +228,11 @@ async fn run_core(
tracing::warn!("Disabling persisting protective reads; this should be safe, but is considered an experimental option at the moment");
persistence = persistence.without_protective_reads();
}
let tree_writes_persistence = TreeWritesPersistence::new(connection_pool.clone());

let output_handler =
OutputHandler::new(Box::new(persistence)).with_handler(Box::new(sync_state.clone()));
let output_handler = OutputHandler::new(Box::new(persistence))
.with_handler(Box::new(tree_writes_persistence))
.with_handler(Box::new(sync_state.clone()));
let state_keeper = build_state_keeper(
action_queue,
config.required.state_cache_path.clone(),

New migration (down):
@@ -0,0 +1 @@
ALTER TABLE l1_batches DROP COLUMN IF EXISTS tree_writes;

New migration (up):
@@ -0,0 +1 @@
ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS tree_writes BYTEA;
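
For context, the new BYTEA column stores a bincode-serialized Vec<TreeWrite> (see set_tree_writes below). A minimal round-trip sketch follows; the struct here is an illustrative stand-in for zksync_types::writes::TreeWrite, whose exact field set may differ:

use serde::{Deserialize, Serialize};

// Illustrative stand-in for zksync_types::writes::TreeWrite; the real struct
// lives in zksync_types and its exact fields may differ.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct TreeWrite {
    address: [u8; 20], // account address part of the storage key
    key: [u8; 32],     // slot key part of the storage key
    value: [u8; 32],   // value written to the slot
    leaf_index: u64,   // enumeration index of the Merkle tree leaf
}

fn main() {
    let writes = vec![TreeWrite {
        address: [0x11; 20],
        key: [0x22; 32],
        value: [0x33; 32],
        leaf_index: 1,
    }];
    // These bytes are what ends up in the l1_batches.tree_writes column.
    let bytes = bincode::serialize(&writes).expect("serialization failed");
    let decoded: Vec<TreeWrite> = bincode::deserialize(&bytes).expect("deserialization failed");
    assert_eq!(writes, decoded);
}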
80 changes: 79 additions & 1 deletion core/lib/dal/src/blocks_dal.rs
@@ -9,7 +9,7 @@ use anyhow::Context as _;
use bigdecimal::{BigDecimal, FromPrimitive, ToPrimitive};
use zksync_db_connection::{
connection::Connection,
error::DalResult,
error::{DalResult, SqlxContext},
instrument::{InstrumentExt, Instrumented},
interpolate_query, match_query_as,
};
@@ -18,6 +18,7 @@ use zksync_types::{
block::{BlockGasCount, L1BatchHeader, L1BatchTreeData, L2BlockHeader, StorageOracleInfo},
circuit::CircuitStatistic,
commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata},
writes::TreeWrite,
Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256,
};

@@ -2205,6 +2206,83 @@ impl BlocksDal<'_, '_> {
.await?;
Ok(())
}

pub async fn set_tree_writes(
&mut self,
l1_batch_number: L1BatchNumber,
tree_writes: Vec<TreeWrite>,
) -> DalResult<()> {
let instrumentation =
Instrumented::new("set_tree_writes").with_arg("l1_batch_number", &l1_batch_number);
let tree_writes = bincode::serialize(&tree_writes)
.map_err(|err| instrumentation.arg_error("tree_writes", err))?;

let query = sqlx::query!(
r#"
UPDATE l1_batches
SET
tree_writes = $1
WHERE
number = $2
"#,
&tree_writes,
i64::from(l1_batch_number.0),
);

instrumentation.with(query).execute(self.storage).await?;

Ok(())
}

pub async fn get_tree_writes(
&mut self,
l1_batch_number: L1BatchNumber,
) -> DalResult<Option<Vec<TreeWrite>>> {
Ok(sqlx::query!(
r#"
SELECT
tree_writes
FROM
l1_batches
WHERE
number = $1
"#,
i64::from(l1_batch_number.0),
)
.try_map(|row| {
row.tree_writes
.map(|data| bincode::deserialize(&data).decode_column("tree_writes"))
.transpose()
})
.instrument("get_tree_writes")
.with_arg("l1_batch_number", &l1_batch_number)
.fetch_optional(self.storage)
.await?
.flatten())
}

pub async fn check_tree_writes_presence(
&mut self,
l1_batch_number: L1BatchNumber,
) -> DalResult<bool> {
Ok(sqlx::query!(
r#"
SELECT
(tree_writes IS NOT NULL) AS "tree_writes_are_present!"
FROM
l1_batches
WHERE
number = $1
"#,
i64::from(l1_batch_number.0),
)
.instrument("check_tree_writes_presence")
.with_arg("l1_batch_number", &l1_batch_number)
.fetch_optional(self.storage)
.await?
.map(|row| row.tree_writes_are_present)
.unwrap_or(false))
}
}

/// These methods should only be used for tests.
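
A minimal usage sketch of the new BlocksDal accessors added above, assuming a ConnectionPool<Core> built elsewhere; the function name and error handling are hypothetical, while the DAL calls mirror the methods introduced in this diff:

use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_types::{writes::TreeWrite, L1BatchNumber};

// Sketch only: persist the tree writes computed for a batch, then read them back.
async fn roundtrip_tree_writes(
    pool: &ConnectionPool<Core>,
    l1_batch_number: L1BatchNumber,
    tree_writes: Vec<TreeWrite>,
) -> anyhow::Result<()> {
    let mut conn = pool.connection().await?;
    // Store the serialized writes in l1_batches.tree_writes.
    conn.blocks_dal()
        .set_tree_writes(l1_batch_number, tree_writes)
        .await?;
    // Downstream consumers can first check for presence, then load the writes.
    if conn
        .blocks_dal()
        .check_tree_writes_presence(l1_batch_number)
        .await?
    {
        let _writes: Option<Vec<TreeWrite>> =
            conn.blocks_dal().get_tree_writes(l1_batch_number).await?;
    }
    Ok(())
}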
34 changes: 20 additions & 14 deletions core/lib/dal/src/storage_logs_dedup_dal.rs
@@ -172,28 +172,34 @@ impl StorageLogsDedupDal<'_, '_> {
.map(|max| max as u64))
}

/// Returns the maximum enumeration index assigned in a specific L1 batch.
pub async fn max_enumeration_index_for_l1_batch(
/// Returns the maximum enumeration index assigned up to and including the provided L1 batch number.
pub async fn max_enumeration_index_by_l1_batch(
&mut self,
l1_batch_number: L1BatchNumber,
) -> DalResult<Option<u64>> {
let row = sqlx::query!(
Ok(sqlx::query!(
r#"
SELECT
MAX(INDEX) AS "max?"
FROM
initial_writes
WHERE
l1_batch_number = $1
l1_batch_number = (
SELECT
MAX(l1_batch_number) AS "max?"
FROM
initial_writes
WHERE
l1_batch_number <= $1
)
"#,
i64::from(l1_batch_number.0)
)
.instrument("max_enumeration_index_for_l1_batch")
.with_arg("l1_batch_number", &l1_batch_number)
.instrument("max_enumeration_index_by_l1_batch")
.fetch_one(self.storage)
.await?;

Ok(row.max.map(|max| max as u64))
.await?
.max
.map(|max| max as u64))
}

pub async fn initial_writes_for_batch(
@@ -326,12 +332,12 @@ mod tests {
use crate::{ConnectionPool, CoreDal};

#[tokio::test]
async fn getting_max_enumeration_index_for_batch() {
async fn getting_max_enumeration_index_in_batch() {
let pool = ConnectionPool::<Core>::test_pool().await;
let mut conn = pool.connection().await.unwrap();
let max_index = conn
.storage_logs_dedup_dal()
.max_enumeration_index_for_l1_batch(L1BatchNumber(0))
.max_enumeration_index_by_l1_batch(L1BatchNumber(0))
.await
.unwrap();
assert_eq!(max_index, None);
@@ -348,7 +354,7 @@

let max_index = conn
.storage_logs_dedup_dal()
.max_enumeration_index_for_l1_batch(L1BatchNumber(0))
.max_enumeration_index_by_l1_batch(L1BatchNumber(0))
.await
.unwrap();
assert_eq!(max_index, Some(2));
@@ -364,14 +370,14 @@

let max_index = conn
.storage_logs_dedup_dal()
.max_enumeration_index_for_l1_batch(L1BatchNumber(0))
.max_enumeration_index_by_l1_batch(L1BatchNumber(0))
.await
.unwrap();
assert_eq!(max_index, Some(2));

let max_index = conn
.storage_logs_dedup_dal()
.max_enumeration_index_for_l1_batch(L1BatchNumber(1))
.max_enumeration_index_by_l1_batch(L1BatchNumber(1))
.await
.unwrap();
assert_eq!(max_index, Some(4));
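
A usage sketch of the renamed accessor, assuming the same ConnectionPool<Core> setup as in the tests above. The point of the new subquery is the fallback: a batch without its own initial writes now inherits the latest index assigned in an earlier batch instead of yielding None. The helper name and the 1-based starting index are assumptions for illustration:

use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_types::L1BatchNumber;

// Sketch only: compute the next free enumeration index as of a given batch.
async fn next_enumeration_index(
    pool: &ConnectionPool<Core>,
    l1_batch_number: L1BatchNumber,
) -> anyhow::Result<u64> {
    let mut conn = pool.connection().await?;
    let max_index = conn
        .storage_logs_dedup_dal()
        .max_enumeration_index_by_l1_batch(l1_batch_number)
        .await?;
    // Assuming leaf enumeration indices are 1-based, an empty tree starts at 1.
    Ok(max_index.map_or(1, |index| index + 1))
}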
6 changes: 3 additions & 3 deletions core/lib/multivm/src/glue/types/vm/vm_block_result.rs
@@ -71,7 +71,7 @@ impl GlueFrom<crate::vm_m5::vm_instance::VmBlockResult> for crate::interface::Fi
},
final_bootloader_memory: None,
pubdata_input: None,
initially_written_slots: None,
state_diffs: None,
}
}
}
@@ -131,7 +131,7 @@ impl GlueFrom<crate::vm_m6::vm_instance::VmBlockResult> for crate::interface::Fi
},
final_bootloader_memory: None,
pubdata_input: None,
initially_written_slots: None,
state_diffs: None,
}
}
}
@@ -189,7 +189,7 @@ impl GlueFrom<crate::vm_1_3_2::vm_instance::VmBlockResult> for crate::interface:
},
final_bootloader_memory: None,
pubdata_input: None,
initially_written_slots: None,
state_diffs: None,
}
}
}
2 changes: 1 addition & 1 deletion core/lib/multivm/src/interface/traits/vm.rs
@@ -143,7 +143,7 @@ pub trait VmInterface<S, H: HistoryMode> {
final_execution_state: execution_state,
final_bootloader_memory: Some(bootloader_memory),
pubdata_input: None,
initially_written_slots: None,
state_diffs: None,
}
}
}
@@ -1,4 +1,4 @@
use zksync_types::H256;
use zksync_types::writes::StateDiffRecord;

use super::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs};

@@ -13,7 +13,6 @@ pub struct FinishedL1Batch {
pub final_bootloader_memory: Option<BootloaderMemory>,
/// Pubdata to be published on L1. Could be none for old versions of the VM.
pub pubdata_input: Option<Vec<u8>>,
/// List of hashed keys of slots that were initially written in the batch.
/// Could be none for old versions of the VM.
pub initially_written_slots: Option<Vec<H256>>,
/// List of state diffs. Could be none for old versions of the VM.
pub state_diffs: Option<Vec<StateDiffRecord>>,
}
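
Consumers that previously read initially_written_slots can recover the same hashed keys from state_diffs, mirroring the filter removed from vm_1_4_2 (see its diff below). A sketch, assuming StateDiffRecord keeps the is_write_initial() helper and derived_key field used there:

use zksync_types::{writes::StateDiffRecord, H256};

// Sketch only: derive the hashed keys of initially written slots from state diffs,
// reproducing what the removed initially_written_slots field used to carry.
fn initially_written_slots(state_diffs: &[StateDiffRecord]) -> Vec<H256> {
    state_diffs
        .iter()
        .filter_map(|record| {
            record
                .is_write_initial()
                .then_some(H256(record.derived_key))
        })
        .collect()
}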
2 changes: 1 addition & 1 deletion core/lib/multivm/src/versions/vm_1_4_1/vm.rs
@@ -179,7 +179,7 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface<S, H> for Vm<S, H> {
.clone()
.build_pubdata(false),
),
initially_written_slots: None,
state_diffs: None,
}
}
}
12 changes: 3 additions & 9 deletions core/lib/multivm/src/versions/vm_1_4_2/vm.rs
@@ -3,7 +3,7 @@ use zksync_state::{StoragePtr, WriteStorage};
use zksync_types::{
event::extract_l2tol1logs_from_l1_messenger,
l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log},
Transaction, H256,
Transaction,
};
use zksync_utils::bytecode::CompressedBytecodeInfo;

@@ -179,17 +179,11 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface<S, H> for Vm<S, H> {
.clone()
.build_pubdata(false),
),
initially_written_slots: Some(
state_diffs: Some(
self.bootloader_state
.get_pubdata_information()
.state_diffs
.iter()
.filter_map(|record| {
record
.is_write_initial()
.then_some(H256(record.derived_key))
})
.collect(),
.clone(),
),
}
}
2 changes: 1 addition & 1 deletion core/lib/multivm/src/versions/vm_boojum_integration/vm.rs
@@ -179,7 +179,7 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface<S, H> for Vm<S, H> {
.clone()
.build_pubdata(false),
),
initially_written_slots: None,
state_diffs: None,
}
}
}