From 872985f6de9d55248f42ce844ae621ab25829ed3 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 15 Nov 2023 21:55:23 +0100
Subject: [PATCH 01/43] feat: Snapshot Creator

---
 .githooks/pre-commit                          |   2 +-
 Cargo.lock                                    |  24 ++
 Cargo.toml                                    |   1 +
 core/bin/snapshots_creator/Cargo.toml         |  30 +++
 core/bin/snapshots_creator/README.md          |  68 +++++
 core/bin/snapshots_creator/src/main.rs        | 251 ++++++++++++++++++
 ...1013163109_create_snapshots_table.down.sql |   1 +
 ...231013163109_create_snapshots_table.up.sql |   7 +
 core/lib/dal/src/lib.rs                       |  11 +
 core/lib/dal/src/snapshots_creator_dal.rs     | 110 ++++++++
 core/lib/dal/src/snapshots_dal.rs             | 146 ++++++++++
 core/lib/env_config/src/object_store.rs       |  24 ++
 core/lib/object_store/Cargo.toml              |   2 +
 core/lib/object_store/src/file.rs             |   5 +
 core/lib/object_store/src/gcs.rs              |   8 +
 core/lib/object_store/src/mock.rs             |   4 +
 core/lib/object_store/src/objects.rs          |  65 +++++
 core/lib/object_store/src/raw.rs              |   8 +
 core/lib/types/src/lib.rs                     |   1 +
 core/lib/types/src/snapshots.rs               |  80 ++++++
 core/lib/web3_decl/src/namespaces/mod.rs      |   7 +-
 .../lib/web3_decl/src/namespaces/snapshots.rs |  26 ++
 .../web3/backend_jsonrpc/namespaces/mod.rs    |   2 +
 .../backend_jsonrpc/namespaces/snapshots.rs   |  51 ++++
 .../web3/backend_jsonrpsee/namespaces/mod.rs  |   1 +
 .../backend_jsonrpsee/namespaces/snapshots.rs |  28 ++
 .../zksync_core/src/api_server/web3/mod.rs    |  30 ++-
 .../src/api_server/web3/namespaces/mod.rs     |   3 +-
 .../api_server/web3/namespaces/snapshots.rs   |  79 ++++++
 core/tests/ts-integration/package.json        |   3 +-
 .../tests/api/snapshots-creator.test.ts       |  82 ++++++
 etc/env/base/object_store.toml                |   7 +
 infrastructure/zk/src/run/run.ts              |   6 +
 infrastructure/zk/src/test/integration.ts     |  13 +
 34 files changed, 1178 insertions(+), 8 deletions(-)
 create mode 100644 core/bin/snapshots_creator/Cargo.toml
 create mode 100644 core/bin/snapshots_creator/README.md
 create mode 100644 core/bin/snapshots_creator/src/main.rs
 create mode 100644 core/lib/dal/migrations/20231013163109_create_snapshots_table.down.sql
 create mode 100644 core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql
 create mode 100644 core/lib/dal/src/snapshots_creator_dal.rs
 create mode 100644 core/lib/dal/src/snapshots_dal.rs
 create mode 100644 core/lib/types/src/snapshots.rs
 create mode 100644 core/lib/web3_decl/src/namespaces/snapshots.rs
 create mode 100644 core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs
 create mode 100644 core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs
 create mode 100644 core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
 create mode 100644 core/tests/ts-integration/tests/api/snapshots-creator.test.ts

diff --git a/.githooks/pre-commit b/.githooks/pre-commit
index 1f0c6b945b65..81081e7dddfe 100755
--- a/.githooks/pre-commit
+++ b/.githooks/pre-commit
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Pre-commit hook verifying that inappropriate code will not be committed.
 
diff --git a/Cargo.lock b/Cargo.lock
index ec650188c8a9..dc19581dd773 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6538,6 +6538,28 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "snapshots_creator"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "futures 0.3.28",
+ "prometheus_exporter",
+ "serde",
+ "serde_json",
+ "tokio",
+ "tracing",
+ "vise",
+ "vlog",
+ "zksync_config",
+ "zksync_core",
+ "zksync_dal",
+ "zksync_env_config",
+ "zksync_object_store",
+ "zksync_types",
+ "zksync_utils",
+]
+
 [[package]]
 name = "snark_wrapper"
 version = "0.1.0"
@@ -8922,9 +8944,11 @@ dependencies = [
  "anyhow",
  "async-trait",
  "bincode",
+ "flate2",
  "google-cloud-auth",
  "google-cloud-storage",
  "http",
+ "serde_json",
  "tempdir",
  "tokio",
  "tracing",
diff --git a/Cargo.toml b/Cargo.toml
index 75a4c7237d2b..ac01673bfb99 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -6,6 +6,7 @@ members = [
     "core/bin/external_node",
     "core/bin/merkle_tree_consistency_checker",
     "core/bin/rocksdb_util",
+    "core/bin/snapshots_creator",
     "core/bin/storage_logs_dedup_migration",
     "core/bin/system-constants-generator",
     "core/bin/verification_key_generator_and_server",
diff --git a/core/bin/snapshots_creator/Cargo.toml b/core/bin/snapshots_creator/Cargo.toml
new file mode 100644
index 000000000000..c11cbbb49495
--- /dev/null
+++ b/core/bin/snapshots_creator/Cargo.toml
@@ -0,0 +1,30 @@
+[package]
+name = "snapshots_creator"
+version = "0.1.0"
+edition = "2021"
+authors = ["The Matter Labs Team <hello@matterlabs.dev>"]
+homepage = "https://zksync.io/"
+repository = "https://github.com/matter-labs/zksync-era"
+license = "MIT OR Apache-2.0"
+keywords = ["blockchain", "zksync"]
+categories = ["cryptography"]
+publish = false # We don't want to publish our binaries.
+
+[dependencies]
+vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "dd05139b76ab0843443ab3ff730174942c825dae" }
+prometheus_exporter = { path = "../../lib/prometheus_exporter" }
+zksync_config = { path = "../../lib/config" }
+zksync_dal = { path = "../../lib/dal" }
+zksync_env_config = { path = "../../lib/env_config" }
+zksync_utils = { path = "../../lib/utils" }
+zksync_types = { path = "../../lib/types" }
+zksync_core = { path = "../../lib/zksync_core" }
+zksync_object_store = { path = "../../lib/object_store" }
+vlog = { path = "../../lib/vlog" }
+
+anyhow = "1.0"
+tokio = { version = "1", features = ["full"] }
+tracing = "0.1"
+futures = "0.3"
+serde = { version = "1.0.189", features = ["derive"] }
+serde_json = "1.0"
diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md
new file mode 100644
index 000000000000..f2e45f6db135
--- /dev/null
+++ b/core/bin/snapshots_creator/README.md
@@ -0,0 +1,68 @@
+# Snapshots Creator
+
+Snapshot creator is a small command line tool for creating a snapshot of a zkSync node, which allows an external node
+(EN) to initialize itself to a certain L1 batch.
+
+Snapshots do not contain the full transaction history, but rather a minimal subset of information needed to bootstrap
+an EN.
+
+Usage (local development): first run `zk env dev`, then run the creator with
+`cargo run --bin snapshots_creator --release`.
+
+Depending on the `blob_store` config, snapshot contents can be stored either in the local filesystem or in GCS.
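+The `[snapshots_object_store]` section in `etc/env/base/object_store.toml` provides the default (file-backed)
+configuration for local development.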
+
+## Snapshots format
+
+Each snapshot consists of three types of objects: a header, storage log chunks and factory deps:
+
+- Snapshot header (currently returned by the `snapshots` namespace of the JSON-RPC API)
+
+```rust
+pub struct SnapshotHeader {
+    pub l1_batch_number: L1BatchNumber,
+    pub miniblock_number: MiniblockNumber,
+    // ordered by chunk_id
+    pub storage_logs_chunks: Vec<SnapshotStorageLogsChunkMetadata>,
+    pub factory_deps_filepath: String,
+    pub last_l1_batch_with_metadata: L1BatchWithMetadata,
+    pub generated_at: DateTime<Utc>,
+}
+
+pub struct SnapshotStorageLogsChunkMetadata {
+    pub chunk_id: u64,
+    // can be either a GCS URL or a filesystem path
+    pub filepath: String,
+}
+```
+
+- Snapshot storage log chunks (most likely to be stored as gzipped protobuf files, but this part is still WIP):
+
+```rust
+pub struct SnapshotStorageLogsChunk {
+    // sorted by hashed_keys interpreted as little-endian numbers
+    pub storage_logs: Vec<SnapshotStorageLog>,
+}
+
+// "most recent" for each key together with info when the key was first used
+pub struct SnapshotStorageLog {
+    pub key: StorageKey,
+    pub value: StorageValue,
+    pub l1_batch_number_of_initial_write: L1BatchNumber,
+    pub enumeration_index: u64,
+}
+```
+
+- Factory dependencies (most likely to be stored as protobufs in the very near future)
+
+```rust
+pub struct SnapshotFactoryDependencies {
+    pub factory_deps: Vec<SnapshotFactoryDependency>,
+}
+
+pub struct SnapshotFactoryDependency {
+    pub bytecode_hash: H256,
+    pub bytecode: Vec<u8>,
+}
+```
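+
+## Reading snapshot files
+
+As a minimal sketch (assuming the file-backed store and the gzipped-JSON format described above; `read_chunk` is a
+hypothetical helper, not part of this crate), a storage logs chunk file can be read back like this:
+
+```rust
+use std::{fs::File, io::Read};
+
+use flate2::read::GzDecoder;
+use zksync_types::snapshots::SnapshotStorageLogsChunk;
+
+// Hypothetical helper: decompresses one chunk file produced by the creator and
+// deserializes it from JSON.
+fn read_chunk(path: &str) -> anyhow::Result<SnapshotStorageLogsChunk> {
+    let mut decompressed = Vec::new();
+    GzDecoder::new(File::open(path)?).read_to_end(&mut decompressed)?;
+    Ok(serde_json::from_slice(&decompressed)?)
+}
+```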
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
new file mode 100644
index 000000000000..60629824c7a9
--- /dev/null
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -0,0 +1,251 @@
+use anyhow::Context as _;
+use prometheus_exporter::PrometheusExporterConfig;
+use tokio::sync::watch;
+use tokio::sync::watch::Receiver;
+use vise::{Gauge, Metrics};
+use zksync_config::configs::PrometheusConfig;
+use zksync_config::PostgresConfig;
+
+use zksync_dal::ConnectionPool;
+use zksync_env_config::object_store::SnapshotsObjectStoreConfig;
+use zksync_env_config::FromEnv;
+use zksync_object_store::{ObjectStore, ObjectStoreFactory};
+use zksync_types::snapshots::{
+    SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey,
+};
+use zksync_types::{L1BatchNumber, MiniblockNumber};
+use zksync_utils::ceil_div;
+use zksync_utils::time::seconds_since_epoch;
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "snapshots_creator")]
+struct SnapshotsCreatorMetrics {
+    /// Number of chunks the snapshot's storage logs were split into.
+    pub storage_logs_chunks_count: Gauge<u64>,
+    /// Time it took to generate the last snapshot, in seconds.
+    pub snapshot_generation_duration: Gauge<u64>,
+    /// L1 batch number the last snapshot was generated for.
+    pub snapshot_l1_batch: Gauge<u64>,
+    /// Unix timestamp (in seconds) of the last snapshot generation.
+    pub snapshot_generation_timestamp: Gauge<u64>,
+}
+
+#[vise::register]
+pub(crate) static METRICS: vise::Global<SnapshotsCreatorMetrics> = vise::Global::new();
+
+async fn maybe_enable_prometheus_metrics(stop_receiver: Receiver<bool>) -> anyhow::Result<()> {
+    let prometheus_config = PrometheusConfig::from_env().ok();
+    if let Some(prometheus_config) = prometheus_config {
+        let exporter_config = PrometheusExporterConfig::push(
+            prometheus_config.gateway_endpoint(),
+            prometheus_config.push_interval(),
+        );
+
+        tracing::info!("Starting prometheus exporter with config {prometheus_config:?}");
+        tokio::spawn(exporter_config.run(stop_receiver));
+    } else {
+        tracing::info!("Starting without prometheus exporter");
+    }
+    Ok(())
+}
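+
+/// Fetches the `chunk_id`-th chunk of storage logs at `l1_batch_number`, stores it
+/// in the blob store and returns the output filepath.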
+async fn process_storage_logs_single_chunk(
+    blob_store: &dyn ObjectStore,
+    pool: &ConnectionPool,
+    l1_batch_number: L1BatchNumber,
+    chunk_id: u64,
+    chunk_size: u64,
+    chunks_count: u64,
+) -> anyhow::Result<String> {
+    let mut conn = pool.access_storage().await?;
+    let logs = conn
+        .snapshots_creator_dal()
+        .get_storage_logs_chunk(l1_batch_number, chunk_id, chunk_size)
+        .await
+        .context("Error fetching storage logs count")?;
+    let storage_logs_chunk = SnapshotStorageLogsChunk { storage_logs: logs };
+    let key = SnapshotStorageLogsStorageKey {
+        l1_batch_number,
+        chunk_id,
+    };
+    let filename = blob_store
+        .put(key, &storage_logs_chunk)
+        .await
+        .context("Error storing storage logs chunk in blob store")?;
+
+    let output_filepath_prefix = blob_store.get_storage_prefix::<SnapshotStorageLogsChunk>();
+    let output_filepath = format!("{output_filepath_prefix}/{filename}");
+
+    tracing::info!(
+        "Finished storing storage logs chunk {}/{chunks_count}, output stored in {output_filepath}",
+        chunk_id + 1,
+    );
+    Ok(output_filepath)
+}
+
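+/// Fetches all factory dependencies up to `miniblock_number`, stores them in the
+/// blob store as a single object and returns the output filepath.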
+async fn process_factory_deps(
+    blob_store: &dyn ObjectStore,
+    pool: &ConnectionPool,
+    miniblock_number: MiniblockNumber,
+    l1_batch_number: L1BatchNumber,
+) -> anyhow::Result<String> {
+    tracing::info!("Processing factory dependencies");
+    let mut conn = pool.access_storage().await?;
+    let factory_deps = conn
+        .snapshots_creator_dal()
+        .get_all_factory_deps(miniblock_number)
+        .await;
+    let factory_deps = SnapshotFactoryDependencies { factory_deps };
+    let filename = blob_store
+        .put(l1_batch_number, &factory_deps)
+        .await
+        .context("Error storing factory deps in blob store")?;
+    let output_filepath_prefix = blob_store.get_storage_prefix::<SnapshotFactoryDependencies>();
+    let output_filepath = format!("{output_filepath_prefix}/{filename}");
+    tracing::info!(
+        "Finished processing factory dependencies, output stored in {}",
+        output_filepath
+    );
+    Ok(output_filepath)
+}
+
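+/// Generates a snapshot for the latest sealed L1 batch minus one (so that after a
+/// restore, the external node has at least one batch to fetch), unless a snapshot
+/// for that batch already exists.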
+async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::Result<()> {
+    let mut conn = pool.access_storage().await?;
+    let start_time = seconds_since_epoch();
+
+    let l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await? - 1; // we subtract 1 so that after restore, EN node has at least one l1 batch to fetch
+
+    if conn
+        .snapshots_dal()
+        .get_snapshot_metadata(l1_batch_number)
+        .await?
+        .is_some()
+    {
+        tracing::info!(
+            "Snapshot for L1 batch number {} already exists, exiting",
+            l1_batch_number
+        );
+        return Ok(());
+    }
+
+    let miniblock_number = conn
+        .blocks_dal()
+        .get_miniblock_range_of_l1_batch(l1_batch_number)
+        .await?
+        .unwrap()
+        .1;
+    let storage_logs_count = conn
+        .snapshots_creator_dal()
+        .get_storage_logs_count(l1_batch_number)
+        .await?;
+
+    drop(conn);
+
+    // TODO: load the chunk size from config
+    let chunk_size = 1_000_000;
+    let chunks_count = ceil_div(storage_logs_count, chunk_size);
+
+    tracing::info!(
+        "Creating snapshot for storage logs up to miniblock {}, l1_batch {}",
+        miniblock_number,
+        l1_batch_number.0
+    );
+    tracing::info!(
+        "{} chunks of max size {} will be generated",
+        chunks_count,
+        chunk_size
+    );
+
+    let factory_deps_output_file =
+        process_factory_deps(&*blob_store, &pool, miniblock_number, l1_batch_number).await?;
+
+    let mut storage_logs_output_files = vec![];
+
+    for chunk_id in 0..chunks_count {
+        tracing::info!(
+            "Processing storage logs chunk {}/{chunks_count}",
+            chunk_id + 1
+        );
+        let output_file = process_storage_logs_single_chunk(
+            &*blob_store,
+            &pool,
+            l1_batch_number,
+            chunk_id,
+            chunk_size,
+            chunks_count,
+        )
+        .await?;
+        storage_logs_output_files.push(output_file.clone());
+    }
+
+    let mut conn = pool.access_storage().await?;
+
+    conn.snapshots_dal()
+        .add_snapshot(
+            l1_batch_number,
+            &storage_logs_output_files,
+            factory_deps_output_file,
+        )
+        .await?;
+
+    METRICS.snapshot_l1_batch.set(u64::from(l1_batch_number.0));
+    METRICS.storage_logs_chunks_count.set(chunks_count);
+    METRICS
+        .snapshot_generation_timestamp
+        .set(seconds_since_epoch());
+    METRICS
+        .snapshot_generation_duration
+        .set(seconds_since_epoch() - start_time);
+
+    tracing::info!(
+        r#"Run metrics:
+snapshot_generation_duration: {}sec
+snapshot_l1_batch: {},
+snapshot_generation_timestamp: {}
+storage_logs_chunks_count: {}
+  "#,
+        METRICS.snapshot_generation_duration.get(),
+        METRICS.snapshot_l1_batch.get(),
+        METRICS.snapshot_generation_timestamp.get(),
+        METRICS.storage_logs_chunks_count.get()
+    );
+
+    Ok(())
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let (stop_sender, stop_receiver) = watch::channel(false);
+
+    tracing::info!("Starting snapshots creator");
+    #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach.
+    let log_format = vlog::log_format_from_env();
+    #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach.
+    let sentry_url = vlog::sentry_url_from_env();
+    #[allow(deprecated)] // TODO (QIT-21): Use centralized configuration approach.
+    let environment = vlog::environment_from_env();
+
+    let mut builder = vlog::ObservabilityBuilder::new().with_log_format(log_format);
+    if let Some(sentry_url) = sentry_url {
+        builder = builder
+            .with_sentry_url(&sentry_url)
+            .context("Invalid Sentry URL")?
+            .with_sentry_environment(environment);
+    }
+    let _guard = builder.build();
+
+    tracing::info!("Starting snapshots creator");
+    maybe_enable_prometheus_metrics(stop_receiver).await?;
+
+    let object_store_config =
+        SnapshotsObjectStoreConfig::from_env().context("SnapshotsObjectStoreConfig::from_env()")?;
+    let blob_store = ObjectStoreFactory::new(object_store_config.0)
+        .create_store()
+        .await;
+
+    let postgres_config = PostgresConfig::from_env().context("PostgresConfig")?;
+    let pool = ConnectionPool::singleton(postgres_config.replica_url()?)
+        .build()
+        .await?;
+
+    run(blob_store, pool).await?;
+    tracing::info!("Finished running snapshot creator!");
+    stop_sender.send(true).ok();
+    Ok(())
+}
diff --git a/core/lib/dal/migrations/20231013163109_create_snapshots_table.down.sql b/core/lib/dal/migrations/20231013163109_create_snapshots_table.down.sql
new file mode 100644
index 000000000000..708ff00f00e5
--- /dev/null
+++ b/core/lib/dal/migrations/20231013163109_create_snapshots_table.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS snapshots;
diff --git a/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql b/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql
new file mode 100644
index 000000000000..b1ca08d67ebb
--- /dev/null
+++ b/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql
@@ -0,0 +1,7 @@
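+-- One row per generated snapshot; the filepath columns point into the configured object store.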
+CREATE TABLE snapshots
+(
+    l1_batch_number          BIGINT    NOT NULL PRIMARY KEY,
+    storage_logs_filepaths   TEXT[]    NOT NULL,
+    factory_deps_filepath    TEXT      NOT NULL,
+    created_at               TIMESTAMP NOT NULL
+);
diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs
index d934c2c06e63..f5c55524d6f3 100644
--- a/core/lib/dal/src/lib.rs
+++ b/core/lib/dal/src/lib.rs
@@ -16,6 +16,7 @@ use crate::{
     fri_witness_generator_dal::FriWitnessGeneratorDal, gpu_prover_queue_dal::GpuProverQueueDal,
     proof_generation_dal::ProofGenerationDal, protocol_versions_dal::ProtocolVersionsDal,
     protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, prover_dal::ProverDal,
+    snapshots_creator_dal::SnapshotsCreatorDal, snapshots_dal::SnapshotsDal,
     storage_dal::StorageDal, storage_logs_dal::StorageLogsDal,
     storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal,
     sync_dal::SyncDal, system_dal::SystemDal, tokens_dal::TokensDal,
@@ -49,6 +50,8 @@ pub mod proof_generation_dal;
 pub mod protocol_versions_dal;
 pub mod protocol_versions_web3_dal;
 pub mod prover_dal;
+pub mod snapshots_creator_dal;
+pub mod snapshots_dal;
 pub mod storage_dal;
 pub mod storage_logs_dal;
 pub mod storage_logs_dedup_dal;
@@ -235,4 +238,12 @@ impl<'a> StorageProcessor<'a> {
     pub fn system_dal(&mut self) -> SystemDal<'_, 'a> {
         SystemDal { storage: self }
     }
+
+    pub fn snapshots_dal(&mut self) -> SnapshotsDal<'_, 'a> {
+        SnapshotsDal { storage: self }
+    }
+
+    pub fn snapshots_creator_dal(&mut self) -> SnapshotsCreatorDal<'_, 'a> {
+        SnapshotsCreatorDal { storage: self }
+    }
 }
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
new file mode 100644
index 000000000000..577af1284193
--- /dev/null
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -0,0 +1,110 @@
+use crate::instrument::InstrumentExt;
+use crate::StorageProcessor;
+use zksync_types::snapshots::{SnapshotFactoryDependency, SnapshotStorageLog};
+use zksync_types::{AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, H256};
+
+#[derive(Debug)]
+pub struct SnapshotsCreatorDal<'a, 'c> {
+    pub(crate) storage: &'a mut StorageProcessor<'c>,
+}
+
+impl SnapshotsCreatorDal<'_, '_> {
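+    /// Returns the number of distinct storage keys written up to and including
+    /// `l1_batch_number`, based on the `initial_writes` table.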
+    pub async fn get_storage_logs_count(
+        &mut self,
+        l1_batch_number: L1BatchNumber,
+    ) -> Result<u64, sqlx::Error> {
+        let count = sqlx::query!(
+            "SELECT count(*) FROM initial_writes WHERE l1_batch_number <= $1",
+            l1_batch_number.0 as i32
+        )
+        .fetch_one(self.storage.conn())
+        .await?
+        .count
+        .unwrap_or(0);
+        Ok(count as u64)
+    }
+
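+    /// Returns the `chunk_id`-th chunk of at most `chunk_size` storage logs, reflecting
+    /// the latest values as of `l1_batch_number`, ordered by hashed key bytes.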
+    pub async fn get_storage_logs_chunk(
+        &mut self,
+        l1_batch_number: L1BatchNumber,
+        chunk_id: u64,
+        chunk_size: u64,
+    ) -> Result<Vec<SnapshotStorageLog>, sqlx::Error> {
+        let miniblock_number: i64 = sqlx::query!(
+            "select MAX(number) from miniblocks where l1_batch_number = $1",
+            l1_batch_number.0 as i64
+        )
+        .fetch_one(self.storage.conn())
+        .await?
+        .max
+        .unwrap_or_default();
+
+        let storage_logs = sqlx::query!(
+            r#"
+            SELECT storage_logs.key,
+                   storage_logs.value,
+                   storage_logs.address,
+                   storage_logs.miniblock_number,
+                   initial_writes.l1_batch_number,
+                   initial_writes.index
+            FROM (SELECT hashed_key,
+                         max(ARRAY [miniblock_number, operation_number]::int[]) AS op
+                  FROM storage_logs
+                  WHERE miniblock_number <= $1
+                  GROUP BY hashed_key
+                  ORDER BY hashed_key) AS keys
+                     INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key
+                AND storage_logs.miniblock_number = keys.op[1]
+                AND storage_logs.operation_number = keys.op[2]
+                     INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key
+            WHERE miniblock_number <= $1
+            ORDER BY ARRAY(
+                 SELECT get_byte(storage_logs.hashed_key, 32 - generate_series)
+                 FROM generate_series(1, 32)
+             )
+            LIMIT $2 OFFSET $3;
+             "#,
+            miniblock_number,
+            chunk_size as i64,
+            (chunk_size * chunk_id) as i64
+        )
+        .instrument("get_storage_logs_chunk")
+        .report_latency()
+        .fetch_all(self.storage.conn())
+        .await?
+        .iter()
+        .map(|row| SnapshotStorageLog {
+            key: StorageKey::new(
+                AccountTreeId::new(Address::from_slice(&row.address)),
+                H256::from_slice(&row.key),
+            ),
+            value: H256::from_slice(&row.value),
+            l1_batch_number_of_initial_write: L1BatchNumber(row.l1_batch_number as u32),
+            enumeration_index: row.index as u64,
+        })
+        .collect();
+        Ok(storage_logs)
+    }
+
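+    /// Returns all factory dependencies deployed up to and including `miniblock_number`.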
+    pub async fn get_all_factory_deps(
+        &mut self,
+        miniblock_number: MiniblockNumber,
+    ) -> Vec<SnapshotFactoryDependency> {
+        sqlx::query!(
+            "SELECT bytecode, bytecode_hash FROM factory_deps WHERE miniblock_number <= $1",
+            miniblock_number.0 as i64,
+        )
+        .instrument("get_all_factory_deps")
+        .report_latency()
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| SnapshotFactoryDependency {
+            bytecode_hash: H256::from_slice(&row.bytecode_hash),
+            bytecode: row.bytecode,
+        })
+        .collect()
+    }
+}
diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs
new file mode 100644
index 000000000000..99a6363f166c
--- /dev/null
+++ b/core/lib/dal/src/snapshots_dal.rs
@@ -0,0 +1,146 @@
+use crate::StorageProcessor;
+use sqlx::types::chrono::{DateTime, Utc};
+use zksync_types::snapshots::{AllSnapshots, SnapshotMetadata};
+use zksync_types::L1BatchNumber;
+
+#[derive(Debug)]
+pub struct SnapshotsDal<'a, 'c> {
+    pub(crate) storage: &'a mut StorageProcessor<'c>,
+}
+
+impl SnapshotsDal<'_, '_> {
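+    /// Persists metadata of a newly generated snapshot.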
+    pub async fn add_snapshot(
+        &mut self,
+        l1_batch_number: L1BatchNumber,
+        storage_logs_filepaths: &[String],
+        factory_deps_filepath: String,
+    ) -> Result<(), sqlx::Error> {
+        sqlx::query!(
+            "INSERT INTO snapshots (l1_batch_number, created_at, storage_logs_filepaths, factory_deps_filepath) \
+             VALUES ($1, now(), $2, $3)",
+            l1_batch_number.0 as i32,
+            storage_logs_filepaths,
+            factory_deps_filepath,
+        )
+        .execute(self.storage.conn())
+        .await?;
+        Ok(())
+    }
+
+    pub async fn get_all_snapshots(&mut self) -> Result<AllSnapshots, sqlx::Error> {
+        let records: Vec<SnapshotMetadata> = sqlx::query!(
+            "SELECT l1_batch_number, created_at, factory_deps_filepath FROM snapshots"
+        )
+        .fetch_all(self.storage.conn())
+        .await?
+        .into_iter()
+        .map(|r| SnapshotMetadata {
+            l1_batch_number: L1BatchNumber(r.l1_batch_number as u32),
+            generated_at: DateTime::<Utc>::from_naive_utc_and_offset(r.created_at, Utc),
+            factory_deps_filepath: r.factory_deps_filepath,
+        })
+        .collect();
+        Ok(AllSnapshots { snapshots: records })
+    }
+
+    pub async fn get_snapshot_metadata(
+        &mut self,
+        l1_batch_number: L1BatchNumber,
+    ) -> Result<Option<SnapshotMetadata>, sqlx::Error> {
+        let record: Option<SnapshotMetadata> = sqlx::query!(
+            "SELECT l1_batch_number, created_at, factory_deps_filepath FROM snapshots WHERE l1_batch_number = $1",
+            l1_batch_number.0 as i32
+        )
+        .fetch_optional(self.storage.conn())
+        .await?
+        .map(|r| SnapshotMetadata {
+            l1_batch_number: L1BatchNumber(r.l1_batch_number as u32),
+            generated_at: DateTime::<Utc>::from_naive_utc_and_offset(r.created_at, Utc),
+            factory_deps_filepath: r.factory_deps_filepath,
+        });
+        Ok(record)
+    }
+
+    pub async fn get_snapshot_files(
+        &mut self,
+        l1_batch_number: L1BatchNumber,
+    ) -> Result<Option<Vec<String>>, sqlx::Error> {
+        let record = sqlx::query!(
+            "SELECT storage_logs_filepaths \
+            FROM snapshots WHERE l1_batch_number = $1",
+            l1_batch_number.0 as i32
+        )
+        .fetch_optional(self.storage.conn())
+        .await?;
+
+        Ok(record.map(|r| r.storage_logs_filepaths))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::ConnectionPool;
+    use zksync_types::L1BatchNumber;
+
+    #[tokio::test]
+    async fn adding_snapshot() {
+        let pool = ConnectionPool::test_pool().await;
+        let mut conn = pool.access_storage().await.unwrap();
+        let mut dal = conn.snapshots_dal();
+        let l1_batch_number = L1BatchNumber(100);
+        dal.add_snapshot(
+            l1_batch_number,
+            &[],
+            "gs:///bucket/factory_deps.bin".to_string(),
+        )
+        .await
+        .expect("Failed to add snapshot");
+
+        let snapshots = dal
+            .get_all_snapshots()
+            .await
+            .expect("Failed to retrieve snapshots");
+        assert_eq!(1, snapshots.snapshots.len());
+        assert_eq!(snapshots.snapshots[0].l1_batch_number, l1_batch_number);
+
+        let snapshot_metadata = dal
+            .get_snapshot_metadata(l1_batch_number)
+            .await
+            .expect("Failed to retrieve snapshot")
+            .unwrap();
+        assert_eq!(snapshot_metadata.l1_batch_number, l1_batch_number);
+    }
+
+    #[tokio::test]
+    async fn adding_files() {
+        let pool = ConnectionPool::test_pool().await;
+        let mut conn = pool.access_storage().await.unwrap();
+        let mut dal = conn.snapshots_dal();
+        let l1_batch_number = L1BatchNumber(100);
+        dal.add_snapshot(
+            l1_batch_number,
+            &[
+                "gs:///bucket/test_file1.bin".to_string(),
+                "gs:///bucket/test_file2.bin".to_string(),
+            ],
+            "gs:///bucket/factory_deps.bin".to_string(),
+        )
+        .await
+        .expect("Failed to add snapshot");
+
+        let files = dal
+            .get_snapshot_files(l1_batch_number)
+            .await
+            .expect("Failed to retrieve snapshot");
+        assert!(files.is_some());
+        let files = files.unwrap();
+        assert!(files.contains(&"gs:///bucket/test_file1.bin".to_string()));
+        assert!(files.contains(&"gs:///bucket/test_file2.bin".to_string()));
+    }
+}
diff --git a/core/lib/env_config/src/object_store.rs b/core/lib/env_config/src/object_store.rs
index 3b4afe86b522..f56d57dfd6b4 100644
--- a/core/lib/env_config/src/object_store.rs
+++ b/core/lib/env_config/src/object_store.rs
@@ -30,6 +30,16 @@ impl FromEnv for ProverObjectStoreConfig {
     }
 }
 
+#[derive(Debug)]
+pub struct SnapshotsObjectStoreConfig(pub ObjectStoreConfig);
+
+impl FromEnv for SnapshotsObjectStoreConfig {
+    fn from_env() -> anyhow::Result<Self> {
+        let config = envy_load("snapshots_object_store", "SNAPSHOTS_OBJECT_STORE_")?;
+        Ok(Self(config))
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig};
@@ -93,4 +103,18 @@ mod tests {
         let actual = ProverObjectStoreConfig::from_env().unwrap().0;
         assert_eq!(actual, expected_config("/prover_base_url"));
     }
+
+    #[test]
+    fn snapshots_bucket_config_from_env() {
+        let mut lock = MUTEX.lock();
+        let config = r#"
+            SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL="/snapshots_base_url"
+            SNAPSHOTS_OBJECT_STORE_MODE="FileBacked"
+            SNAPSHOTS_OBJECT_STORE_FILE_BACKED_BASE_PATH="artifacts"
+            SNAPSHOTS_OBJECT_STORE_GCS_CREDENTIAL_FILE_PATH="/path/to/credentials.json"
+            SNAPSHOTS_OBJECT_STORE_MAX_RETRIES="5"
+        "#;
+        lock.set_env(config);
+        let actual = SnapshotsObjectStoreConfig::from_env().unwrap().0;
+        assert_eq!(actual, expected_config("/snapshots_base_url"));
+    }
 }
diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml
index 20f52a995a8c..bd071d0d0024 100644
--- a/core/lib/object_store/Cargo.toml
+++ b/core/lib/object_store/Cargo.toml
@@ -20,6 +20,8 @@ bincode = "1"
 google-cloud-storage = "0.15.0"
 google-cloud-auth = "0.13.0"
 http = "0.2.9"
+serde_json = "1.0"
+flate2 = "1.0.28"
 tokio = { version = "1.21.2", features = ["full"] }
 tracing = "0.1"
 
diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs
index 6f589e83630d..6cfb93309a43 100644
--- a/core/lib/object_store/src/file.rs
+++ b/core/lib/object_store/src/file.rs
@@ -32,6 +32,7 @@ impl FileBackedObjectStore {
             Bucket::NodeAggregationWitnessJobsFri,
             Bucket::SchedulerWitnessJobsFri,
             Bucket::ProofsFri,
+            Bucket::StorageSnapshot,
         ] {
             let bucket_path = format!("{base_dir}/{bucket}");
             fs::create_dir_all(&bucket_path)
@@ -69,6 +70,10 @@ impl ObjectStore for FileBackedObjectStore {
         let filename = self.filename(bucket, key);
         fs::remove_file(filename).await.map_err(From::from)
     }
+
+    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String {
+        format!("{}/{}", self.base_dir, bucket)
+    }
 }
 
 #[cfg(test)]
diff --git a/core/lib/object_store/src/gcs.rs b/core/lib/object_store/src/gcs.rs
index 1d88aa5237ae..ecc9d7d26d7c 100644
--- a/core/lib/object_store/src/gcs.rs
+++ b/core/lib/object_store/src/gcs.rs
@@ -207,6 +207,14 @@ impl ObjectStore for GoogleCloudStorage {
     async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> {
         self.remove_inner(bucket.as_str(), key).await
     }
+
+    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String {
+        format!(
+            "https://storage.googleapis.com/{}/{}",
+            self.bucket_prefix.clone(),
+            bucket.as_str()
+        )
+    }
 }
 
 #[cfg(test)]
diff --git a/core/lib/object_store/src/mock.rs b/core/lib/object_store/src/mock.rs
index ac1a2fd7a444..7db220870ddb 100644
--- a/core/lib/object_store/src/mock.rs
+++ b/core/lib/object_store/src/mock.rs
@@ -45,4 +45,8 @@ impl ObjectStore for MockStore {
         bucket_map.remove(key);
         Ok(())
     }
+
+    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String {
+        format!("{bucket}")
+    }
 }
diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs
index 35808bb4686d..59a056923eb3 100644
--- a/core/lib/object_store/src/objects.rs
+++ b/core/lib/object_store/src/objects.rs
@@ -1,8 +1,13 @@
 //! Stored objects.
 
+use flate2::{read::GzDecoder, write::GzEncoder, Compression};
+use std::io::Read;
 use zksync_types::{
     aggregated_operations::L1BatchProofForL1,
     proofs::{AggregationRound, PrepareBasicCircuitsJob},
+    snapshots::{SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey},
     storage::witness_block_state::WitnessBlockState,
     zkevm_test_harness::{
         abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit,
@@ -65,6 +70,62 @@ macro_rules! serialize_using_bincode {
     };
 }
 
+// Snapshot objects are stored as gzipped JSON for now; see the TODOs below about
+// switching to a language-agnostic format such as protobuf.
+
+impl StoredObject for SnapshotFactoryDependencies {
+    const BUCKET: Bucket = Bucket::StorageSnapshot;
+    type Key<'a> = L1BatchNumber;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!("snapshot_l1_batch_{}_factory_deps.json.gzip", key)
+    }
+
+    // TODO: use a better, language-agnostic serialization format such as protobuf
+    fn serialize(&self) -> Result<Vec<u8>, BoxedError> {
+        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
+        serde_json::to_writer(&mut encoder, self).map_err(|e| BoxedError::from(e))?;
+        encoder.finish().map_err(From::from)
+    }
+
+    fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> {
+        let mut decoder = GzDecoder::new(&bytes[..]);
+        let mut decompressed_bytes = Vec::new();
+        decoder
+            .read_to_end(&mut decompressed_bytes)
+            .map_err(|e| BoxedError::from(e))?;
+        serde_json::from_slice(&decompressed_bytes).map_err(From::from)
+    }
+}
+
+impl StoredObject for SnapshotStorageLogsChunk {
+    const BUCKET: Bucket = Bucket::StorageSnapshot;
+    type Key<'a> = SnapshotStorageLogsStorageKey;
+
+    fn encode_key(key: Self::Key<'_>) -> String {
+        format!(
+            "snapshot_l1_batch_{}_storage_logs_part_{:0<3}.json.gzip",
+            key.l1_batch_number, key.chunk_id
+        )
+    }
+
+    // TODO: use a better, language-agnostic serialization format such as protobuf
+    fn serialize(&self) -> Result<Vec<u8>, BoxedError> {
+        let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
+        serde_json::to_writer(&mut encoder, self).map_err(|e| BoxedError::from(e))?;
+        encoder.finish().map_err(From::from)
+    }
+
+    fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> {
+        let mut decoder = GzDecoder::new(&bytes[..]);
+        let mut decompressed_bytes = Vec::new();
+        decoder
+            .read_to_end(&mut decompressed_bytes)
+            .map_err(|e| BoxedError::from(e))?;
+        serde_json::from_slice(&decompressed_bytes).map_err(From::from)
+    }
+}
+
 impl StoredObject for WitnessBlockState {
     const BUCKET: Bucket = Bucket::WitnessInput;
     type Key<'a> = L1BatchNumber;
@@ -244,4 +305,8 @@ impl dyn ObjectStore + '_ {
         self.put_raw(V::BUCKET, &key, bytes).await?;
         Ok(key)
     }
+
+    pub fn get_storage_prefix<V: StoredObject>(&self) -> String {
+        self.get_storage_prefix_raw(V::BUCKET)
+    }
 }
diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs
index c68b4cb978f6..75a6c63b01c5 100644
--- a/core/lib/object_store/src/raw.rs
+++ b/core/lib/object_store/src/raw.rs
@@ -19,6 +19,7 @@ pub enum Bucket {
     NodeAggregationWitnessJobsFri,
     SchedulerWitnessJobsFri,
     ProofsFri,
+    StorageSnapshot,
 }
 
 impl Bucket {
@@ -34,6 +35,7 @@ impl Bucket {
             Self::NodeAggregationWitnessJobsFri => "node_aggregation_witness_jobs_fri",
             Self::SchedulerWitnessJobsFri => "scheduler_witness_jobs_fri",
             Self::ProofsFri => "proofs_fri",
+            Self::StorageSnapshot => "storage_logs_snapshots",
         }
     }
 }
@@ -113,6 +115,8 @@ pub trait ObjectStore: fmt::Debug + Send + Sync {
     ///
     /// Returns an error if removal fails.
     async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError>;
+
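+    /// Returns the prefix (e.g. a local directory or a remote URL) under which
+    /// objects in `bucket` are stored.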
+    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String;
 }
 
 #[async_trait]
@@ -133,6 +137,10 @@ impl<T: ObjectStore + ?Sized> ObjectStore for Arc<T> {
     async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError> {
         (**self).remove_raw(bucket, key).await
     }
+
+    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String {
+        (**self).get_storage_prefix_raw(bucket)
+    }
 }
 
 #[derive(Debug)]
diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs
index 4574824b37f8..32a62c4df804 100644
--- a/core/lib/types/src/lib.rs
+++ b/core/lib/types/src/lib.rs
@@ -44,6 +44,7 @@ pub mod l2;
 pub mod l2_to_l1_log;
 pub mod priority_op_onchain_data;
 pub mod protocol_version;
+pub mod snapshots;
 pub mod storage;
 pub mod storage_writes_deduplicator;
 pub mod system_contracts;
diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs
new file mode 100644
index 000000000000..7e8d3e1d993b
--- /dev/null
+++ b/core/lib/types/src/snapshots.rs
@@ -0,0 +1,80 @@
+use crate::commitment::L1BatchWithMetadata;
+use crate::{StorageKey, StorageValue, H256};
+use chrono::{DateTime, Utc};
+use serde::{Deserialize, Serialize};
+use zksync_basic_types::{L1BatchNumber, MiniblockNumber};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct AllSnapshots {
+    pub snapshots: Vec<SnapshotMetadata>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SnapshotMetadata {
+    pub l1_batch_number: L1BatchNumber,
+    pub generated_at: DateTime<Utc>,
+    pub factory_deps_filepath: String,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SnapshotHeader {
+    pub l1_batch_number: L1BatchNumber,
+    pub miniblock_number: MiniblockNumber,
+    pub storage_logs_chunks: Vec<SnapshotStorageLogsChunkMetadata>,
+    pub factory_deps_filepath: String,
+    pub last_l1_batch_with_metadata: L1BatchWithMetadata,
+    pub generated_at: DateTime<Utc>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SnapshotStorageLogsChunkMetadata {
+    pub chunk_id: u64,
+    pub filepath: String,
+}
+
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SnapshotStorageLogsStorageKey {
+    pub l1_batch_number: L1BatchNumber,
+    pub chunk_id: u64,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SnapshotStorageLogsChunk {
+    pub storage_logs: Vec<SnapshotStorageLog>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SnapshotStorageLog {
+    pub key: StorageKey,
+    pub value: StorageValue,
+    pub l1_batch_number_of_initial_write: L1BatchNumber,
+    pub enumeration_index: u64,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SnapshotFactoryDependencies {
+    pub factory_deps: Vec<SnapshotFactoryDependency>,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct SnapshotFactoryDependency {
+    pub bytecode_hash: H256,
+    pub bytecode: Vec<u8>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct AppliedSnapshotStatus {
+    pub l1_batch_number: L1BatchNumber,
+    pub is_finished: bool,
+    pub last_finished_chunk_id: Option<u64>,
+}
diff --git a/core/lib/web3_decl/src/namespaces/mod.rs b/core/lib/web3_decl/src/namespaces/mod.rs
index 26e610c16449..66cff2a6dbb8 100644
--- a/core/lib/web3_decl/src/namespaces/mod.rs
+++ b/core/lib/web3_decl/src/namespaces/mod.rs
@@ -3,16 +3,19 @@ pub mod en;
 pub mod eth;
 pub mod eth_subscribe;
 pub mod net;
+pub mod snapshots;
 pub mod web3;
 pub mod zks;
 
 #[cfg(feature = "client")]
 pub use self::{
     debug::DebugNamespaceClient, en::EnNamespaceClient, eth::EthNamespaceClient,
-    net::NetNamespaceClient, web3::Web3NamespaceClient, zks::ZksNamespaceClient,
+    net::NetNamespaceClient, snapshots::SnapshotsNamespaceClient, web3::Web3NamespaceClient,
+    zks::ZksNamespaceClient,
 };
 #[cfg(feature = "server")]
 pub use self::{
     debug::DebugNamespaceServer, en::EnNamespaceServer, eth::EthNamespaceServer,
-    net::NetNamespaceServer, web3::Web3NamespaceServer, zks::ZksNamespaceServer,
+    net::NetNamespaceServer, snapshots::SnapshotsNamespaceServer, web3::Web3NamespaceServer,
+    zks::ZksNamespaceServer,
 };
diff --git a/core/lib/web3_decl/src/namespaces/snapshots.rs b/core/lib/web3_decl/src/namespaces/snapshots.rs
new file mode 100644
index 000000000000..08bb65b6968e
--- /dev/null
+++ b/core/lib/web3_decl/src/namespaces/snapshots.rs
@@ -0,0 +1,26 @@
+use jsonrpsee::{core::RpcResult, proc_macros::rpc};
+use zksync_types::snapshots::{AllSnapshots, SnapshotHeader};
+use zksync_types::L1BatchNumber;
+
+#[cfg_attr(
+    all(feature = "client", feature = "server"),
+    rpc(server, client, namespace = "snapshots")
+)]
+#[cfg_attr(
+    all(feature = "client", not(feature = "server")),
+    rpc(client, namespace = "snapshots")
+)]
+#[cfg_attr(
+    all(not(feature = "client"), feature = "server"),
+    rpc(server, namespace = "snapshots")
+)]
+pub trait SnapshotsNamespace {
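+    /// Returns metadata for all snapshots known to the server.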
+    #[method(name = "getAllSnapshots")]
+    async fn get_all_snapshots(&self) -> RpcResult<AllSnapshots>;
+
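+    /// Returns the full header (including storage logs chunk filepaths) of the snapshot
+    /// generated for the given L1 batch, or `None` if no such snapshot exists.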
+    #[method(name = "getSnapshot")]
+    async fn get_snapshot_by_l1_batch_number(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> RpcResult<Option<SnapshotHeader>>;
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs
index 8fbd3919c26c..72a21e3c2509 100644
--- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/mod.rs
@@ -4,3 +4,5 @@ pub mod eth;
 pub mod net;
 pub mod web3;
 pub mod zks;
+
+pub mod snapshots;
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs
new file mode 100644
index 000000000000..2b9bf30f0457
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs
@@ -0,0 +1,51 @@
+// Built-in uses
+
+// External uses
+use jsonrpc_core::{BoxFuture, Result};
+use jsonrpc_derive::rpc;
+
+// Workspace uses
+use crate::api_server::web3::backend_jsonrpc::error::into_jsrpc_error;
+use crate::l1_gas_price::L1GasPriceProvider;
+use zksync_types::snapshots::{AllSnapshots, SnapshotHeader};
+use zksync_types::L1BatchNumber;
+
+// Local uses
+use crate::web3::namespaces::SnapshotsNamespace;
+
+#[rpc]
+pub trait SnapshotsNamespaceT {
+    #[rpc(name = "snapshots_getAllSnapshots")]
+    fn get_all_snapshots(&self) -> BoxFuture<Result<AllSnapshots>>;
+
+    #[rpc(name = "snapshots_getSnapshot")]
+    fn get_snapshot_by_l1_batch_number(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> BoxFuture<Result<Option<SnapshotHeader>>>;
+}
+
+impl<G: L1GasPriceProvider + Send + Sync + 'static> SnapshotsNamespaceT for SnapshotsNamespace<G> {
+    fn get_all_snapshots(&self) -> BoxFuture<Result<AllSnapshots>> {
+        let self_ = self.clone();
+        Box::pin(async move {
+            self_
+                .get_all_snapshots_impl()
+                .await
+                .map_err(into_jsrpc_error)
+        })
+    }
+
+    fn get_snapshot_by_l1_batch_number(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> BoxFuture<Result<Option<SnapshotHeader>>> {
+        let self_ = self.clone();
+        Box::pin(async move {
+            self_
+                .get_snapshot_by_l1_batch_number_impl(l1_batch_number)
+                .await
+                .map_err(into_jsrpc_error)
+        })
+    }
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs
index 2551b90e824e..3b76771a8cdf 100644
--- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/mod.rs
@@ -3,5 +3,6 @@ pub mod en;
 pub mod eth;
 pub mod eth_subscribe;
 pub mod net;
+pub mod snapshots;
 pub mod web3;
 pub mod zks;
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs
new file mode 100644
index 000000000000..88fd28de9756
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs
@@ -0,0 +1,28 @@
+use crate::api_server::web3::backend_jsonrpsee::into_jsrpc_error;
+use crate::api_server::web3::namespaces::SnapshotsNamespace;
+use crate::l1_gas_price::L1GasPriceProvider;
+use async_trait::async_trait;
+use zksync_types::snapshots::{AllSnapshots, SnapshotHeader};
+use zksync_types::L1BatchNumber;
+use zksync_web3_decl::jsonrpsee::core::RpcResult;
+use zksync_web3_decl::namespaces::SnapshotsNamespaceServer;
+
+#[async_trait]
+impl<G: L1GasPriceProvider + Send + Sync + 'static> SnapshotsNamespaceServer
+    for SnapshotsNamespace<G>
+{
+    async fn get_all_snapshots(&self) -> RpcResult<AllSnapshots> {
+        self.get_all_snapshots_impl()
+            .await
+            .map_err(into_jsrpc_error)
+    }
+
+    async fn get_snapshot_by_l1_batch_number(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> RpcResult<Option<SnapshotHeader>> {
+        self.get_snapshot_by_l1_batch_number_impl(l1_batch_number)
+            .await
+            .map_err(into_jsrpc_error)
+    }
+}
diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs
index 411c04112c90..7bffb43518a5 100644
--- a/core/lib/zksync_core/src/api_server/web3/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/mod.rs
@@ -23,7 +23,7 @@ use zksync_web3_decl::{
     },
     namespaces::{
         DebugNamespaceServer, EnNamespaceServer, EthNamespaceServer, NetNamespaceServer,
-        Web3NamespaceServer, ZksNamespaceServer,
+        SnapshotsNamespaceServer, Web3NamespaceServer, ZksNamespaceServer,
     },
     types::Filter,
 };
@@ -40,7 +40,8 @@ use self::{
     },
     metrics::API_METRICS,
     namespaces::{
-        DebugNamespace, EnNamespace, EthNamespace, NetNamespace, Web3Namespace, ZksNamespace,
+        DebugNamespace, EnNamespace, EthNamespace, NetNamespace, SnapshotsNamespace, Web3Namespace,
+        ZksNamespace,
     },
     pubsub::{EthSubscribe, PubSubEvent},
     state::{Filters, InternalApiConfig, RpcState, SealedMiniblockNumber},
@@ -99,6 +116,7 @@ pub enum Namespace {
     Zks,
     En,
     Pubsub,
+    Snapshots,
 }
 
 impl Namespace {
@@ -110,6 +128,7 @@ impl Namespace {
         Namespace::Zks,
         Namespace::En,
         Namespace::Pubsub,
+        Namespace::Snapshots,
     ];
 
     pub const NON_DEBUG: &'static [Namespace] = &[
@@ -119,6 +138,7 @@ impl Namespace {
         Namespace::Zks,
         Namespace::En,
         Namespace::Pubsub,
+        Namespace::Snapshots,
     ];
 }
 
@@ -343,7 +363,11 @@ impl<G: 'static + Send + Sync + L1GasPriceProvider> ApiBuilder<G> {
                 .expect("Can't merge en namespace");
         }
         if namespaces.contains(&Namespace::Debug) {
-            rpc.merge(DebugNamespace::new(rpc_state).await.into_rpc())
+            rpc.merge(DebugNamespace::new(rpc_state.clone()).await.into_rpc())
+                .expect("Can't merge debug namespace");
+        }
+        if namespaces.contains(&Namespace::Snapshots) {
+            rpc.merge(SnapshotsNamespace::new(rpc_state).into_rpc())
                 .expect("Can't merge debug namespace");
         }
         rpc
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs
index 8504717f3b9e..e1b77d381da0 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/mod.rs
@@ -5,10 +5,11 @@ mod debug;
 mod en;
 pub(crate) mod eth;
 mod net;
+mod snapshots;
 mod web3;
 mod zks;
 
 pub use self::{
     debug::DebugNamespace, en::EnNamespace, eth::EthNamespace, net::NetNamespace,
-    web3::Web3Namespace, zks::ZksNamespace,
+    snapshots::SnapshotsNamespace, web3::Web3Namespace, zks::ZksNamespace,
 };
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
new file mode 100644
index 000000000000..b536c4392301
--- /dev/null
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -0,0 +1,79 @@
+use crate::api_server::web3::state::RpcState;
+use crate::l1_gas_price::L1GasPriceProvider;
+use zksync_types::snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata};
+use zksync_types::L1BatchNumber;
+use zksync_web3_decl::error::Web3Error;
+
+#[derive(Debug)]
+pub struct SnapshotsNamespace<G> {
+    pub state: RpcState<G>,
+}
+
+impl<G> Clone for SnapshotsNamespace<G> {
+    fn clone(&self) -> Self {
+        Self {
+            state: self.state.clone(),
+        }
+    }
+}
+impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
+    pub fn new(state: RpcState<G>) -> Self {
+        Self { state }
+    }
+    pub async fn get_all_snapshots_impl(&self) -> Result<AllSnapshots, Web3Error> {
+        let mut storage_processor = self.state.connection_pool.access_storage().await.unwrap();
+        let mut snapshots_dal = storage_processor.snapshots_dal();
+        Ok(snapshots_dal.get_all_snapshots().await.unwrap())
+    }
+
+    pub async fn get_snapshot_by_l1_batch_number_impl(
+        &self,
+        l1_batch_number: L1BatchNumber,
+    ) -> Result<Option<SnapshotHeader>, Web3Error> {
+        let mut storage_processor = self.state.connection_pool.access_storage().await.unwrap();
+        let mut snapshots_dal = storage_processor.snapshots_dal();
+        let snapshot_files = snapshots_dal
+            .get_snapshot_files(l1_batch_number)
+            .await
+            .unwrap();
+        let Some(snapshot_files) = snapshot_files else {
+            return Ok(None);
+        };
+        let snapshot_metadata = snapshots_dal
+            .get_snapshot_metadata(l1_batch_number)
+            .await
+            .unwrap()
+            .unwrap();
+        let chunks = snapshot_files
+            .iter()
+            .enumerate()
+            .map(|(chunk_id, filepath)| SnapshotStorageLogsChunkMetadata {
+                chunk_id: chunk_id as u64,
+                filepath: filepath.clone(),
+            })
+            .collect();
+        let l1_batch_with_metadata = storage_processor
+            .blocks_dal()
+            .get_l1_batch_metadata(l1_batch_number)
+            .await
+            .unwrap()
+            .unwrap();
+        let miniblock_number = storage_processor
+            .blocks_dal()
+            .get_miniblock_range_of_l1_batch(l1_batch_number)
+            .await
+            .unwrap()
+            .unwrap()
+            .1;
+        Ok(Some(SnapshotHeader {
+            l1_batch_number: snapshot_metadata.l1_batch_number,
+            generated_at: snapshot_metadata.generated_at,
+            miniblock_number,
+            last_l1_batch_with_metadata: l1_batch_with_metadata,
+            storage_logs_chunks: chunks,
+            factory_deps_filepath: snapshot_metadata.factory_deps_filepath,
+        }))
+    }
+}
diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json
index d296db7174f0..37e65991a583 100644
--- a/core/tests/ts-integration/package.json
+++ b/core/tests/ts-integration/package.json
@@ -10,7 +10,8 @@
         "api-test": "zk f jest -- api/web3.test.ts api/debug.test.ts",
         "contract-verification-test": "zk f jest -- api/contract-verification.test.ts",
         "build": "hardhat compile",
-        "build-yul": "hardhat run scripts/compile-yul.ts"
+        "build-yul": "hardhat run scripts/compile-yul.ts",
+        "snapshots-creator-test": "zk f jest -- api/snapshots-creator.test.ts"
     },
     "devDependencies": {
         "@matterlabs/hardhat-zksync-deploy": "^0.6.1",
diff --git a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
new file mode 100644
index 000000000000..76b713614a9e
--- /dev/null
+++ b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
@@ -0,0 +1,82 @@
+import { TestMaster } from '../../src/index';
+import * as utils from 'zk/build/utils';
+import fs from 'fs';
+import * as zlib from 'zlib';
+
+describe('Snapshots API tests', () => {
+    let testMaster: TestMaster;
+
+    beforeAll(() => {
+        testMaster = TestMaster.getInstance(__filename);
+
+        if (process.env.ZKSYNC_ENV!.startsWith('ext-node')) {
+            console.warn("You are trying to run snapshots creator tests on external node. It's not supported.");
+        }
+    });
+
+    async function runCreator() {
+        console.log('Starting creator');
+        await utils.spawn(`cd $ZKSYNC_HOME && cargo run --bin snapshots_creator --release`);
+    }
+
+    async function rpcRequest(name: string, params: any) {
+        const response = await testMaster.mainAccount().provider.send(name, params);
+        console.log(response);
+        return response;
+    }
+
+    async function getAllSnapshots() {
+        return rpcRequest('snapshots_getAllSnapshots', []);
+    }
+
+    async function getSnapshot(snapshotL1Batch: number) {
+        return rpcRequest('snapshots_getSnapshot', [snapshotL1Batch]);
+    }
+
+    async function decompressGzip(filePath: string): Promise<string> {
+        return new Promise((resolve, reject) => {
+            const readStream = fs.createReadStream(filePath);
+            const gunzip = zlib.createGunzip();
+            let data = '';
+
+            gunzip.on('data', (chunk) => (data += chunk.toString()));
+            gunzip.on('end', () => resolve(data));
+            gunzip.on('error', reject);
+
+            readStream.pipe(gunzip);
+        });
+    }
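+
+    // Runs the creator, checks that exactly one new snapshot was added, and verifies
+    // every storage log in the first chunk against live chain state at the snapshot's
+    // miniblock.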
+    async function createAndValidateSnapshot() {
+        let existingSnapshots = (await getAllSnapshots()).snapshots as any[];
+        await runCreator();
+        let newSnapshotsBatches = await getAllSnapshots();
+        let addedSnapshots = (newSnapshotsBatches.snapshots as any[]).filter(
+            (snapshot) => !existingL1Batches.find((other) => snapshot.l1BatchNumber === other.l1BatchNumber)
+        );
+        expect(addedSnapshots.length).toEqual(1);
+
+        let l1BatchNumber = addedSnapshots[0].l1BatchNumber;
+        let fullSnapshot = await getSnapshot(l1BatchNumber);
+        let miniblockNumber = fullSnapshot.miniblockNumber;
+
+        expect(fullSnapshot.l1BatchNumber).toEqual(addedSnapshots[0].l1BatchNumber);
+        let path = `${process.env.ZKSYNC_HOME}/${fullSnapshot.storageLogsChunks[0].filepath}`;
+
+        let output = JSON.parse(await decompressGzip(path));
+
+        for (const storageLog of output['storageLogs'] as any[]) {
+            let snapshotAccountAddress = storageLog['key']['account']['address'];
+            let snapshotKey = storageLog['key']['key'];
+            let snapshotValue = storageLog['value'];
+            let snapshotL1BatchNumber = storageLog['l1BatchNumberOfInitialWrite'];
+            const valueOnBlockchain = await testMaster
+                .mainAccount()
+                .provider.getStorageAt(snapshotAccountAddress, snapshotKey, miniblockNumber);
+            expect(snapshotValue).toEqual(valueOnBlockchain);
+            expect(snapshotL1BatchNumber).toBeLessThanOrEqual(l1BatchNumber);
+        }
+    }
+
+    test('snapshots can be created', async () => {
+        await createAndValidateSnapshot();
+    });
+});
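
A note on the chunk files this test decompresses: they are plain JSON compressed with gzip, produced by the `StoredObject` impls in `core/lib/object_store/src/objects.rs` (patch 05 below touches exactly that code). A minimal, self-contained Rust sketch of the same round trip; `ChunkSketch` and its single field are illustrative stand-ins, not the real `SnapshotStorageLogsChunk`:

```rust
use std::io::Read;

use flate2::{read::GzDecoder, write::GzEncoder, Compression};
use serde::{Deserialize, Serialize};

// Illustrative stand-in; serde's camelCase renaming is what lets the
// TS test above read `storageLogs` out of the decompressed JSON.
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ChunkSketch {
    storage_logs: Vec<serde_json::Value>,
}

fn roundtrip(chunk: &ChunkSketch) -> anyhow::Result<ChunkSketch> {
    // Serialize: JSON written straight into a gzip encoder.
    let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
    serde_json::to_writer(&mut encoder, chunk)?;
    let bytes = encoder.finish()?;

    // Deserialize: the inverse, mirroring the test's zlib.createGunzip() pipeline.
    let mut decoder = GzDecoder::new(&bytes[..]);
    let mut decompressed = Vec::new();
    decoder.read_to_end(&mut decompressed)?;
    Ok(serde_json::from_slice(&decompressed)?)
}
```
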
diff --git a/etc/env/base/object_store.toml b/etc/env/base/object_store.toml
index 3ffec9f2ff65..5fd775acb371 100644
--- a/etc/env/base/object_store.toml
+++ b/etc/env/base/object_store.toml
@@ -18,3 +18,10 @@ mode="FileBacked"
 file_backed_base_path="artifacts"
 gcs_credential_file_path="/path/to/gcs_credentials.json"
 max_retries=5
+
+[snapshots_object_store]
+bucket_base_url="snapshots_base_url"
+mode="FileBacked"
+file_backed_base_path="artifacts"
+gcs_credential_file_path="/path/to/gcs_credentials.json"
+max_retries=5
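
The new `[snapshots_object_store]` section mirrors the main object store settings so snapshot artifacts can target their own bucket. A minimal sketch of consuming it from Rust; the `from_env().0` newtype access is taken from the env_config test in patch 02, while the `FromEnv` import path is an assumption:

```rust
// `SnapshotsObjectStoreConfig` is the newtype this series adds in
// core/lib/env_config/src/object_store.rs; the trait import path is assumed.
use zksync_env_config::object_store::SnapshotsObjectStoreConfig;
use zksync_env_config::FromEnv;

fn load_snapshots_store_config() -> anyhow::Result<()> {
    // `.0` unwraps the newtype around the inner object store config.
    let config = SnapshotsObjectStoreConfig::from_env()?.0;
    println!("snapshots bucket_base_url = {}", config.bucket_base_url);
    Ok(())
}
```
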
diff --git a/infrastructure/zk/src/run/run.ts b/infrastructure/zk/src/run/run.ts
index 12568cd5851d..c8b70be9ccaf 100644
--- a/infrastructure/zk/src/run/run.ts
+++ b/infrastructure/zk/src/run/run.ts
@@ -119,6 +119,10 @@ export async function cross_en_checker() {
     await utils.spawn(`${logLevel} ${suffix}`);
 }
 
+export async function snapshots_creator() {
+    let logLevel = 'RUST_LOG=snapshots_creator=debug';
+    await utils.spawn(`${logLevel} cargo run --bin snapshots_creator --release`);
+}
 export const command = new Command('run').description('run miscellaneous applications').addCommand(dataRestore.command);
 
 command.command('test-accounts').description('print ethereum test accounts').action(testAccounts);
@@ -188,6 +192,8 @@ command
         await readVariable(address, contractName, variableName, cmd.file);
     });
 
+command.command('snapshots-creator').action(snapshots_creator);
+
 command
     .command('cross-en-checker')
     .description('run the cross external nodes checker. See Checker Readme the default run mode and configuration.')
diff --git a/infrastructure/zk/src/test/integration.ts b/infrastructure/zk/src/test/integration.ts
index cecc6fb49d80..64e164d62131 100644
--- a/infrastructure/zk/src/test/integration.ts
+++ b/infrastructure/zk/src/test/integration.ts
@@ -24,6 +24,11 @@ export async function contractVerification(bail: boolean = false) {
     await utils.spawn('yarn ts-integration contract-verification-test' + flag);
 }
 
+export async function snapshotsCreator(bail: boolean = false) {
+    const flag = bail ? ' --bail' : '';
+    await utils.spawn('yarn ts-integration snapshots-creator-test' + flag);
+}
+
 export async function server(options: string[] = []) {
     if (process.env.ZKSYNC_ENV?.startsWith('ext-node')) {
         process.env.ZKSYNC_WEB3_API_URL = `http://127.0.0.1:${process.env.EN_HTTP_PORT}`;
@@ -174,6 +179,14 @@ command
         await contractVerification(cmd.bail);
     });
 
+command
+    .command('snapshots-creator')
+    .description('run snapshots creator tests')
+    .option('--bail')
+    .action(async (cmd: Command) => {
+        await snapshotsCreator(cmd.bail);
+    });
+
 command
     .command('testkit [options...]')
     .allowUnknownOption(true)

From f9344e9726b02a89d3cf49f41c9a099e5fb631ce Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 15 Nov 2023 22:03:05 +0100
Subject: [PATCH 02/43] fix: fmt and lint

---
 core/bin/snapshots_creator/README.md    | 2 +-
 core/lib/env_config/src/object_store.rs | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md
index f2e45f6db135..88c34e0c2cf2 100644
--- a/core/bin/snapshots_creator/README.md
+++ b/core/bin/snapshots_creator/README.md
@@ -58,7 +58,7 @@ pub struct SnapshotStorageLog {
 
 ```rust
 pub struct SnapshotFactoryDependencies {
-		pub factory_deps: Vec<SnapshotFactoryDependency>
+  pub factory_deps: Vec<SnapshotFactoryDependency>
 }
 
 pub struct SnapshotFactoryDependency {
diff --git a/core/lib/env_config/src/object_store.rs b/core/lib/env_config/src/object_store.rs
index f56d57dfd6b4..23b1abaf5166 100644
--- a/core/lib/env_config/src/object_store.rs
+++ b/core/lib/env_config/src/object_store.rs
@@ -116,5 +116,6 @@ mod tests {
         "#;
         lock.set_env(config);
         let actual = SnapshotsObjectStoreConfig::from_env().unwrap().0;
+        assert_eq!(actual, expected_config("/snapshots_base_url"));
     }
 }

From 10e3a1485568d03e4d1428165f31a859974b44b9 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 15 Nov 2023 22:08:58 +0100
Subject: [PATCH 03/43] fix: remove slow ORDER BY

---
 core/lib/dal/src/snapshots_creator_dal.rs | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 577af1284193..022bdb1c56b8 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -59,10 +59,7 @@ impl SnapshotsCreatorDal<'_, '_> {
                 AND storage_logs.operation_number = keys.op[2]
                      INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key
             WHERE miniblock_number <= $1
-            ORDER BY ARRAY(
-                 SELECT get_byte(storage_logs.hashed_key, 32 - generate_series)
-                 FROM generate_series(1, 32)
-             )
+            ORDER BY hashed_key
             LIMIT $2 OFFSET $3;
              "#,
             miniblock_number,
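
One semantic caveat worth flagging with this change: the removed expression compared keys byte-by-byte from the last byte to the first, i.e. it treated `hashed_key` as a little-endian number (which is how the README describes the chunk ordering), whereas plain `ORDER BY hashed_key` compares bytes left to right. A standalone Rust sketch of the two orderings on 32-byte keys:

```rust
// Bytewise (lexicographic) order -- what `ORDER BY hashed_key` yields.
fn sort_lexicographic(keys: &mut [[u8; 32]]) {
    keys.sort();
}

// Reverse-byte order -- what the removed `get_byte(hashed_key, 32 - generate_series)`
// expression emulated: keys compared as little-endian 256-bit numbers.
fn sort_little_endian(keys: &mut [[u8; 32]]) {
    keys.sort_by(|a, b| a.iter().rev().cmp(b.iter().rev()));
}
```
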

From 48c8e926b8da5e45313fe0f7d70221b6e5f5b666 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 15 Nov 2023 23:03:00 +0100
Subject: [PATCH 04/43] fix: fix ambiguous column name

---
 core/lib/dal/sqlx-data.json               | 52 +++++++++++++++++++++++
 core/lib/dal/src/snapshots_creator_dal.rs |  2 +-
 2 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json
index 36be2088171b..1a8b458f2dbd 100644
--- a/core/lib/dal/sqlx-data.json
+++ b/core/lib/dal/sqlx-data.json
@@ -13,6 +13,58 @@
     },
     "query": "UPDATE proof_generation_details SET status=$1, updated_at = now() WHERE l1_batch_number = $2"
   },
+  "00226c81a8bb7bc56ccce6d65247e5b8cd8803179e50cb93fefbaca81e98315e": {
+    "describe": {
+      "columns": [
+        {
+          "name": "key",
+          "ordinal": 0,
+          "type_info": "Bytea"
+        },
+        {
+          "name": "value",
+          "ordinal": 1,
+          "type_info": "Bytea"
+        },
+        {
+          "name": "address",
+          "ordinal": 2,
+          "type_info": "Bytea"
+        },
+        {
+          "name": "miniblock_number",
+          "ordinal": 3,
+          "type_info": "Int8"
+        },
+        {
+          "name": "l1_batch_number",
+          "ordinal": 4,
+          "type_info": "Int8"
+        },
+        {
+          "name": "index",
+          "ordinal": 5,
+          "type_info": "Int8"
+        }
+      ],
+      "nullable": [
+        false,
+        false,
+        false,
+        false,
+        false,
+        false
+      ],
+      "parameters": {
+        "Left": [
+          "Int8",
+          "Int8",
+          "Int8"
+        ]
+      }
+    },
+    "query": "\n            SELECT storage_logs.key,\n                   storage_logs.value,\n                   storage_logs.address,\n                   storage_logs.miniblock_number,\n                   initial_writes.l1_batch_number,\n                   initial_writes.index\n            FROM (SELECT hashed_key,\n                         max(ARRAY [miniblock_number, operation_number]::int[]) AS op\n                  FROM storage_logs\n                  WHERE miniblock_number <= $1\n                  GROUP BY hashed_key\n                  ORDER BY hashed_key) AS keys\n                     INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n                AND storage_logs.miniblock_number = keys.op[1]\n                AND storage_logs.operation_number = keys.op[2]\n                     INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key\n            WHERE miniblock_number <= $1\n            ORDER BY storage_logs.hashed_key\n            LIMIT $2 OFFSET $3;\n             "
+  },
   "00bd80fd83aff559d8d9232c2e98a12a1dd2c8f31792cd915e2cf11f28e583b7": {
     "describe": {
       "columns": [
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 022bdb1c56b8..9f21c0aa9e3a 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -59,7 +59,7 @@ impl SnapshotsCreatorDal<'_, '_> {
                 AND storage_logs.operation_number = keys.op[2]
                      INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key
             WHERE miniblock_number <= $1
-            ORDER BY hashed_key
+            ORDER BY storage_logs.hashed_key
             LIMIT $2 OFFSET $3;
              "#,
             miniblock_number,

From 6b9a1076f6ed76f843fd53d1be5b912cf13c42e6 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 15 Nov 2023 23:16:42 +0100
Subject: [PATCH 05/43] fix: lint

---
 core/lib/object_store/src/objects.rs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs
index 59a056923eb3..adde9abc2967 100644
--- a/core/lib/object_store/src/objects.rs
+++ b/core/lib/object_store/src/objects.rs
@@ -78,13 +78,13 @@ impl StoredObject for SnapshotFactoryDependencies {
     type Key<'a> = L1BatchNumber;
 
     fn encode_key(key: Self::Key<'_>) -> String {
-        format!("snapshot_l1_batch_{}_factory_deps.json.gzip", key)
+        format!("snapshot_l1_batch_{key}_factory_deps.json.gzip")
     }
 
     //TODO use better language agnostic serialization format like protobuf
     fn serialize(&self) -> Result<Vec<u8>, BoxedError> {
         let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
-        serde_json::to_writer(&mut encoder, self).map_err(|e| BoxedError::from(e))?;
+        serde_json::to_writer(&mut encoder, self).map_err(BoxedError::from)?;
         encoder.finish().map_err(From::from)
     }
 
@@ -93,7 +93,7 @@ impl StoredObject for SnapshotFactoryDependencies {
         let mut decompressed_bytes = Vec::new();
         decoder
             .read_to_end(&mut decompressed_bytes)
-            .map_err(|e| BoxedError::from(e))?;
+            .map_err(BoxedError::from)?;
         serde_json::from_slice(&decompressed_bytes).map_err(From::from)
     }
 }
@@ -112,7 +112,7 @@ impl StoredObject for SnapshotStorageLogsChunk {
     //TODO use better language agnostic serialization format like protobuf
     fn serialize(&self) -> Result<Vec<u8>, BoxedError> {
         let mut encoder = GzEncoder::new(Vec::new(), Compression::default());
-        serde_json::to_writer(&mut encoder, self).map_err(|e| BoxedError::from(e))?;
+        serde_json::to_writer(&mut encoder, self).map_err(BoxedError::from)?;
         encoder.finish().map_err(From::from)
     }
 
@@ -121,7 +121,7 @@ impl StoredObject for SnapshotStorageLogsChunk {
         let mut decompressed_bytes = Vec::new();
         decoder
             .read_to_end(&mut decompressed_bytes)
-            .map_err(|e| BoxedError::from(e))?;
+            .map_err(BoxedError::from)?;
         serde_json::from_slice(&decompressed_bytes).map_err(From::from)
     }
 }

From cd38cd1431be03a8051ca15f651a823aa2bc3fcc Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 15 Nov 2023 23:30:29 +0100
Subject: [PATCH 06/43] fix: lint

---
 core/bin/snapshots_creator/src/main.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 60629824c7a9..f4fd3273f924 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -113,11 +113,11 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
 
     let l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await? - 1; // we subtract 1 so that after restore, EN node has at least one l1 batch to fetch
 
-    if !conn
+    if conn
         .snapshots_dal()
         .get_snapshot_metadata(l1_batch_number)
         .await?
-        .is_none()
+        .is_some()
     {
         tracing::info!(
             "Snapshot for L1 batch number {} already exists, exiting",

From 465b7c7f2cd3ee282293570f87c795ebea2679ba Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 15 Nov 2023 23:49:21 +0100
Subject: [PATCH 07/43] fix: ignore snapshots-creator integration test when
 running against an external node

---
 .github/workflows/ci-core-reusable.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml
index 11c2b990dad4..34e0e1c5f3a4 100644
--- a/.github/workflows/ci-core-reusable.yml
+++ b/.github/workflows/ci-core-reusable.yml
@@ -278,7 +278,7 @@ jobs:
 
       # TODO(PLA-653): Restore bridge tests for EN.
       - name: Integration tests
-        run: ci_run zk test i server --testPathIgnorePatterns 'contract-verification|custom-erc20-bridge'
+        run: ci_run zk test i server --testPathIgnorePatterns 'contract-verification|custom-erc20-bridge|snapshots-creator'
 
       - name: Run Cross EN Checker
         run: ci_run zk run cross-en-checker

From f82c13b4f9b00b7d28b582e283277e82b3c9157c Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Thu, 16 Nov 2023 00:03:41 +0100
Subject: [PATCH 08/43] feat: better logs

---
 core/bin/snapshots_creator/src/main.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index f4fd3273f924..93ea30770f88 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -149,7 +149,7 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         l1_batch_number.0
     );
     tracing::info!(
-        "{} chunks of max size {} will be generated",
+        "Starting to generate {} chunks of max size {}",
         chunks_count,
         chunk_size
     );

From b8c2a19beb6fd9db1109f483f8d61d5f8dddba79 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Thu, 16 Nov 2023 16:12:11 +0100
Subject: [PATCH 09/43] feat: PR feedback vol. 1

---
 core/bin/snapshots_creator/README.md          |  2 +-
 core/bin/snapshots_creator/src/main.rs        | 49 +++++++++++--------
 core/lib/dal/src/snapshots_creator_dal.rs     | 11 +++--
 core/lib/dal/src/snapshots_dal.rs             | 29 +++--------
 core/lib/object_store/src/objects.rs          | 38 ++++++++++++--
 core/lib/types/src/snapshots.rs               |  1 +
 .../api_server/web3/namespaces/snapshots.rs   | 17 +++----
 7 files changed, 86 insertions(+), 61 deletions(-)

diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md
index 88c34e0c2cf2..86c24fef81b3 100644
--- a/core/bin/snapshots_creator/README.md
+++ b/core/bin/snapshots_creator/README.md
@@ -9,7 +9,7 @@ node.
 Usage (local development):\
 First run `zk env dev` \
 then the creator can be run using:  
-`cargo run --bin snapshots_creator --release`
+`zk run snapshots_creator`
 
 Snapshot contents can be stored based on blob_store config either in local filesystem or GS.
 
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 93ea30770f88..2da5263da1c2 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -21,13 +21,13 @@ use zksync_utils::time::seconds_since_epoch;
 #[derive(Debug, Metrics)]
 #[metrics(prefix = "snapshots_creator")]
 struct SnapshotsCreatorMetrics {
-    pub storage_logs_chunks_count: Gauge<u64>,
+    storage_logs_chunks_count: Gauge<u64>,
 
-    pub snapshot_generation_duration: Gauge<u64>,
+    snapshot_generation_duration: Gauge<u64>,
 
-    pub snapshot_l1_batch: Gauge<u64>,
+    snapshot_l1_batch: Gauge<u64>,
 
-    pub snapshot_generation_timestamp: Gauge<u64>,
+    snapshot_generation_timestamp: Gauge<u64>,
 }
 #[vise::register]
 pub(crate) static METRICS: vise::Global<SnapshotsCreatorMetrics> = vise::Global::new();
@@ -55,7 +55,7 @@ async fn process_storage_logs_single_chunk(
     chunk_size: u64,
     chunks_count: u64,
 ) -> anyhow::Result<String> {
-    let mut conn = pool.access_storage().await?;
+    let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let logs = conn
         .snapshots_creator_dal()
         .get_storage_logs_chunk(l1_batch_number, chunk_id, chunk_size)
@@ -88,7 +88,7 @@ async fn process_factory_deps(
     l1_batch_number: L1BatchNumber,
 ) -> anyhow::Result<String> {
     tracing::info!("Processing factory dependencies");
-    let mut conn = pool.access_storage().await?;
+    let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let factory_deps = conn
         .snapshots_creator_dal()
         .get_all_factory_deps(miniblock_number)
@@ -108,7 +108,7 @@ async fn process_factory_deps(
 }
 
 async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::Result<()> {
-    let mut conn = pool.access_storage().await?;
+    let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let start_time = seconds_since_epoch();
 
     let l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await? - 1; // we subtract 1 so that after restore, EN node has at least one l1 batch to fetch
@@ -126,7 +126,7 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         return Ok(());
     }
 
-    let miniblock_number = conn
+    let last_miniblock_number_in_batch = conn
         .blocks_dal()
         .get_miniblock_range_of_l1_batch(l1_batch_number)
         .await?
@@ -145,7 +145,7 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
 
     tracing::info!(
         "Creating snapshot for storage logs up to miniblock {}, l1_batch {}",
-        miniblock_number,
+        last_miniblock_number_in_batch,
         l1_batch_number.0
     );
     tracing::info!(
@@ -154,8 +154,13 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         chunk_size
     );
 
-    let factory_deps_output_file =
-        process_factory_deps(&*blob_store, &pool, miniblock_number, l1_batch_number).await?;
+    let factory_deps_output_file = process_factory_deps(
+        &*blob_store,
+        &pool,
+        last_miniblock_number_in_batch,
+        l1_batch_number,
+    )
+    .await?;
 
     let mut storage_logs_output_files = vec![];
 
@@ -176,7 +181,7 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         storage_logs_output_files.push(output_file.clone());
     }
 
-    let mut conn = pool.access_storage().await?;
+    let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
 
     conn.snapshots_dal()
         .add_snapshot(
@@ -195,16 +200,18 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         .snapshot_generation_duration
         .set(seconds_since_epoch() - start_time);
 
+    tracing::info!("Run metrics:");
+    tracing::info!(
+        "snapshot_generation_duration: {}s",
+        METRICS.snapshot_generation_duration.get()
+    );
+    tracing::info!("snapshot_l1_batch: {}", METRICS.snapshot_l1_batch.get());
+    tracing::info!(
+        "snapshot_generation_timestamp: {}",
+        METRICS.snapshot_generation_timestamp.get()
+    );
     tracing::info!(
-        r#"Run metrics:
-snapshot_generation_duration: {}sec
-snapshot_l1_batch: {},
-snapshot_generation_timestamp: {}
-storage_logs_chunks_count: {}
-  "#,
-        METRICS.snapshot_generation_duration.get(),
-        METRICS.snapshot_l1_batch.get(),
-        METRICS.snapshot_generation_timestamp.get(),
+        "storage_logs_chunks_count: {}",
         METRICS.storage_logs_chunks_count.get()
     );
 
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 9f21c0aa9e3a..00172f3f456d 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -14,14 +14,19 @@ impl SnapshotsCreatorDal<'_, '_> {
         l1_batch_number: L1BatchNumber,
     ) -> Result<u64, sqlx::Error> {
         let count = sqlx::query!(
-            "SELECT count(*) FROM initial_writes WHERE l1_batch_number <= $1",
+            r#"
+            SELECT index
+            FROM initial_writes
+            WHERE l1_batch_number <= $1
+            ORDER BY l1_batch_number DESC, index DESC
+            LIMIT 1;
+            "#,
             l1_batch_number.0 as i32
         )
         .fetch_one(self.storage.conn())
         .await
         .unwrap()
-        .count
-        .unwrap();
+        .index;
         Ok(count as u64)
     }
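
The rewritten query leans on an invariant instead of a scan: `initial_writes.index` enumerates initial writes densely starting at 1, so the largest index up to a given batch equals the row count that `count(*)` used to compute, and a single `LIMIT 1` lookup over the index suffices. A toy model of that equivalence:

```rust
// Toy model: for a gapless enumeration 1..=n, max(index) == count(rows).
fn max_index_equals_count(indices: &[u64]) -> bool {
    indices.iter().copied().max().unwrap_or(0) == indices.len() as u64
}

fn main() {
    assert!(max_index_equals_count(&[1, 2, 3, 4]));
    assert!(!max_index_equals_count(&[1, 2, 4])); // a gap breaks the equivalence
}
```
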
 
diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs
index 99a6363f166c..f13a74449e66 100644
--- a/core/lib/dal/src/snapshots_dal.rs
+++ b/core/lib/dal/src/snapshots_dal.rs
@@ -29,7 +29,7 @@ impl SnapshotsDal<'_, '_> {
 
     pub async fn get_all_snapshots(&mut self) -> Result<AllSnapshots, sqlx::Error> {
         let records: Vec<SnapshotMetadata> = sqlx::query!(
-            "SELECT l1_batch_number, created_at, factory_deps_filepath FROM snapshots"
+            "SELECT l1_batch_number, created_at, factory_deps_filepath, storage_logs_filepaths FROM snapshots"
         )
         .fetch_all(self.storage.conn())
         .await?
@@ -38,6 +38,7 @@ impl SnapshotsDal<'_, '_> {
             l1_batch_number: L1BatchNumber(r.l1_batch_number as u32),
             generated_at: DateTime::<Utc>::from_naive_utc_and_offset(r.created_at, Utc),
             factory_deps_filepath: r.factory_deps_filepath,
+            storage_logs_filepaths: r.storage_logs_filepaths,
         })
         .collect();
         Ok(AllSnapshots { snapshots: records })
@@ -48,7 +49,7 @@ impl SnapshotsDal<'_, '_> {
         l1_batch_number: L1BatchNumber,
     ) -> Result<Option<SnapshotMetadata>, sqlx::Error> {
         let record: Option<SnapshotMetadata> = sqlx::query!(
-            "SELECT l1_batch_number, created_at, factory_deps_filepath FROM snapshots WHERE l1_batch_number = $1",
+            "SELECT l1_batch_number, created_at, factory_deps_filepath, storage_logs_filepaths FROM snapshots WHERE l1_batch_number = $1",
             l1_batch_number.0 as i32
         )
         .fetch_optional(self.storage.conn())
@@ -57,24 +58,10 @@ impl SnapshotsDal<'_, '_> {
             l1_batch_number: L1BatchNumber(r.l1_batch_number as u32),
             generated_at: DateTime::<Utc>::from_naive_utc_and_offset(r.created_at, Utc),
             factory_deps_filepath: r.factory_deps_filepath,
+            storage_logs_filepaths: r.storage_logs_filepaths,
         });
         Ok(record)
     }
-
-    pub async fn get_snapshot_files(
-        &mut self,
-        l1_batch_number: L1BatchNumber,
-    ) -> Result<Option<Vec<String>>, sqlx::Error> {
-        let record = sqlx::query!(
-            "SELECT storage_logs_filepaths \
-            FROM snapshots WHERE l1_batch_number = $1",
-            l1_batch_number.0 as i32
-        )
-        .fetch_optional(self.storage.conn())
-        .await?;
-
-        Ok(record.map(|r| r.storage_logs_filepaths))
-    }
 }
 
 #[cfg(test)]
@@ -135,11 +122,11 @@ mod tests {
         .expect("Failed to add snapshot");
 
         let files = dal
-            .get_snapshot_files(l1_batch_number)
+            .get_snapshot_metadata(l1_batch_number)
             .await
-            .expect("Failed to retrieve snapshot");
-        assert!(files.is_some());
-        let files = files.unwrap();
+            .expect("Failed to retrieve snapshot")
+            .unwrap()
+            .storage_logs_filepaths;
         assert!(files.contains(&"gs:///bucket/test_file1.bin".to_string()));
         assert!(files.contains(&"gs:///bucket/test_file2.bin".to_string()));
     }
diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs
index adde9abc2967..bf036adaff62 100644
--- a/core/lib/object_store/src/objects.rs
+++ b/core/lib/object_store/src/objects.rs
@@ -70,9 +70,6 @@ macro_rules! serialize_using_bincode {
     };
 }
 
-/// Derives [`StoredObject::serialize()`] and [`StoredObject::deserialize()`] using
-/// the `json` (de)serializer. Should be used in `impl StoredObject` blocks.
-
 impl StoredObject for SnapshotFactoryDependencies {
     const BUCKET: Bucket = Bucket::StorageSnapshot;
     type Key<'a> = L1BatchNumber;
@@ -104,7 +101,7 @@ impl StoredObject for SnapshotStorageLogsChunk {
 
     fn encode_key(key: Self::Key<'_>) -> String {
         format!(
-            "snapshot_l1_batch_{}_storage_logs_part_{:0<3}.json.gzip",
+            "snapshot_l1_batch_{}_storage_logs_part_{:0>4}.json.gzip",
             key.l1_batch_number, key.chunk_id
         )
     }
@@ -310,3 +307,36 @@ impl dyn ObjectStore + '_ {
         self.get_storage_prefix_raw(V::BUCKET)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_storage_logs_filenames_generate_correctly() {
+        let filename1 = SnapshotStorageLogsChunk::encode_key(SnapshotStorageLogsStorageKey {
+            l1_batch_number: L1BatchNumber(42),
+            chunk_id: 97,
+        });
+        let filename2 = SnapshotStorageLogsChunk::encode_key(SnapshotStorageLogsStorageKey {
+            l1_batch_number: L1BatchNumber(3),
+            chunk_id: 531,
+        });
+        let filename3 = SnapshotStorageLogsChunk::encode_key(SnapshotStorageLogsStorageKey {
+            l1_batch_number: L1BatchNumber(567),
+            chunk_id: 5,
+        });
+        assert_eq!(
+            "snapshot_l1_batch_42_storage_logs_part_0097.json.gzip",
+            filename1
+        );
+        assert_eq!(
+            "snapshot_l1_batch_3_storage_logs_part_0531.json.gzip",
+            filename2
+        );
+        assert_eq!(
+            "snapshot_l1_batch_567_storage_logs_part_0005.json.gzip",
+            filename3
+        );
+    }
+}
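
The underlying fix in `encode_key` is the format spec: `{:0<3}` pads with zeros on the right, so chunk 97 would have produced `970`, while `{:0>4}` zero-pads on the left to width four, keeping filenames in lexicographic chunk order. A quick standalone check:

```rust
fn main() {
    // `<` left-aligns and pads on the right; `>` right-aligns and pads on the left.
    assert_eq!(format!("{:0<3}", 97), "970");
    assert_eq!(format!("{:0>4}", 97), "0097");
    assert_eq!(format!("{:0>4}", 531), "0531");
}
```
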
diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs
index 7e8d3e1d993b..d81433c63de4 100644
--- a/core/lib/types/src/snapshots.rs
+++ b/core/lib/types/src/snapshots.rs
@@ -16,6 +16,7 @@ pub struct SnapshotMetadata {
     pub l1_batch_number: L1BatchNumber,
     pub generated_at: DateTime<Utc>,
     pub factory_deps_filepath: String,
+    pub storage_logs_filepaths: Vec<String>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
index b536c4392301..7574c0a2c232 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -32,19 +32,12 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
     ) -> Result<Option<SnapshotHeader>, Web3Error> {
         let mut storage_processor = self.state.connection_pool.access_storage().await.unwrap();
         let mut snapshots_dal = storage_processor.snapshots_dal();
-        let snapshot_files = snapshots_dal
-            .get_snapshot_files(l1_batch_number)
+        let snapshot_metadata = snapshots_dal
+            .get_snapshot_metadata(l1_batch_number)
             .await
             .unwrap();
-        if snapshot_files.is_none() {
-            Ok(None)
-        } else {
-            let snapshot_metadata = snapshots_dal
-                .get_snapshot_metadata(l1_batch_number)
-                .await
-                .unwrap()
-                .unwrap();
-            let snapshot_files = snapshot_files.as_ref().unwrap();
+        if let Some(snapshot_metadata) = snapshot_metadata {
+            let snapshot_files = snapshot_metadata.storage_logs_filepaths.clone();
             let chunks = snapshot_files
                 .iter()
                 .enumerate()
@@ -74,6 +67,8 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
                 storage_logs_chunks: chunks,
                 factory_deps_filepath: snapshot_metadata.factory_deps_filepath,
             }))
+        } else {
+            return Ok(None);
         }
     }
 }

From 883e3947bb4571b786c9c4f237ebd7b271fc6e64 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Thu, 16 Nov 2023 16:38:24 +0100
Subject: [PATCH 10/43] feat: PR feedback vol. 2

---
 core/bin/snapshots_creator/src/main.rs        |  2 +-
 core/lib/dal/src/snapshots_dal.rs             | 14 ++++-----
 core/lib/object_store/src/mock.rs             |  2 +-
 .../api_server/web3/namespaces/snapshots.rs   | 30 ++++++++++++++-----
 4 files changed, 30 insertions(+), 18 deletions(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 2da5263da1c2..d0cdc94916ee 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -187,7 +187,7 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         .add_snapshot(
             l1_batch_number,
             &storage_logs_output_files,
-            factory_deps_output_file,
+            &factory_deps_output_file,
         )
         .await?;
 
diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs
index f13a74449e66..a22c2dd76db1 100644
--- a/core/lib/dal/src/snapshots_dal.rs
+++ b/core/lib/dal/src/snapshots_dal.rs
@@ -13,7 +13,7 @@ impl SnapshotsDal<'_, '_> {
         &mut self,
         l1_batch_number: L1BatchNumber,
         storage_logs_filepaths: &[String],
-        factory_deps_filepaths: String,
+        factory_deps_filepaths: &str,
     ) -> Result<(), sqlx::Error> {
         sqlx::query!(
             "INSERT INTO snapshots (l1_batch_number, created_at, storage_logs_filepaths, factory_deps_filepath) \
@@ -75,13 +75,9 @@ mod tests {
         let mut conn = pool.access_storage().await.unwrap();
         let mut dal = conn.snapshots_dal();
         let l1_batch_number = L1BatchNumber(100);
-        dal.add_snapshot(
-            l1_batch_number,
-            &[],
-            "gs:///bucket/factory_deps.bin".to_string(),
-        )
-        .await
-        .expect("Failed to add snapshot");
+        dal.add_snapshot(l1_batch_number, &[], "gs:///bucket/factory_deps.bin")
+            .await
+            .expect("Failed to add snapshot");
 
         let snapshots = dal
             .get_all_snapshots()
@@ -116,7 +112,7 @@ mod tests {
                 "gs:///bucket/test_file1.bin".to_string(),
                 "gs:///bucket/test_file2.bin".to_string(),
             ],
-            "gs:///bucket/factory_deps.bin".to_string(),
+            "gs:///bucket/factory_deps.bin",
         )
         .await
         .expect("Failed to add snapshot");
diff --git a/core/lib/object_store/src/mock.rs b/core/lib/object_store/src/mock.rs
index 7db220870ddb..f170de5e7533 100644
--- a/core/lib/object_store/src/mock.rs
+++ b/core/lib/object_store/src/mock.rs
@@ -47,6 +47,6 @@ impl ObjectStore for MockStore {
     }
 
     fn get_storage_prefix_raw(&self, bucket: Bucket) -> String {
-        format!("{bucket}")
+        bucket.to_string()
     }
 }
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
index 7574c0a2c232..f47307fd9d48 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -1,3 +1,4 @@
+use crate::api_server::web3::backend_jsonrpc::error::internal_error;
 use crate::api_server::web3::state::RpcState;
 use crate::l1_gas_price::L1GasPriceProvider;
 use zksync_types::snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata};
@@ -21,21 +22,36 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
         Self { state }
     }
     pub async fn get_all_snapshots_impl(&self) -> Result<AllSnapshots, Web3Error> {
-        let mut storage_processor = self.state.connection_pool.access_storage().await.unwrap();
+        let method_name = "get_all_snapshots";
+        let mut storage_processor = self
+            .state
+            .connection_pool
+            .access_storage_tagged("api")
+            .await
+            .map_err(|err| internal_error(method_name, err))?;
         let mut snapshots_dal = storage_processor.snapshots_dal();
-        Ok(snapshots_dal.get_all_snapshots().await.unwrap())
+        Ok(snapshots_dal
+            .get_all_snapshots()
+            .await
+            .map_err(|err| internal_error(method_name, err))?)
     }
 
     pub async fn get_snapshot_by_l1_batch_number_impl(
         &self,
         l1_batch_number: L1BatchNumber,
     ) -> Result<Option<SnapshotHeader>, Web3Error> {
-        let mut storage_processor = self.state.connection_pool.access_storage().await.unwrap();
+        let method_name = "get_snapshot_by_l1_batch_number";
+        let mut storage_processor = self
+            .state
+            .connection_pool
+            .access_storage_tagged("api")
+            .await
+            .map_err(|err| internal_error(method_name, err))?;
         let mut snapshots_dal = storage_processor.snapshots_dal();
         let snapshot_metadata = snapshots_dal
             .get_snapshot_metadata(l1_batch_number)
             .await
-            .unwrap();
+            .map_err(|err| internal_error(method_name, err))?;
         if let Some(snapshot_metadata) = snapshot_metadata {
             let snapshot_files = snapshot_metadata.storage_logs_filepaths.clone();
             let chunks = snapshot_files
@@ -50,13 +66,13 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
                 .blocks_dal()
                 .get_l1_batch_metadata(l1_batch_number)
                 .await
-                .unwrap()
+                .map_err(|err| internal_error(method_name, err))?
                 .unwrap();
             let miniblock_number = storage_processor
                 .blocks_dal()
                 .get_miniblock_range_of_l1_batch(l1_batch_number)
                 .await
-                .unwrap()
+                .map_err(|err| internal_error(method_name, err))?
                 .unwrap()
                 .1;
             Ok(Some(SnapshotHeader {
@@ -68,7 +84,7 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
                 factory_deps_filepath: snapshot_metadata.factory_deps_filepath,
             }))
         } else {
-            return Ok(None);
+            Ok(None)
         }
     }
 }

From f04ed5f6f7e20acc11ae694db63a1062a0835911 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Thu, 16 Nov 2023 16:40:08 +0100
Subject: [PATCH 11/43] feat: PR feedback vol. 3

---
 core/bin/snapshots_creator/src/main.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index d0cdc94916ee..0ac91e4db631 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -179,6 +179,7 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         )
         .await?;
         storage_logs_output_files.push(output_file.clone());
+        METRICS.storage_logs_chunks_count.set(chunk_id);
     }
 
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
@@ -192,7 +193,6 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         .await?;
 
     METRICS.snapshot_l1_batch.set(l1_batch_number.0.as_u64());
-    METRICS.storage_logs_chunks_count.set(chunks_count);
     METRICS
         .snapshot_generation_timestamp
         .set(seconds_since_epoch());

From 2bd4a53dca41d4523d8a008c1d295d4065a911f8 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Thu, 16 Nov 2023 18:11:49 +0100
Subject: [PATCH 12/43] fix: lint

---
 .../zksync_core/src/api_server/web3/namespaces/snapshots.rs   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
index f47307fd9d48..7fc5f3f45906 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -30,10 +30,10 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
             .await
             .map_err(|err| internal_error(method_name, err))?;
         let mut snapshots_dal = storage_processor.snapshots_dal();
-        Ok(snapshots_dal
+        snapshots_dal
             .get_all_snapshots()
             .await
-            .map_err(|err| internal_error(method_name, err))?)
+            .map_err(|err| internal_error(method_name, err))
     }
 
     pub async fn get_snapshot_by_l1_batch_number_impl(

From cf17e937e4822b5210c99d2406ec88d04fcd25c3 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Fri, 17 Nov 2023 12:55:43 +0100
Subject: [PATCH 13/43] feat: print lint version

---
 infrastructure/zk/src/lint.ts | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts
index b34cd1000312..b012d84d67ba 100644
--- a/infrastructure/zk/src/lint.ts
+++ b/infrastructure/zk/src/lint.ts
@@ -38,6 +38,8 @@ async function lintSystemContracts(check: boolean = false) {
 
 async function clippy() {
     process.chdir(process.env.ZKSYNC_HOME!);
+    const { stdout: version } = await utils.exec('cargo clippy --version');
+    console.log(`linting using clippy: ${version}`)
     await utils.spawn('cargo clippy --tests -- -D warnings');
 }
 

From 58206cbbe08b92725721516ea6e0ae473fc4cbf7 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Fri, 17 Nov 2023 13:02:53 +0100
Subject: [PATCH 14/43] fix: fmt

---
 infrastructure/zk/src/lint.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts
index b012d84d67ba..5813ba2b2b46 100644
--- a/infrastructure/zk/src/lint.ts
+++ b/infrastructure/zk/src/lint.ts
@@ -39,7 +39,7 @@ async function lintSystemContracts(check: boolean = false) {
 async function clippy() {
     process.chdir(process.env.ZKSYNC_HOME!);
     const { stdout: version } = await utils.exec('cargo clippy --version');
-    console.log(`linting using clippy: ${version}`)
+    console.log(`linting using clippy: ${version}`);
     await utils.spawn('cargo clippy --tests -- -D warnings');
 }
 

From 96d8c30025d50c3737ee16fd8c21f533ace0942c Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Fri, 17 Nov 2023 13:29:11 +0100
Subject: [PATCH 15/43] feat: more info when running lint

---
 infrastructure/zk/src/lint.ts | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts
index 5813ba2b2b46..65ee5b75b6dd 100644
--- a/infrastructure/zk/src/lint.ts
+++ b/infrastructure/zk/src/lint.ts
@@ -38,8 +38,10 @@ async function lintSystemContracts(check: boolean = false) {
 
 async function clippy() {
     process.chdir(process.env.ZKSYNC_HOME!);
+    const { stdout: rustVersion } = await utils.exec('rustc --version');
+    console.log(`linting using rustc: ${rustVersion.trim()}`);
     const { stdout: version } = await utils.exec('cargo clippy --version');
-    console.log(`linting using clippy: ${version}`);
+    console.log(`linting using clippy: ${version.trim()}`);
     await utils.spawn('cargo clippy --tests -- -D warnings');
 }
 

From b0a4bf0a452a74f48142417e854e68f4e9f3e6c5 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 27 Nov 2023 13:24:58 +0100
Subject: [PATCH 16/43] fix: address another round of PR feedback (more to come)

---
 core/bin/snapshots_creator/README.md          | 48 +------------------
 core/bin/snapshots_creator/src/main.rs        |  2 -
 ...231013163109_create_snapshots_table.up.sql |  4 +-
 core/lib/dal/src/snapshots_creator_dal.rs     |  4 ++
 core/lib/dal/src/snapshots_dal.rs             | 31 ++++++------
 core/lib/types/src/snapshots.rs               | 17 ++-----
 .../api_server/web3/namespaces/snapshots.rs   |  1 -
 7 files changed, 27 insertions(+), 80 deletions(-)

diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md
index 86c24fef81b3..b669dbc4636b 100644
--- a/core/bin/snapshots_creator/README.md
+++ b/core/bin/snapshots_creator/README.md
@@ -15,54 +15,8 @@ Snapshot contents can be stored based on blob_store config either in local files
 
 ## Snapshots format
 
-Each snapshot consists of three types of objects: header, storage logs chunks and factory deps:
+Each snapshot consists of three types of objects (see [snapshots.rs](https://github.com/matter-labs/zksync-era/core/lib/types/src/snapshots.rs)): header, storage logs chunks and factory deps:
 
 - Snapshot Header (currently returned by snapshots namespace of JSON-RPC API)
-
-```rust
-pub struct SnapshotHeader {
-    pub l1_batch_number: L1BatchNumber,
-    pub miniblock_number: MiniblockNumber,
-    // ordered by chunk_id
-    pub storage_logs_chunks: Vec<SnapshotStorageLogsChunkMetadata>,
-    pub factory_deps_filepath: String,
-    pub last_l1_batch_with_metadata: L1BatchWithMetadata,
-    pub generated_at: DateTime<Utc>,
-}
-
-pub struct SnapshotStorageLogsChunkMetadata {
-    pub chunk_id: u64,
-    // can be either a gs or filesystem path
-    pub filepath: String,
-}
-```
-
 - Snapshot Storage logs chunks (most likely to be stored in gzipped protobuf files, but this part is still WIP) :
-
-```rust
-pub struct SnapshotStorageLogsChunk {
-    // sorted by hashed_keys interpreted as little-endian numbers
-    pub storage_logs: Vec<SnapshotStorageLog>,
-}
-
-// "most recent" for each key together with info when the key was first used
-pub struct SnapshotStorageLog {
-    pub key: StorageKey,
-    pub value: StorageValue,
-    pub l1_batch_number_of_initial_write: L1BatchNumber,
-    pub enumeration_index: u64,
-}
-```
-
 - Factory dependencies (most likely to be stored as protobufs in the very near future)
-
-```rust
-pub struct SnapshotFactoryDependencies {
-  pub factory_deps: Vec<SnapshotFactoryDependency>
-}
-
-pub struct SnapshotFactoryDependency {
-    pub bytecode_hash: H256,
-    pub bytecode: Vec<u8>,
-}
-```
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 0ac91e4db631..00b65d4b5eec 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -26,8 +26,6 @@ struct SnapshotsCreatorMetrics {
     snapshot_generation_duration: Gauge<u64>,
 
     snapshot_l1_batch: Gauge<u64>,
-
-    snapshot_generation_timestamp: Gauge<u64>,
 }
 #[vise::register]
 pub(crate) static METRICS: vise::Global<SnapshotsCreatorMetrics> = vise::Global::new();
diff --git a/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql b/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql
index b1ca08d67ebb..2d6a700092db 100644
--- a/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql
+++ b/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql
@@ -3,5 +3,7 @@ CREATE TABLE snapshots
     l1_batch_number          BIGINT    NOT NULL PRIMARY KEY,
     storage_logs_filepaths   TEXT[]    NOT NULL,
     factory_deps_filepath    TEXT      NOT NULL,
-    created_at               TIMESTAMP NOT NULL
+
+    created_at TIMESTAMP NOT NULL,
+    updated_at TIMESTAMP NOT NULL
 );
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 00172f3f456d..1492226f2920 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -23,6 +23,8 @@ impl SnapshotsCreatorDal<'_, '_> {
             "#,
             l1_batch_number.0 as i32
         )
+        .instrument("get_storage_logs_count")
+        .report_latency()
         .fetch_one(self.storage.conn())
         .await
         .unwrap()
@@ -40,6 +42,8 @@ impl SnapshotsCreatorDal<'_, '_> {
             "select MAX(number) from miniblocks where l1_batch_number = $1",
             l1_batch_number.0 as i64
         )
+        .instrument("get_storage_logs_chunk")
+        .report_latency()
         .fetch_one(self.storage.conn())
         .await?
         .max
diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs
index a22c2dd76db1..a9fde98ff916 100644
--- a/core/lib/dal/src/snapshots_dal.rs
+++ b/core/lib/dal/src/snapshots_dal.rs
@@ -1,5 +1,5 @@
+use crate::instrument::InstrumentExt;
 use crate::StorageProcessor;
-use sqlx::types::chrono::{DateTime, Utc};
 use zksync_types::snapshots::{AllSnapshots, SnapshotMetadata};
 use zksync_types::L1BatchNumber;
 
@@ -16,30 +16,29 @@ impl SnapshotsDal<'_, '_> {
         factory_deps_filepaths: &str,
     ) -> Result<(), sqlx::Error> {
         sqlx::query!(
-            "INSERT INTO snapshots (l1_batch_number, created_at, storage_logs_filepaths, factory_deps_filepath) \
-             VALUES ($1, now(), $2, $3)",
+            "INSERT INTO snapshots (l1_batch_number, storage_logs_filepaths, factory_deps_filepath, created_at, updated_at) \
+             VALUES ($1, $2, $3, NOW(), NOW())",
             l1_batch_number.0 as i32,
             storage_logs_filepaths,
             factory_deps_filepaths,
         )
+        .instrument("add_snapshot")
+        .report_latency()
         .execute(self.storage.conn())
         .await?;
         Ok(())
     }
 
     pub async fn get_all_snapshots(&mut self) -> Result<AllSnapshots, sqlx::Error> {
-        let records: Vec<SnapshotMetadata> = sqlx::query!(
-            "SELECT l1_batch_number, created_at, factory_deps_filepath, storage_logs_filepaths FROM snapshots"
+        let records: Vec<L1BatchNumber> = sqlx::query!(
+            "SELECT l1_batch_number, factory_deps_filepath, storage_logs_filepaths FROM snapshots"
         )
+        .instrument("get_all_snapshots")
+        .report_latency()
         .fetch_all(self.storage.conn())
         .await?
         .into_iter()
-        .map(|r| SnapshotMetadata {
-            l1_batch_number: L1BatchNumber(r.l1_batch_number as u32),
-            generated_at: DateTime::<Utc>::from_naive_utc_and_offset(r.created_at, Utc),
-            factory_deps_filepath: r.factory_deps_filepath,
-            storage_logs_filepaths: r.storage_logs_filepaths,
-        })
+        .map(|r| L1BatchNumber(r.l1_batch_number as u32))
         .collect();
         Ok(AllSnapshots { snapshots: records })
     }
@@ -49,14 +48,15 @@ impl SnapshotsDal<'_, '_> {
         l1_batch_number: L1BatchNumber,
     ) -> Result<Option<SnapshotMetadata>, sqlx::Error> {
         let record: Option<SnapshotMetadata> = sqlx::query!(
-            "SELECT l1_batch_number, created_at, factory_deps_filepath, storage_logs_filepaths FROM snapshots WHERE l1_batch_number = $1",
+            "SELECT l1_batch_number, factory_deps_filepath, storage_logs_filepaths FROM snapshots WHERE l1_batch_number = $1",
             l1_batch_number.0 as i32
         )
+        .instrument("get_snapshot_metadata")
+        .report_latency()
         .fetch_optional(self.storage.conn())
         .await?
         .map(|r| SnapshotMetadata {
             l1_batch_number: L1BatchNumber(r.l1_batch_number as u32),
-            generated_at: DateTime::<Utc>::from_naive_utc_and_offset(r.created_at, Utc),
             factory_deps_filepath: r.factory_deps_filepath,
             storage_logs_filepaths: r.storage_logs_filepaths,
         });
@@ -84,10 +84,7 @@ mod tests {
             .await
             .expect("Failed to retrieve snapshots");
         assert_eq!(1, snapshots.snapshots.len());
-        assert_eq!(
-            snapshots.snapshots[0].l1_batch_number,
-            l1_batch_number as L1BatchNumber
-        );
+        assert_eq!(snapshots.snapshots[0], l1_batch_number);
 
         let snapshot_metadata = dal
             .get_snapshot_metadata(l1_batch_number)
diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs
index d81433c63de4..f3e66d287dc4 100644
--- a/core/lib/types/src/snapshots.rs
+++ b/core/lib/types/src/snapshots.rs
@@ -1,39 +1,40 @@
 use crate::commitment::L1BatchWithMetadata;
 use crate::{StorageKey, StorageValue, H256};
-use chrono::{DateTime, Utc};
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::{L1BatchNumber, MiniblockNumber};
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct AllSnapshots {
-    pub snapshots: Vec<SnapshotMetadata>,
+    pub snapshots: Vec<L1BatchNumber>,
 }
 
+// used by the DAL to fetch snapshot metadata
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct SnapshotMetadata {
     pub l1_batch_number: L1BatchNumber,
-    pub generated_at: DateTime<Utc>,
     pub factory_deps_filepath: String,
     pub storage_logs_filepaths: Vec<String>,
 }
 
+// contains all data needed to perform the restore process, except for the factory_deps/storage_logs files themselves
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct SnapshotHeader {
     pub l1_batch_number: L1BatchNumber,
     pub miniblock_number: MiniblockNumber,
+    // ordered by chunk IDs
     pub storage_logs_chunks: Vec<SnapshotStorageLogsChunkMetadata>,
     pub factory_deps_filepath: String,
     pub last_l1_batch_with_metadata: L1BatchWithMetadata,
-    pub generated_at: DateTime<Utc>,
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct SnapshotStorageLogsChunkMetadata {
     pub chunk_id: u64,
+    // can either be a file available over http(s) or a local filesystem path
     pub filepath: String,
 }
 
@@ -71,11 +72,3 @@ pub struct SnapshotFactoryDependency {
     pub bytecode_hash: H256,
     pub bytecode: Vec<u8>,
 }
-
-#[derive(Debug, Serialize, Deserialize)]
-#[serde(rename_all = "camelCase")]
-pub struct AppliedSnapshotStatus {
-    pub l1_batch_number: L1BatchNumber,
-    pub is_finished: bool,
-    pub last_finished_chunk_id: Option<u64>,
-}
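
All of these types derive `#[serde(rename_all = "camelCase")]`, which is what keeps the wire format aligned with the fields the integration test reads (`l1BatchNumber`, `storageLogsChunks`, and so on). A small standalone demonstration of the renaming, with made-up values:

```rust
use serde::Serialize;

// Hypothetical two-field excerpt of a header, just to show the renaming.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct HeaderSketch {
    l1_batch_number: u32,
    factory_deps_filepath: String,
}

fn main() {
    let header = HeaderSketch {
        l1_batch_number: 42,
        factory_deps_filepath: "snapshot_l1_batch_42_factory_deps.json.gzip".into(),
    };
    // Prints: {"l1BatchNumber":42,"factoryDepsFilepath":"snapshot_l1_batch_42_factory_deps.json.gzip"}
    println!("{}", serde_json::to_string(&header).unwrap());
}
```
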
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
index 7fc5f3f45906..a49211532103 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -77,7 +77,6 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
                 .1;
             Ok(Some(SnapshotHeader {
                 l1_batch_number: snapshot_metadata.l1_batch_number,
-                generated_at: snapshot_metadata.generated_at,
                 miniblock_number,
                 last_l1_batch_with_metadata: l1_batch_with_metadata,
                 storage_logs_chunks: chunks,

From 1540abf62d70952df9da4ff446a29574b35099db Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 27 Nov 2023 17:31:56 +0100
Subject: [PATCH 17/43] fix: next round of PR feedback

---
 core/bin/snapshots_creator/README.md          |  4 +-
 core/bin/snapshots_creator/src/main.rs        | 43 +++++++++++++------
 core/lib/config/src/configs/mod.rs            |  2 +
 .../config/src/configs/snapshots_creator.rs   | 11 +++++
 core/lib/config/src/lib.rs                    |  2 +-
 core/lib/dal/sqlx-data.json                   | 24 +++++------
 core/lib/dal/src/snapshots_dal.rs             | 11 +++--
 core/lib/env_config/src/lib.rs                |  1 +
 core/lib/env_config/src/snapshots_creator.rs  |  8 ++++
 core/lib/types/src/snapshots.rs               |  2 +-
 .../tests/api/snapshots-creator.test.ts       | 12 +++---
 11 files changed, 81 insertions(+), 39 deletions(-)
 create mode 100644 core/lib/config/src/configs/snapshots_creator.rs
 create mode 100644 core/lib/env_config/src/snapshots_creator.rs

diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md
index b669dbc4636b..10202a6df51b 100644
--- a/core/bin/snapshots_creator/README.md
+++ b/core/bin/snapshots_creator/README.md
@@ -15,7 +15,9 @@ Snapshot contents can be stored based on blob_store config either in local files
 
 ## Snapshots format
 
-Each snapshot consists of three types of objects (see [snapshots.rs](https://github.com/matter-labs/zksync-era/core/lib/types/src/snapshots.rs)): header, storage logs chunks and factory deps:
+Each snapshot consists of three types of objects (see
+[snapshots.rs](https://github.com/matter-labs/zksync-era/core/lib/types/src/snapshots.rs)): header, storage logs chunks
+and factory deps:
 
 - Snapshot Header (currently returned by snapshots namespace of JSON-RPC API)
 - Snapshot Storage logs chunks (most likely to be stored in gzipped protobuf files, but this part is still WIP) :
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 00b65d4b5eec..8de961c19d4c 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -1,10 +1,11 @@
 use anyhow::Context as _;
 use prometheus_exporter::PrometheusExporterConfig;
+use std::time::Duration;
 use tokio::sync::watch;
 use tokio::sync::watch::Receiver;
-use vise::{Gauge, Metrics};
+use vise::{Buckets, Gauge, Histogram, Metrics};
 use zksync_config::configs::PrometheusConfig;
-use zksync_config::PostgresConfig;
+use zksync_config::{PostgresConfig, SnapshotsCreatorConfig};
 
 use zksync_dal::ConnectionPool;
 use zksync_env_config::object_store::SnapshotsObjectStoreConfig;
@@ -23,9 +24,17 @@ use zksync_utils::time::seconds_since_epoch;
 struct SnapshotsCreatorMetrics {
     storage_logs_chunks_count: Gauge<u64>,
 
+    storage_logs_chunks_left_to_process: Gauge<u64>,
+
     snapshot_generation_duration: Gauge<u64>,
 
     snapshot_l1_batch: Gauge<u64>,
+
+    #[metrics(buckets = Buckets::LATENCIES)]
+    storage_logs_processing_durations: Histogram<Duration>,
+
+    #[metrics(buckets = Buckets::LATENCIES)]
+    factory_deps_processing_durations: Histogram<Duration>,
 }
 #[vise::register]
 pub(crate) static METRICS: vise::Global<SnapshotsCreatorMetrics> = vise::Global::new();
@@ -53,6 +62,7 @@ async fn process_storage_logs_single_chunk(
     chunk_size: u64,
     chunks_count: u64,
 ) -> anyhow::Result<String> {
+    let latency = METRICS.storage_logs_processing_durations.start();
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let logs = conn
         .snapshots_creator_dal()
@@ -76,6 +86,7 @@ async fn process_storage_logs_single_chunk(
         "Finished storing storage logs chunk {}/{chunks_count}, output stored in {output_filepath}",
         chunk_id + 1,
     );
+    latency.observe();
     Ok(output_filepath)
 }
 
@@ -85,6 +96,7 @@ async fn process_factory_deps(
     miniblock_number: MiniblockNumber,
     l1_batch_number: L1BatchNumber,
 ) -> anyhow::Result<String> {
+    let latency = METRICS.factory_deps_processing_durations.start();
     tracing::info!("Processing factory dependencies");
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let factory_deps = conn
@@ -102,10 +114,13 @@ async fn process_factory_deps(
         "Finished processing factory dependencies, output stored in {}",
         output_filepath
     );
+    latency.observe();
     Ok(output_filepath)
 }
 
 async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::Result<()> {
+    let config = SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?;
+
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let start_time = seconds_since_epoch();
 
@@ -130,16 +145,18 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         .await?
         .unwrap()
         .1;
-    let storage_logs_count = conn
+    let storage_logs_chunks_count = conn
         .snapshots_creator_dal()
         .get_storage_logs_count(l1_batch_number)
         .await?;
 
     drop(conn);
+    METRICS
+        .storage_logs_chunks_count
+        .set(storage_logs_chunks_count);
 
-    //TODO load this from config
-    let chunk_size = 1_000_000;
-    let chunks_count = ceil_div(storage_logs_count, chunk_size);
+    let chunk_size = config.storage_logs_chunk_size;
+    let chunks_count = ceil_div(storage_logs_chunks_count, chunk_size);
 
     tracing::info!(
         "Creating snapshot for storage logs up to miniblock {}, l1_batch {}",
@@ -162,6 +179,9 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
 
     let mut storage_logs_output_files = vec![];
 
+    METRICS
+        .storage_logs_chunks_left_to_process
+        .set(chunks_count);
     for chunk_id in 0..chunks_count {
         tracing::info!(
             "Processing storage logs chunk {}/{chunks_count}",
@@ -177,7 +197,9 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         )
         .await?;
         storage_logs_output_files.push(output_file.clone());
-        METRICS.storage_logs_chunks_count.set(chunk_id);
+        METRICS
+            .storage_logs_chunks_left_to_process
+            .set(chunks_count - chunk_id - 1);
     }
 
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
@@ -191,9 +213,6 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         .await?;
 
     METRICS.snapshot_l1_batch.set(l1_batch_number.0.as_u64());
-    METRICS
-        .snapshot_generation_timestamp
-        .set(seconds_since_epoch());
     METRICS
         .snapshot_generation_duration
         .set(seconds_since_epoch() - start_time);
@@ -204,10 +223,6 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         METRICS.snapshot_generation_duration.get()
     );
     tracing::info!("snapshot_l1_batch: {}", METRICS.snapshot_l1_batch.get());
-    tracing::info!(
-        "snapshot_generation_timestamp: {}",
-        METRICS.snapshot_generation_timestamp.get()
-    );
     tracing::info!(
         "storage_logs_chunks_count: {}",
         METRICS.storage_logs_chunks_count.get()
diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs
index 710c128c951f..88fbff206946 100644
--- a/core/lib/config/src/configs/mod.rs
+++ b/core/lib/config/src/configs/mod.rs
@@ -22,6 +22,7 @@ pub use self::{
     prover_group::ProverGroupConfig,
     utils::PrometheusConfig,
     witness_generator::WitnessGeneratorConfig,
+    snapshots_creator::SnapshotsCreatorConfig,
 };
 
 pub mod alerts;
@@ -46,6 +47,7 @@ pub mod object_store;
 pub mod proof_data_handler;
 pub mod prover;
 pub mod prover_group;
+pub mod snapshots_creator;
 pub mod utils;
 pub mod witness_generator;
 
diff --git a/core/lib/config/src/configs/snapshots_creator.rs b/core/lib/config/src/configs/snapshots_creator.rs
new file mode 100644
index 000000000000..75decba3cf92
--- /dev/null
+++ b/core/lib/config/src/configs/snapshots_creator.rs
@@ -0,0 +1,11 @@
+use serde::Deserialize;
+
+#[derive(Debug, Clone, PartialEq, Deserialize)]
+pub struct SnapshotsCreatorConfig {
+    #[serde(default = "snapshots_creator_storage_logs_chunk_size_default")]
+    pub storage_logs_chunk_size: u64,
+}
+
+fn snapshots_creator_storage_logs_chunk_size_default() -> u64 {
+    1_000_000
+}
diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs
index aa83577aad87..4937afe387a5 100644
--- a/core/lib/config/src/lib.rs
+++ b/core/lib/config/src/lib.rs
@@ -3,7 +3,7 @@
 pub use crate::configs::{
     ApiConfig, ChainConfig, ContractVerifierConfig, ContractsConfig, DBConfig, ETHClientConfig,
     ETHSenderConfig, ETHWatchConfig, FetcherConfig, GasAdjusterConfig, ObjectStoreConfig,
-    PostgresConfig, ProverConfig, ProverConfigs,
+    PostgresConfig, ProverConfig, ProverConfigs, SnapshotsCreatorConfig,
 };
 
 pub mod configs;
diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json
index 1a8b458f2dbd..684b94c926b0 100644
--- a/core/lib/dal/sqlx-data.json
+++ b/core/lib/dal/sqlx-data.json
@@ -328,19 +328,19 @@
         }
       ],
       "nullable": [
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
         true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true
+        false,
+        false,
+        false,
+        false
       ],
       "parameters": {
         "Left": [
diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs
index a9fde98ff916..f5b44c121438 100644
--- a/core/lib/dal/src/snapshots_dal.rs
+++ b/core/lib/dal/src/snapshots_dal.rs
@@ -40,7 +40,9 @@ impl SnapshotsDal<'_, '_> {
         .into_iter()
         .map(|r| L1BatchNumber(r.l1_batch_number as u32))
         .collect();
-        Ok(AllSnapshots { snapshots: records })
+        Ok(AllSnapshots {
+            snapshots_l1_batch_numbers: records,
+        })
     }
 
     pub async fn get_snapshot_metadata(
@@ -83,8 +85,11 @@ mod tests {
             .get_all_snapshots()
             .await
             .expect("Failed to retrieve snapshots");
-        assert_eq!(1, snapshots.snapshots.len());
-        assert_eq!(snapshots.snapshots[0], l1_batch_number as L1BatchNumber);
+        assert_eq!(1, snapshots.snapshots_l1_batch_numbers.len());
+        assert_eq!(
+            snapshots.snapshots_l1_batch_numbers[0],
+            l1_batch_number as L1BatchNumber
+        );
 
         let snapshot_metadata = dal
             .get_snapshot_metadata(l1_batch_number)
diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs
index a4a4af3f1ec0..126fbb19cdfe 100644
--- a/core/lib/env_config/src/lib.rs
+++ b/core/lib/env_config/src/lib.rs
@@ -23,6 +23,7 @@ pub mod object_store;
 mod proof_data_handler;
 mod prover;
 mod prover_group;
+mod snapshots_creator;
 mod utils;
 mod witness_generator;
 
diff --git a/core/lib/env_config/src/snapshots_creator.rs b/core/lib/env_config/src/snapshots_creator.rs
new file mode 100644
index 000000000000..b79ecfb4f450
--- /dev/null
+++ b/core/lib/env_config/src/snapshots_creator.rs
@@ -0,0 +1,8 @@
+use crate::{envy_load, FromEnv};
+use zksync_config::SnapshotsCreatorConfig;
+
+impl FromEnv for SnapshotsCreatorConfig {
+    fn from_env() -> anyhow::Result<Self> {
+        envy_load("snapshots_creator", "SNAPSHOTS_CREATOR_")
+    }
+}
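With `envy_load` above, each config field is populated from an environment variable named by upper-casing the field name and prepending the `SNAPSHOTS_CREATOR_` prefix. A minimal usage sketch (the variable value here is made up for illustration):

```rust
use zksync_config::SnapshotsCreatorConfig;
use zksync_env_config::FromEnv;

fn main() -> anyhow::Result<()> {
    // SNAPSHOTS_CREATOR_ + STORAGE_LOGS_CHUNK_SIZE maps onto `storage_logs_chunk_size`;
    // without it, the serde default of 1_000_000 applies.
    std::env::set_var("SNAPSHOTS_CREATOR_STORAGE_LOGS_CHUNK_SIZE", "500000");
    let config = SnapshotsCreatorConfig::from_env()?;
    assert_eq!(config.storage_logs_chunk_size, 500_000);
    Ok(())
}
```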
diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs
index f3e66d287dc4..9f83eb819871 100644
--- a/core/lib/types/src/snapshots.rs
+++ b/core/lib/types/src/snapshots.rs
@@ -6,7 +6,7 @@ use zksync_basic_types::{L1BatchNumber, MiniblockNumber};
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct AllSnapshots {
-    pub snapshots: Vec<L1BatchNumber>,
+    pub snapshots_l1_batch_numbers: Vec<L1BatchNumber>,
 }
 
 // used in dal to fetch certain snapshot data
diff --git a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
index 76b713614a9e..c115d6e1e54e 100644
--- a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
+++ b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
@@ -46,19 +46,17 @@ describe('Snapshots API tests', () => {
         });
     }
     async function createAndValidateSnapshot() {
-        let existingL1Batches = (await getAllSnapshots()).snapshots as any[];
+        let existingBatchNumbers = (await getAllSnapshots()).snapshotsL1BatchNumbers as number[];
         await runCreator();
-        let newSnapshotsBatches = await getAllSnapshots();
-        let addedSnapshots = (newSnapshotsBatches.snapshots as any[]).filter(
-            (snapshot) => !existingL1Batches.find((other) => snapshot.l1BatchNumber === other.l1BatchNumber)
-        );
+        let newBatchNumbers = (await getAllSnapshots()).snapshotsL1BatchNumbers as number[];
+        let addedSnapshots = newBatchNumbers.filter((x) => existingBatchNumbers.indexOf(x) === -1);
         expect(addedSnapshots.length).toEqual(1);
 
-        let l1BatchNumber = addedSnapshots[0].l1BatchNumber;
+        let l1BatchNumber = addedSnapshots[0];
         let fullSnapshot = await getSnapshot(l1BatchNumber);
         let miniblockNumber = fullSnapshot.miniblockNumber;
 
-        expect(fullSnapshot.l1BatchNumber).toEqual(addedSnapshots[0].l1BatchNumber);
+        expect(fullSnapshot.l1BatchNumber).toEqual(l1BatchNumber);
         let path = `${process.env.ZKSYNC_HOME}/${fullSnapshot.storageLogsChunks[0].filepath}`;
 
         let output = JSON.parse(await decompressGzip(path));

From 8faa706bec8e290b21176868dae7ea1c3bd06b34 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 27 Nov 2023 17:52:29 +0100
Subject: [PATCH 18/43] fix: remove debug

---
 infrastructure/zk/src/lint.ts | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts
index 65ee5b75b6dd..b34cd1000312 100644
--- a/infrastructure/zk/src/lint.ts
+++ b/infrastructure/zk/src/lint.ts
@@ -38,10 +38,6 @@ async function lintSystemContracts(check: boolean = false) {
 
 async function clippy() {
     process.chdir(process.env.ZKSYNC_HOME!);
-    const { stdout: rustVersion } = await utils.exec('rustc --version');
-    console.log(`linting using rustc: ${rustVersion.trim()}`);
-    const { stdout: version } = await utils.exec('cargo clippy --version');
-    console.log(`linting using clippy: ${version.trim()}`);
     await utils.spawn('cargo clippy --tests -- -D warnings');
 }
 

From 433af835406961167129b466c82513026ca8b041 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 27 Nov 2023 17:58:43 +0100
Subject: [PATCH 19/43] fix: format sql

---
 .../migrations/20231013163109_create_snapshots_table.up.sql   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql b/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql
index 2d6a700092db..ae35521ee5ee 100644
--- a/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql
+++ b/core/lib/dal/migrations/20231013163109_create_snapshots_table.up.sql
@@ -4,6 +4,6 @@ CREATE TABLE snapshots
     storage_logs_filepaths   TEXT[]    NOT NULL,
     factory_deps_filepath    TEXT      NOT NULL,
 
-    created_at TIMESTAMP NOT NULL,
-    updated_at TIMESTAMP NOT NULL
+    created_at               TIMESTAMP NOT NULL,
+    updated_at               TIMESTAMP NOT NULL
 );

From 52037ba3f88c8ec3007af9ce57252578ca4c5180 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 27 Nov 2023 18:15:08 +0100
Subject: [PATCH 20/43] fix: final round of PR feedback

---
 core/bin/snapshots_creator/src/main.rs        |  4 +--
 core/lib/dal/sqlx-data.json                   | 24 +++++++-------
 core/lib/dal/src/snapshots_creator_dal.rs     | 32 +++++++++----------
 .../tests/api/snapshots-creator.test.ts       |  1 +
 4 files changed, 31 insertions(+), 30 deletions(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 8de961c19d4c..a742b1f34368 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -66,7 +66,7 @@ async fn process_storage_logs_single_chunk(
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let logs = conn
         .snapshots_creator_dal()
-        .get_storage_logs_chunk(l1_batch_number, chunk_id, chunk_size)
+        .get_storage_logs_chunk(l1_batch_number, chunk_size, chunk_size * chunk_id)
         .await
         .context("Error fetching storage logs count")?;
     let storage_logs_chunk = SnapshotStorageLogsChunk { storage_logs: logs };
@@ -102,7 +102,7 @@ async fn process_factory_deps(
     let factory_deps = conn
         .snapshots_creator_dal()
         .get_all_factory_deps(miniblock_number)
-        .await;
+        .await?;
     let factory_deps = SnapshotFactoryDependencies { factory_deps };
     let filename = blob_store
         .put(l1_batch_number, &factory_deps)
diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json
index 684b94c926b0..1a8b458f2dbd 100644
--- a/core/lib/dal/sqlx-data.json
+++ b/core/lib/dal/sqlx-data.json
@@ -328,19 +328,19 @@
         }
       ],
       "nullable": [
-        false,
-        false,
-        false,
-        false,
-        false,
-        false,
-        false,
-        false,
         true,
-        false,
-        false,
-        false,
-        false
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true
       ],
       "parameters": {
         "Left": [
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 1492226f2920..b4fd02ae793a 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -12,7 +12,7 @@ impl SnapshotsCreatorDal<'_, '_> {
     pub async fn get_storage_logs_count(
         &mut self,
         l1_batch_number: L1BatchNumber,
-    ) -> Result<u64, sqlx::Error> {
+    ) -> sqlx::Result<u64> {
         let count = sqlx::query!(
             r#"
             SELECT index
@@ -35,9 +35,9 @@ impl SnapshotsCreatorDal<'_, '_> {
     pub async fn get_storage_logs_chunk(
         &mut self,
         l1_batch_number: L1BatchNumber,
-        chunk_id: u64,
-        chunk_size: u64,
-    ) -> Result<Vec<SnapshotStorageLog>, sqlx::Error> {
+        page_limit: u64,
+        page_offset: u64,
+    ) -> sqlx::Result<Vec<SnapshotStorageLog>> {
         let miniblock_number: i64 = sqlx::query!(
             "select MAX(number) from miniblocks where l1_batch_number = $1",
             l1_batch_number.0 as i64
@@ -72,8 +72,8 @@ impl SnapshotsCreatorDal<'_, '_> {
             LIMIT $2 OFFSET $3;
              "#,
             miniblock_number,
-            chunk_size as i64,
-            (chunk_size * chunk_id) as i64
+            page_limit as i64,
+            page_offset as i64,
         )
         .instrument("get_storage_logs_chunk")
         .report_latency()
@@ -96,21 +96,21 @@ impl SnapshotsCreatorDal<'_, '_> {
     pub async fn get_all_factory_deps(
         &mut self,
         miniblock_number: MiniblockNumber,
-    ) -> Vec<SnapshotFactoryDependency> {
-        sqlx::query!(
+    ) -> sqlx::Result<Vec<SnapshotFactoryDependency>> {
+        let rows = sqlx::query!(
             "SELECT bytecode, bytecode_hash FROM factory_deps WHERE miniblock_number <= $1",
             miniblock_number.0 as i64,
         )
         .instrument("get_all_factory_deps")
         .report_latency()
         .fetch_all(self.storage.conn())
-        .await
-        .unwrap()
-        .into_iter()
-        .map(|row| SnapshotFactoryDependency {
-            bytecode_hash: H256::from_slice(&row.bytecode_hash),
-            bytecode: row.bytecode,
-        })
-        .collect()
+        .await?;
+        return Ok(rows
+            .into_iter()
+            .map(|row| SnapshotFactoryDependency {
+                bytecode_hash: H256::from_slice(&row.bytecode_hash),
+                bytecode: row.bytecode,
+            })
+            .collect());
     }
 }
diff --git a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
index c115d6e1e54e..8587842146f2 100644
--- a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
+++ b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
@@ -60,6 +60,7 @@ describe('Snapshots API tests', () => {
         let path = `${process.env.ZKSYNC_HOME}/${fullSnapshot.storageLogsChunks[0].filepath}`;
 
         let output = JSON.parse(await decompressGzip(path));
+        expect( output['storageLogs'].length).toBeGreaterThan(0);
 
         for (const storageLog of output['storageLogs'] as any[]) {
             let snapshotAccountAddress = storageLog['key']['account']['address'];

From 4bc7d96b48d6485d26101a7b9b9133c773915014 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 27 Nov 2023 18:33:13 +0100
Subject: [PATCH 21/43] fix: lint

---
 core/lib/dal/src/snapshots_creator_dal.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index b4fd02ae793a..9c1583f95b78 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -105,12 +105,12 @@ impl SnapshotsCreatorDal<'_, '_> {
         .report_latency()
         .fetch_all(self.storage.conn())
         .await?;
-        return Ok(rows
+        Ok(rows
             .into_iter()
             .map(|row| SnapshotFactoryDependency {
                 bytecode_hash: H256::from_slice(&row.bytecode_hash),
                 bytecode: row.bytecode,
             })
-            .collect());
+            .collect())
     }
 }

From 45751d48d6ba5d69a4bfeb047063815aa4a9d256 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 27 Nov 2023 18:34:44 +0100
Subject: [PATCH 22/43] fix: fmt

---
 core/tests/ts-integration/tests/api/snapshots-creator.test.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
index 8587842146f2..33cec7e0674f 100644
--- a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
+++ b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
@@ -60,7 +60,7 @@ describe('Snapshots API tests', () => {
         let path = `${process.env.ZKSYNC_HOME}/${fullSnapshot.storageLogsChunks[0].filepath}`;
 
         let output = JSON.parse(await decompressGzip(path));
-        expect( output['storageLogs'].length).toBeGreaterThan(0);
+        expect(output['storageLogs'].length).toBeGreaterThan(0);
 
         for (const storageLog of output['storageLogs'] as any[]) {
             let snapshotAccountAddress = storageLog['key']['account']['address'];

From 72f6e87cede7eda7e4be4fcde3a38420e829c72b Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 27 Nov 2023 22:18:37 +0100
Subject: [PATCH 23/43] feat: small refactor

---
 core/bin/snapshots_creator/src/main.rs    |  4 +++-
 core/lib/dal/src/snapshots_creator_dal.rs | 15 ++-------------
 2 files changed, 5 insertions(+), 14 deletions(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index a742b1f34368..c0bb0424cd58 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -57,6 +57,7 @@ async fn maybe_enable_prometheus_metrics(stop_receiver: Receiver<bool>) -> anyho
 async fn process_storage_logs_single_chunk(
     blob_store: &dyn ObjectStore,
     pool: &ConnectionPool,
+    miniblock_number: MiniblockNumber,
     l1_batch_number: L1BatchNumber,
     chunk_id: u64,
     chunk_size: u64,
@@ -66,7 +67,7 @@ async fn process_storage_logs_single_chunk(
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let logs = conn
         .snapshots_creator_dal()
-        .get_storage_logs_chunk(l1_batch_number, chunk_size, chunk_size * chunk_id)
+        .get_storage_logs_chunk(miniblock_number, chunk_size, chunk_size * chunk_id)
         .await
         .context("Error fetching storage logs count")?;
     let storage_logs_chunk = SnapshotStorageLogsChunk { storage_logs: logs };
@@ -190,6 +191,7 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         let output_file = process_storage_logs_single_chunk(
             &*blob_store,
             &pool,
+            last_miniblock_number_in_batch,
             l1_batch_number,
             chunk_id,
             chunk_size,
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 9c1583f95b78..29de7020e63c 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -34,21 +34,10 @@ impl SnapshotsCreatorDal<'_, '_> {
 
     pub async fn get_storage_logs_chunk(
         &mut self,
-        l1_batch_number: L1BatchNumber,
+        miniblock_number: MiniblockNumber,
         page_limit: u64,
         page_offset: u64,
     ) -> sqlx::Result<Vec<SnapshotStorageLog>> {
-        let miniblock_number: i64 = sqlx::query!(
-            "select MAX(number) from miniblocks where l1_batch_number = $1",
-            l1_batch_number.0 as i64
-        )
-        .instrument("get_storage_logs_chunk")
-        .report_latency()
-        .fetch_one(self.storage.conn())
-        .await?
-        .max
-        .unwrap_or_default();
-
         let storage_logs = sqlx::query!(
             r#"
             SELECT storage_logs.key,
@@ -71,7 +60,7 @@ impl SnapshotsCreatorDal<'_, '_> {
             ORDER BY storage_logs.hashed_key
             LIMIT $2 OFFSET $3;
              "#,
-            miniblock_number,
+            miniblock_number.0 as i64,
             page_limit as i64,
             page_offset as i64,
         )

From 055fc840a1e913bcbdb5b2dc9cf1a313ad3ae058 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Tue, 28 Nov 2023 10:40:13 +0100
Subject: [PATCH 24/43] fix: removed bytecode_hash from
 SnapshotFactoryDependency

---
 core/lib/dal/src/snapshots_creator_dal.rs | 1 -
 core/lib/types/src/snapshots.rs           | 3 +--
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 29de7020e63c..f9a6480da8e9 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -97,7 +97,6 @@ impl SnapshotsCreatorDal<'_, '_> {
         Ok(rows
             .into_iter()
             .map(|row| SnapshotFactoryDependency {
-                bytecode_hash: H256::from_slice(&row.bytecode_hash),
                 bytecode: row.bytecode,
             })
             .collect())
diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs
index 9f83eb819871..3b3b20d13fe3 100644
--- a/core/lib/types/src/snapshots.rs
+++ b/core/lib/types/src/snapshots.rs
@@ -1,5 +1,5 @@
 use crate::commitment::L1BatchWithMetadata;
-use crate::{StorageKey, StorageValue, H256};
+use crate::{StorageKey, StorageValue};
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::{L1BatchNumber, MiniblockNumber};
 
@@ -69,6 +69,5 @@ pub struct SnapshotFactoryDependencies {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct SnapshotFactoryDependency {
-    pub bytecode_hash: H256,
     pub bytecode: Vec<u8>,
 }

From 63d280c105ba350739f2f917467eefe774b79c14 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 4 Dec 2023 13:42:06 +0100
Subject: [PATCH 25/43] feat: make queries parallel

---
 core/bin/snapshots_creator/src/chunking.rs    |  21 +++
 core/bin/snapshots_creator/src/main.rs        | 115 +++++++++++-----
 .../config/src/configs/snapshots_creator.rs   |   7 +
 core/lib/dal/sqlx-data.json                   | 128 +++++++++---------
 core/lib/dal/src/snapshots_creator_dal.rs     |  25 ++--
 .../tests/api/snapshots-creator.test.ts       |  33 +++--
 infrastructure/zk/src/run/run.ts              |   1 +
 7 files changed, 201 insertions(+), 129 deletions(-)
 create mode 100644 core/bin/snapshots_creator/src/chunking.rs

diff --git a/core/bin/snapshots_creator/src/chunking.rs b/core/bin/snapshots_creator/src/chunking.rs
new file mode 100644
index 000000000000..ebb88b56649e
--- /dev/null
+++ b/core/bin/snapshots_creator/src/chunking.rs
@@ -0,0 +1,21 @@
+use std::cmp::min;
+
+pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> ([u8; 2], [u8; 2]) {
+    // We don't need the whole [u8; 32] range of H256: the first two bytes already divide the work
+    // evenly, since two bytes give 65536 buckets and the chunk count is at most in the thousands.
+    let buckets = (u16::MAX as u64) + 1;
+    assert!(chunks_count <= buckets);
+
+    // Some chunks will be exactly this size; the rest need to be exactly 1 larger.
+    let chunk_size = buckets / chunks_count;
+    // The first (buckets % chunks_count) chunks are bigger by 1; the rest have size chunk_size.
+    // For instance, with 31 buckets and 4 chunks:
+    // chunk_size = 7 and 31 % 4 = 3, so the first 3 chunks have size 8 and the last one size 7;
+    // 8 + 8 + 8 + 7 = 31.
+    let chunk_start = chunk_id * chunk_size + min(chunk_id, buckets % chunks_count);
+    let chunk_end = (chunk_id + 1) * chunk_size + min(chunk_id + 1, buckets % chunks_count) - 1;
+
+    let start_bytes = (chunk_start as u16).to_be_bytes();
+    let end_bytes = (chunk_end as u16).to_be_bytes();
+    return (start_bytes, end_bytes);
+}
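A quick way to convince yourself of the arithmetic above is to check that consecutive chunks tile the two-byte keyspace with no gaps or overlaps; a minimal sketch (assuming `get_chunk_hashed_keys_range` is in scope):

```rust
// Illustrative check that the chunks cover all 65536 buckets exactly once.
fn main() {
    let chunks_count = 4u64;
    let mut expected_start = 0u32;
    for chunk_id in 0..chunks_count {
        let (start, end) = get_chunk_hashed_keys_range(chunk_id, chunks_count);
        let start = u16::from_be_bytes(start) as u32;
        let end = u16::from_be_bytes(end) as u32;
        assert_eq!(start, expected_start, "gap or overlap before chunk {chunk_id}");
        assert!(end >= start);
        expected_start = end + 1;
    }
    assert_eq!(expected_start, (u16::MAX as u32) + 1, "keyspace not fully covered");
}
```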
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index c0bb0424cd58..bc8bc8203169 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -1,5 +1,12 @@
+mod chunking;
+
 use anyhow::Context as _;
+use futures::stream::FuturesUnordered;
+use futures::StreamExt;
 use prometheus_exporter::PrometheusExporterConfig;
+use std::cmp::max;
+use std::future::Future;
+use std::pin::Pin;
 use std::time::Duration;
 use tokio::sync::watch;
 use tokio::sync::watch::Receiver;
@@ -7,6 +14,7 @@ use vise::{Buckets, Gauge, Histogram, Metrics};
 use zksync_config::configs::PrometheusConfig;
 use zksync_config::{PostgresConfig, SnapshotsCreatorConfig};
 
+use crate::chunking::get_chunk_hashed_keys_range;
 use zksync_dal::ConnectionPool;
 use zksync_env_config::object_store::SnapshotsObjectStoreConfig;
 use zksync_env_config::FromEnv;
@@ -54,20 +62,21 @@ async fn maybe_enable_prometheus_metrics(stop_receiver: Receiver<bool>) -> anyho
     }
     Ok(())
 }
+
 async fn process_storage_logs_single_chunk(
     blob_store: &dyn ObjectStore,
     pool: &ConnectionPool,
     miniblock_number: MiniblockNumber,
     l1_batch_number: L1BatchNumber,
     chunk_id: u64,
-    chunk_size: u64,
     chunks_count: u64,
 ) -> anyhow::Result<String> {
+    let (min_hashed_key, max_hashed_key) = get_chunk_hashed_keys_range(chunk_id, chunks_count);
     let latency = METRICS.storage_logs_processing_durations.start();
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let logs = conn
         .snapshots_creator_dal()
-        .get_storage_logs_chunk(miniblock_number, chunk_size, chunk_size * chunk_id)
+        .get_storage_logs_chunk(miniblock_number, &min_hashed_key, &max_hashed_key)
         .await
         .context("Error fetching storage logs count")?;
     let storage_logs_chunk = SnapshotStorageLogsChunk { storage_logs: logs };
@@ -83,11 +92,12 @@ async fn process_storage_logs_single_chunk(
     let output_filepath_prefix = blob_store.get_storage_prefix::<SnapshotStorageLogsChunk>();
     let output_filepath = format!("{output_filepath_prefix}/{filename}");
 
+    let elapsed_ms = latency.observe().as_millis();
     tracing::info!(
-        "Finished storing storage logs chunk {}/{chunks_count}, output stored in {output_filepath}",
-        chunk_id + 1,
+        "Finished storage logs chunk {}/{chunks_count}, step took {elapsed_ms}ms, output stored in {output_filepath}",
+        chunk_id + 1
     );
-    latency.observe();
+    drop(conn);
     Ok(output_filepath)
 }
 
@@ -98,7 +108,6 @@ async fn process_factory_deps(
     l1_batch_number: L1BatchNumber,
 ) -> anyhow::Result<String> {
     let latency = METRICS.factory_deps_processing_durations.start();
-    tracing::info!("Processing factory dependencies");
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let factory_deps = conn
         .snapshots_creator_dal()
@@ -111,23 +120,32 @@ async fn process_factory_deps(
         .context("Error storing factory deps in blob store")?;
     let output_filepath_prefix = blob_store.get_storage_prefix::<SnapshotFactoryDependencies>();
     let output_filepath = format!("{output_filepath_prefix}/{filename}");
+    let elapsed_ms = latency.observe().as_millis();
     tracing::info!(
-        "Finished processing factory dependencies, output stored in {}",
+        "Finished factory dependencies, step took {elapsed_ms}ms , output stored in {}",
         output_filepath
     );
-    latency.observe();
     Ok(output_filepath)
 }
 
-async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::Result<()> {
+async fn run(
+    blob_store: Box<dyn ObjectStore>,
+    replica_pool: ConnectionPool,
+    master_pool: ConnectionPool,
+) -> anyhow::Result<()> {
     let config = SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?;
 
-    let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
+    let mut conn = replica_pool
+        .access_storage_tagged("snapshots_creator")
+        .await?;
     let start_time = seconds_since_epoch();
 
     let l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await? - 1; // we subtract 1 so that after restore, EN node has at least one l1 batch to fetch
 
-    if conn
+    let mut master_conn = master_pool
+        .access_storage_tagged("snapshots_creator")
+        .await?;
+    if master_conn
         .snapshots_dal()
         .get_snapshot_metadata(l1_batch_number)
         .await?
@@ -139,6 +157,7 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         );
         return Ok(());
     }
+    drop(master_conn);
 
     let last_miniblock_number_in_batch = conn
         .blocks_dal()
@@ -157,7 +176,8 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
         .set(storage_logs_chunks_count);
 
     let chunk_size = config.storage_logs_chunk_size;
-    let chunks_count = ceil_div(storage_logs_chunks_count, chunk_size);
+    // we force at least 10 chunks to avoid situations where only one chunk is created in tests
+    let chunks_count = max(10, ceil_div(storage_logs_chunks_count, chunk_size));
 
     tracing::info!(
         "Creating snapshot for storage logs up to miniblock {}, l1_batch {}",
@@ -172,7 +192,7 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
 
     let factory_deps_output_file = process_factory_deps(
         &*blob_store,
-        &pool,
+        &replica_pool,
         last_miniblock_number_in_batch,
         l1_batch_number,
     )
@@ -183,30 +203,43 @@ async fn run(blob_store: Box<dyn ObjectStore>, pool: ConnectionPool) -> anyhow::
     METRICS
         .storage_logs_chunks_left_to_process
         .set(chunks_count);
-    for chunk_id in 0..chunks_count {
-        tracing::info!(
-            "Processing storage logs chunk {}/{chunks_count}",
-            chunk_id + 1
-        );
-        let output_file = process_storage_logs_single_chunk(
-            &*blob_store,
-            &pool,
-            last_miniblock_number_in_batch,
-            l1_batch_number,
-            chunk_id,
-            chunk_size,
-            chunks_count,
-        )
-        .await?;
-        storage_logs_output_files.push(output_file.clone());
-        METRICS
-            .storage_logs_chunks_left_to_process
-            .set(chunks_count - chunk_id - 1);
+    let mut tasks =
+        FuturesUnordered::<Pin<Box<dyn Future<Output = anyhow::Result<String>>>>>::new();
+    let mut last_chunk_id = 0;
+    while last_chunk_id < chunks_count || tasks.len() != 0 {
+        while (tasks.len() as u32) < config.concurrent_queries_count && last_chunk_id < chunks_count
+        {
+            tasks.push(Box::pin(process_storage_logs_single_chunk(
+                &*blob_store,
+                &replica_pool,
+                last_miniblock_number_in_batch,
+                l1_batch_number,
+                last_chunk_id,
+                chunks_count,
+            )));
+            last_chunk_id += 1;
+        }
+        if let Some(result) = tasks.next().await {
+            tracing::info!(
+                "Completed chunk {}/{}, {} chunks are still in progress",
+                last_chunk_id - tasks.len() as u64,
+                chunks_count,
+                tasks.len()
+            );
+            storage_logs_output_files.push(result?);
+            METRICS
+                .storage_logs_chunks_left_to_process
+                .set(chunks_count - last_chunk_id + tasks.len() as u64);
+        }
     }
+    tracing::info!("Finished generating snapshot, storing progress in db");
 
-    let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
+    let mut master_conn = master_pool
+        .access_storage_tagged("snapshots_creator")
+        .await?;
 
-    conn.snapshots_dal()
+    master_conn
+        .snapshots_dal()
         .add_snapshot(
             l1_batch_number,
             &storage_logs_output_files,
@@ -262,11 +295,21 @@ async fn main() -> anyhow::Result<()> {
         .await;
 
     let postgres_config = PostgresConfig::from_env().context("PostgresConfig")?;
-    let pool = ConnectionPool::singleton(postgres_config.replica_url()?)
+    let creator_config =
+        SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?;
+
+    let replica_pool = ConnectionPool::builder(
+        postgres_config.replica_url()?,
+        creator_config.concurrent_queries_count,
+    )
+    .build()
+    .await?;
+
+    let master_pool = ConnectionPool::singleton(postgres_config.master_url()?)
         .build()
         .await?;
 
-    run(blob_store, pool).await?;
+    run(blob_store, replica_pool, master_pool).await?;
     tracing::info!("Finished running snapshot creator!");
     stop_sender.send(true).ok();
     Ok(())
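The loop above implements a bounded-concurrency pattern: keep up to `concurrent_queries_count` chunk futures in flight in a `FuturesUnordered`, topping the set back up each time a result drains. A self-contained sketch of the same pattern (assuming `tokio`, `futures`, and `anyhow` as dependencies; `process` stands in for the real chunk task):

```rust
use futures::stream::{FuturesUnordered, StreamExt};

// Stand-in for process_storage_logs_single_chunk.
async fn process(id: u64) -> anyhow::Result<String> {
    Ok(format!("chunk {id}"))
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let total_chunks = 100u64;
    let limit = 25; // cf. concurrent_queries_count
    let mut tasks = FuturesUnordered::new();
    let mut next_id = 0u64;
    let mut outputs = Vec::new();
    while next_id < total_chunks || !tasks.is_empty() {
        // Refill the in-flight window before draining one result.
        while tasks.len() < limit && next_id < total_chunks {
            tasks.push(process(next_id));
            next_id += 1;
        }
        if let Some(result) = tasks.next().await {
            outputs.push(result?); // propagate the first failure instead of panicking
        }
    }
    println!("finished {} chunks", outputs.len());
    Ok(())
}
```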
diff --git a/core/lib/config/src/configs/snapshots_creator.rs b/core/lib/config/src/configs/snapshots_creator.rs
index 75decba3cf92..2f37c5d3afde 100644
--- a/core/lib/config/src/configs/snapshots_creator.rs
+++ b/core/lib/config/src/configs/snapshots_creator.rs
@@ -4,8 +4,15 @@ use serde::Deserialize;
 pub struct SnapshotsCreatorConfig {
     #[serde(default = "snapshots_creator_storage_logs_chunk_size_default")]
     pub storage_logs_chunk_size: u64,
+
+    #[serde(default = "snapshots_creator_concurrent_queries_count")]
+    pub concurrent_queries_count: u32,
 }
 
 fn snapshots_creator_storage_logs_chunk_size_default() -> u64 {
     1_000_000
 }
+
+fn snapshots_creator_concurrent_queries_count() -> u32 {
+    25
+}
diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json
index 1a8b458f2dbd..8792169171d3 100644
--- a/core/lib/dal/sqlx-data.json
+++ b/core/lib/dal/sqlx-data.json
@@ -13,58 +13,6 @@
     },
     "query": "UPDATE proof_generation_details SET status=$1, updated_at = now() WHERE l1_batch_number = $2"
   },
-  "00226c81a8bb7bc56ccce6d65247e5b8cd8803179e50cb93fefbaca81e98315e": {
-    "describe": {
-      "columns": [
-        {
-          "name": "key",
-          "ordinal": 0,
-          "type_info": "Bytea"
-        },
-        {
-          "name": "value",
-          "ordinal": 1,
-          "type_info": "Bytea"
-        },
-        {
-          "name": "address",
-          "ordinal": 2,
-          "type_info": "Bytea"
-        },
-        {
-          "name": "miniblock_number",
-          "ordinal": 3,
-          "type_info": "Int8"
-        },
-        {
-          "name": "l1_batch_number",
-          "ordinal": 4,
-          "type_info": "Int8"
-        },
-        {
-          "name": "index",
-          "ordinal": 5,
-          "type_info": "Int8"
-        }
-      ],
-      "nullable": [
-        false,
-        false,
-        false,
-        false,
-        false,
-        false
-      ],
-      "parameters": {
-        "Left": [
-          "Int8",
-          "Int8",
-          "Int8"
-        ]
-      }
-    },
-    "query": "\n            SELECT storage_logs.key,\n                   storage_logs.value,\n                   storage_logs.address,\n                   storage_logs.miniblock_number,\n                   initial_writes.l1_batch_number,\n                   initial_writes.index\n            FROM (SELECT hashed_key,\n                         max(ARRAY [miniblock_number, operation_number]::int[]) AS op\n                  FROM storage_logs\n                  WHERE miniblock_number <= $1\n                  GROUP BY hashed_key\n                  ORDER BY hashed_key) AS keys\n                     INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n                AND storage_logs.miniblock_number = keys.op[1]\n                AND storage_logs.operation_number = keys.op[2]\n                     INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key\n            WHERE miniblock_number <= $1\n            ORDER BY storage_logs.hashed_key\n            LIMIT $2 OFFSET $3;\n             "
-  },
   "00bd80fd83aff559d8d9232c2e98a12a1dd2c8f31792cd915e2cf11f28e583b7": {
     "describe": {
       "columns": [
@@ -328,19 +276,19 @@
         }
       ],
       "nullable": [
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
+        false,
         true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true,
-        true
+        false,
+        false,
+        false,
+        false
       ],
       "parameters": {
         "Left": [
@@ -5517,6 +5465,58 @@
     },
     "query": "INSERT INTO miniblocks ( number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, protocol_version, virtual_blocks, created_at, updated_at ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, now(), now())"
   },
+  "6c68925cc6eb422d8c9f04cd353990c995e948f7031da654739852621d14fcea": {
+    "describe": {
+      "columns": [
+        {
+          "name": "key",
+          "ordinal": 0,
+          "type_info": "Bytea"
+        },
+        {
+          "name": "value",
+          "ordinal": 1,
+          "type_info": "Bytea"
+        },
+        {
+          "name": "address",
+          "ordinal": 2,
+          "type_info": "Bytea"
+        },
+        {
+          "name": "miniblock_number",
+          "ordinal": 3,
+          "type_info": "Int8"
+        },
+        {
+          "name": "l1_batch_number",
+          "ordinal": 4,
+          "type_info": "Int8"
+        },
+        {
+          "name": "index",
+          "ordinal": 5,
+          "type_info": "Int8"
+        }
+      ],
+      "nullable": [
+        true,
+        true,
+        true,
+        true,
+        true,
+        true
+      ],
+      "parameters": {
+        "Left": [
+          "Int8",
+          "Bytea",
+          "Bytea"
+        ]
+      }
+    },
+    "query": "\n            SELECT storage_logs.key,\n                   storage_logs.value,\n                   storage_logs.address,\n                   storage_logs.miniblock_number,\n                   initial_writes.l1_batch_number,\n                   initial_writes.index\n            FROM (SELECT hashed_key,\n                         max(ARRAY [miniblock_number, operation_number]::int[]) AS op\n                  FROM storage_logs\n                  WHERE miniblock_number <= $1 and hashed_key >= $2 and hashed_key < $3\n                  GROUP BY hashed_key\n                  ORDER BY hashed_key) AS keys\n                     INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n                AND storage_logs.miniblock_number = keys.op[1]\n                AND storage_logs.operation_number = keys.op[2]\n                     INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key;\n             "
+  },
   "6d142503d0d8682992a0353bae4a6b25ec82e7cadf0b2bbadcfd23c27f646bae": {
     "describe": {
       "columns": [],
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index f9a6480da8e9..3f62afaf3e74 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -35,8 +35,8 @@ impl SnapshotsCreatorDal<'_, '_> {
     pub async fn get_storage_logs_chunk(
         &mut self,
         miniblock_number: MiniblockNumber,
-        page_limit: u64,
-        page_offset: u64,
+        min_hashed_key: &[u8],
+        max_hashed_key: &[u8],
     ) -> sqlx::Result<Vec<SnapshotStorageLog>> {
         let storage_logs = sqlx::query!(
             r#"
@@ -49,20 +49,17 @@ impl SnapshotsCreatorDal<'_, '_> {
             FROM (SELECT hashed_key,
                          max(ARRAY [miniblock_number, operation_number]::int[]) AS op
                   FROM storage_logs
-                  WHERE miniblock_number <= $1
+                  WHERE miniblock_number <= $1 and hashed_key >= $2 and hashed_key < $3
                   GROUP BY hashed_key
                   ORDER BY hashed_key) AS keys
                      INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key
                 AND storage_logs.miniblock_number = keys.op[1]
                 AND storage_logs.operation_number = keys.op[2]
-                     INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key
-            WHERE miniblock_number <= $1
-            ORDER BY storage_logs.hashed_key
-            LIMIT $2 OFFSET $3;
+                     INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key;
              "#,
             miniblock_number.0 as i64,
-            page_limit as i64,
-            page_offset as i64,
+            min_hashed_key,
+            max_hashed_key,
         )
         .instrument("get_storage_logs_chunk")
         .report_latency()
@@ -71,12 +68,12 @@ impl SnapshotsCreatorDal<'_, '_> {
         .iter()
         .map(|row| SnapshotStorageLog {
             key: StorageKey::new(
-                AccountTreeId::new(Address::from_slice(&row.address)),
-                H256::from_slice(&row.key),
+                AccountTreeId::new(Address::from_slice(row.address.as_ref().unwrap())),
+                H256::from_slice(row.key.as_ref().unwrap()),
             ),
-            value: H256::from_slice(&row.value),
-            l1_batch_number_of_initial_write: L1BatchNumber(row.l1_batch_number as u32),
-            enumeration_index: row.index as u64,
+            value: H256::from_slice(row.value.as_ref().unwrap()),
+            l1_batch_number_of_initial_write: L1BatchNumber(row.l1_batch_number.unwrap() as u32),
+            enumeration_index: row.index.unwrap() as u64,
         })
         .collect();
         Ok(storage_logs)
diff --git a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
index 33cec7e0674f..f191571677b5 100644
--- a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
+++ b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
@@ -1,7 +1,7 @@
 import { TestMaster } from '../../src/index';
-import * as utils from 'zk/build/utils';
 import fs from 'fs';
 import * as zlib from 'zlib';
+import {snapshots_creator} from "zk/build/run/run";
 describe('Snapshots API tests', () => {
     let testMaster: TestMaster;
 
@@ -15,7 +15,7 @@ describe('Snapshots API tests', () => {
 
     async function runCreator() {
         console.log('Starting creator');
-        await utils.spawn(`cd $ZKSYNC_HOME && cargo run --bin snapshots_creator --release`);
+        await snapshots_creator();
     }
 
     async function rpcRequest(name: string, params: any) {
@@ -57,21 +57,24 @@ describe('Snapshots API tests', () => {
         let miniblockNumber = fullSnapshot.miniblockNumber;
 
         expect(fullSnapshot.l1BatchNumber).toEqual(l1BatchNumber);
-        let path = `${process.env.ZKSYNC_HOME}/${fullSnapshot.storageLogsChunks[0].filepath}`;
+        for (let chunkMetadata of fullSnapshot.storageLogsChunks) {
+            console.log(`Verifying ${chunkMetadata.filepath}`)
+            let path = `${process.env.ZKSYNC_HOME}/${chunkMetadata.filepath}`;
 
-        let output = JSON.parse(await decompressGzip(path));
-        expect(output['storageLogs'].length).toBeGreaterThan(0);
+            let output = JSON.parse(await decompressGzip(path));
+            expect(output['storageLogs'].length).toBeGreaterThan(0);
 
-        for (const storageLog of output['storageLogs'] as any[]) {
-            let snapshotAccountAddress = storageLog['key']['account']['address'];
-            let snapshotKey = storageLog['key']['key'];
-            let snapshotValue = storageLog['value'];
-            let snapshotL1BatchNumber = storageLog['l1BatchNumberOfInitialWrite'];
-            const valueOnBlockchain = await testMaster
-                .mainAccount()
-                .provider.getStorageAt(snapshotAccountAddress, snapshotKey, miniblockNumber);
-            expect(snapshotValue).toEqual(valueOnBlockchain);
-            expect(snapshotL1BatchNumber).toBeLessThanOrEqual(l1BatchNumber);
+            for (const storageLog of output['storageLogs'] as any[]) {
+                let snapshotAccountAddress = storageLog['key']['account']['address'];
+                let snapshotKey = storageLog['key']['key'];
+                let snapshotValue = storageLog['value'];
+                let snapshotL1BatchNumber = storageLog['l1BatchNumberOfInitialWrite'];
+                const valueOnBlockchain = await testMaster
+                    .mainAccount()
+                    .provider.getStorageAt(snapshotAccountAddress, snapshotKey, miniblockNumber);
+                expect(snapshotValue).toEqual(valueOnBlockchain);
+                expect(snapshotL1BatchNumber).toBeLessThanOrEqual(l1BatchNumber);
+            }
         }
     }
 
diff --git a/infrastructure/zk/src/run/run.ts b/infrastructure/zk/src/run/run.ts
index c8b70be9ccaf..b00c8e3e0bef 100644
--- a/infrastructure/zk/src/run/run.ts
+++ b/infrastructure/zk/src/run/run.ts
@@ -120,6 +120,7 @@ export async function cross_en_checker() {
 }
 
 export async function snapshots_creator() {
+    process.chdir(`${process.env.ZKSYNC_HOME}`);
     let logLevel = 'RUST_LOG=snapshots_creator=debug';
     await utils.spawn(`${logLevel} cargo run --bin snapshots_creator --release`);
 }

From f051d6a12a59cecdcc0234a228d0e31702418ed2 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 4 Dec 2023 13:43:57 +0100
Subject: [PATCH 26/43] fix: zk fmt

---
 core/tests/ts-integration/tests/api/snapshots-creator.test.ts | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
index f191571677b5..df23ce2a5771 100644
--- a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
+++ b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
@@ -1,7 +1,7 @@
 import { TestMaster } from '../../src/index';
 import fs from 'fs';
 import * as zlib from 'zlib';
-import {snapshots_creator} from "zk/build/run/run";
+import { snapshots_creator } from 'zk/build/run/run';
 describe('Snapshots API tests', () => {
     let testMaster: TestMaster;
 
@@ -58,7 +58,7 @@ describe('Snapshots API tests', () => {
 
         expect(fullSnapshot.l1BatchNumber).toEqual(l1BatchNumber);
         for (let chunkMetadata of fullSnapshot.storageLogsChunks) {
-            console.log(`Verifying ${chunkMetadata.filepath}`)
+            console.log(`Verifying ${chunkMetadata.filepath}`);
             let path = `${process.env.ZKSYNC_HOME}/${chunkMetadata.filepath}`;
 
             let output = JSON.parse(await decompressGzip(path));

From d9895f5c8759dc28a1e21ca81043698452f7a629 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 4 Dec 2023 21:28:03 +0100
Subject: [PATCH 27/43] fix: PR feedback

---
 core/bin/snapshots_creator/src/main.rs    | 32 +++++++++--------------
 core/lib/dal/src/snapshots_creator_dal.rs |  7 +++--
 2 files changed, 16 insertions(+), 23 deletions(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index bc8bc8203169..c11220df56f5 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -114,6 +114,7 @@ async fn process_factory_deps(
         .get_all_factory_deps(miniblock_number)
         .await?;
     let factory_deps = SnapshotFactoryDependencies { factory_deps };
+    drop(conn);
     let filename = blob_store
         .put(l1_batch_number, &factory_deps)
         .await
@@ -140,7 +141,8 @@ async fn run(
         .await?;
     let start_time = seconds_since_epoch();
 
-    let l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await? - 1; // we subtract 1 so that after restore, EN node has at least one l1 batch to fetch
+    // we subtract 1 so that, after restore, the external node has at least one L1 batch to fetch
+    let l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await? - 1;
 
     let mut master_conn = master_pool
         .access_storage_tagged("snapshots_creator")
@@ -151,44 +153,36 @@ async fn run(
         .await?
         .is_some()
     {
-        tracing::info!(
-            "Snapshot for L1 batch number {} already exists, exiting",
-            l1_batch_number
-        );
+        tracing::info!("Snapshot for L1 batch number {l1_batch_number} already exists, exiting",);
         return Ok(());
     }
     drop(master_conn);
 
+    // a snapshot always covers the state up to the last miniblock of its L1 batch
     let last_miniblock_number_in_batch = conn
         .blocks_dal()
         .get_miniblock_range_of_l1_batch(l1_batch_number)
         .await?
-        .unwrap()
+        .context("Error fetching last miniblock number")?
         .1;
-    let storage_logs_chunks_count = conn
+    let distinct_storage_logs_keys_count = conn
         .snapshots_creator_dal()
-        .get_storage_logs_count(l1_batch_number)
+        .get_distinct_storage_logs_keys_count(l1_batch_number)
         .await?;
 
     drop(conn);
-    METRICS
-        .storage_logs_chunks_count
-        .set(storage_logs_chunks_count);
 
     let chunk_size = config.storage_logs_chunk_size;
     // we force at least 10 chunks to avoid situations where only one chunk is created in tests
-    let chunks_count = max(10, ceil_div(storage_logs_chunks_count, chunk_size));
+    let chunks_count = max(10, ceil_div(distinct_storage_logs_keys_count, chunk_size));
+
+    METRICS.storage_logs_chunks_count.set(chunks_count);
 
     tracing::info!(
-        "Creating snapshot for storage logs up to miniblock {}, l1_batch {}",
-        last_miniblock_number_in_batch,
+        "Creating snapshot for storage logs up to miniblock {last_miniblock_number_in_batch}, l1_batch {}",
         l1_batch_number.0
     );
-    tracing::info!(
-        "Starting to generate {} chunks of max size {}",
-        chunks_count,
-        chunk_size
-    );
+    tracing::info!("Starting to generate {chunks_count} chunks of expected size {chunk_size}");
 
     let factory_deps_output_file = process_factory_deps(
         &*blob_store,
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 3f62afaf3e74..800e4f8a4d92 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -9,7 +9,7 @@ pub struct SnapshotsCreatorDal<'a, 'c> {
 }
 
 impl SnapshotsCreatorDal<'_, '_> {
-    pub async fn get_storage_logs_count(
+    pub async fn get_distinct_storage_logs_keys_count(
         &mut self,
         l1_batch_number: L1BatchNumber,
     ) -> sqlx::Result<u64> {
@@ -26,8 +26,7 @@ impl SnapshotsCreatorDal<'_, '_> {
         .instrument("get_storage_logs_count")
         .report_latency()
         .fetch_one(self.storage.conn())
-        .await
-        .unwrap()
+        .await?
         .index;
         Ok(count as u64)
     }
@@ -84,7 +83,7 @@ impl SnapshotsCreatorDal<'_, '_> {
         miniblock_number: MiniblockNumber,
     ) -> sqlx::Result<Vec<SnapshotFactoryDependency>> {
         let rows = sqlx::query!(
-            "SELECT bytecode, bytecode_hash FROM factory_deps WHERE miniblock_number <= $1",
+            "SELECT bytecode FROM factory_deps WHERE miniblock_number <= $1",
             miniblock_number.0 as i64,
         )
         .instrument("get_all_factory_deps")

From c4081a4388918ac9cef7ab106b6ae887bd25f72c Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Tue, 5 Dec 2023 12:24:39 +0100
Subject: [PATCH 28/43] fix: PR feedback

---
 core/bin/snapshots_creator/README.md       |  4 +-
 core/bin/snapshots_creator/src/chunking.rs |  2 +-
 core/bin/snapshots_creator/src/main.rs     | 32 +++++++---------
 core/lib/dal/sqlx-data.json                | 44 ++++++++++++++++------
 4 files changed, 49 insertions(+), 33 deletions(-)

diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md
index 10202a6df51b..03167b803592 100644
--- a/core/bin/snapshots_creator/README.md
+++ b/core/bin/snapshots_creator/README.md
@@ -16,8 +16,8 @@ Snapshot contents can be stored based on blob_store config either in local files
 ## Snapshots format
 
 Each snapshot consists of three types of objects (see
-[snapshots.rs](https://github.com/matter-labs/zksync-era/core/lib/types/src/snapshots.rs)): header, storage logs chunks
-and factory deps:
+[snapshots.rs](https://github.com/matter-labs/zksync-era/blob/main/core/lib/types/src/snapshots.rs)): header, storage
+logs chunks and factory deps:
 
 - Snapshot Header (currently returned by snapshots namespace of JSON-RPC API)
 - Snapshot Storage logs chunks (most likely to be stored in gzipped protobuf files, but this part is still WIP) :
diff --git a/core/bin/snapshots_creator/src/chunking.rs b/core/bin/snapshots_creator/src/chunking.rs
index ebb88b56649e..68db97fd73cb 100644
--- a/core/bin/snapshots_creator/src/chunking.rs
+++ b/core/bin/snapshots_creator/src/chunking.rs
@@ -17,5 +17,5 @@ pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> ([u8; 2]
 
     let start_bytes = (chunk_start as u16).to_be_bytes();
     let end_bytes = (chunk_end as u16).to_be_bytes();
-    return (start_bytes, end_bytes);
+    (start_bytes, end_bytes)
 }
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index c11220df56f5..5653ee54dced 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -10,6 +10,7 @@ use std::pin::Pin;
 use std::time::Duration;
 use tokio::sync::watch;
 use tokio::sync::watch::Receiver;
+use vise::Unit;
 use vise::{Buckets, Gauge, Histogram, Metrics};
 use zksync_config::configs::PrometheusConfig;
 use zksync_config::{PostgresConfig, SnapshotsCreatorConfig};
@@ -25,7 +26,6 @@ use zksync_types::snapshots::{
 use zksync_types::zkevm_test_harness::zk_evm::zkevm_opcode_defs::decoding::AllowedPcOrImm;
 use zksync_types::{L1BatchNumber, MiniblockNumber};
 use zksync_utils::ceil_div;
-use zksync_utils::time::seconds_since_epoch;
 
 #[derive(Debug, Metrics)]
 #[metrics(prefix = "snapshots_creator")]
@@ -34,15 +34,16 @@ struct SnapshotsCreatorMetrics {
 
     storage_logs_chunks_left_to_process: Gauge<u64>,
 
-    snapshot_generation_duration: Gauge<u64>,
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    snapshot_generation_duration: Histogram<Duration>,
 
     snapshot_l1_batch: Gauge<u64>,
 
-    #[metrics(buckets = Buckets::LATENCIES)]
-    storage_logs_processing_durations: Histogram<Duration>,
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    storage_logs_processing_duration: Histogram<Duration>,
 
-    #[metrics(buckets = Buckets::LATENCIES)]
-    factory_deps_processing_durations: Histogram<Duration>,
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    factory_deps_processing_duration: Histogram<Duration>,
 }
 #[vise::register]
 pub(crate) static METRICS: vise::Global<SnapshotsCreatorMetrics> = vise::Global::new();
@@ -72,7 +73,7 @@ async fn process_storage_logs_single_chunk(
     chunks_count: u64,
 ) -> anyhow::Result<String> {
     let (min_hashed_key, max_hashed_key) = get_chunk_hashed_keys_range(chunk_id, chunks_count);
-    let latency = METRICS.storage_logs_processing_durations.start();
+    let latency = METRICS.storage_logs_processing_duration.start();
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let logs = conn
         .snapshots_creator_dal()
@@ -107,7 +108,7 @@ async fn process_factory_deps(
     miniblock_number: MiniblockNumber,
     l1_batch_number: L1BatchNumber,
 ) -> anyhow::Result<String> {
-    let latency = METRICS.factory_deps_processing_durations.start();
+    let latency = METRICS.factory_deps_processing_duration.start();
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let factory_deps = conn
         .snapshots_creator_dal()
@@ -134,12 +135,13 @@ async fn run(
     replica_pool: ConnectionPool,
     master_pool: ConnectionPool,
 ) -> anyhow::Result<()> {
+    let latency = METRICS.snapshot_generation_duration.start();
+
     let config = SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?;
 
     let mut conn = replica_pool
         .access_storage_tagged("snapshots_creator")
         .await?;
-    let start_time = seconds_since_epoch();
 
     // we subtract 1 so that after restore, EN node has at least one l1 batch to fetch
     let l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await? - 1;
@@ -200,7 +202,7 @@ async fn run(
     let mut tasks =
         FuturesUnordered::<Pin<Box<dyn Future<Output = anyhow::Result<String>>>>>::new();
     let mut last_chunk_id = 0;
-    while last_chunk_id < chunks_count || tasks.len() != 0 {
+    while last_chunk_id < chunks_count || !tasks.is_empty() {
         while (tasks.len() as u32) < config.concurrent_queries_count && last_chunk_id < chunks_count
         {
             tasks.push(Box::pin(process_storage_logs_single_chunk(
@@ -242,15 +244,9 @@ async fn run(
         .await?;
 
     METRICS.snapshot_l1_batch.set(l1_batch_number.0.as_u64());
-    METRICS
-        .snapshot_generation_duration
-        .set(seconds_since_epoch() - start_time);
 
-    tracing::info!("Run metrics:");
-    tracing::info!(
-        "snapshot_generation_duration: {}s",
-        METRICS.snapshot_generation_duration.get()
-    );
+    let elapsed_sec = latency.observe().as_secs();
+    tracing::info!("snapshot_generation_duration: {elapsed_sec}s");
     tracing::info!("snapshot_l1_batch: {}", METRICS.snapshot_l1_batch.get());
     tracing::info!(
         "storage_logs_chunks_count: {}",
diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json
index 8792169171d3..dc5f7c2f7281 100644
--- a/core/lib/dal/sqlx-data.json
+++ b/core/lib/dal/sqlx-data.json
@@ -276,19 +276,19 @@
         }
       ],
       "nullable": [
-        false,
-        false,
-        false,
-        false,
-        false,
-        false,
-        false,
-        false,
         true,
-        false,
-        false,
-        false,
-        false
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true
       ],
       "parameters": {
         "Left": [
@@ -4851,6 +4851,26 @@
     },
     "query": "INSERT INTO events_queue (l1_batch_number, serialized_events_queue) VALUES ($1, $2)"
   },
+  "61cc5a1564918a34b4235290c421f04c40ef935f72f2c72744a5b741439a966a": {
+    "describe": {
+      "columns": [
+        {
+          "name": "bytecode",
+          "ordinal": 0,
+          "type_info": "Bytea"
+        }
+      ],
+      "nullable": [
+        false
+      ],
+      "parameters": {
+        "Left": [
+          "Int8"
+        ]
+      }
+    },
+    "query": "SELECT bytecode FROM factory_deps WHERE miniblock_number <= $1"
+  },
   "6317155050a5dae24ea202cfd54d1e58cc7aeb0bfd4d95aa351f85cff04d3bff": {
     "describe": {
       "columns": [

From 3f98233e57ee16b3ef65f684f3554eaccf7dbd8e Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Tue, 5 Dec 2023 17:09:20 +0100
Subject: [PATCH 29/43] fix: PR feedback

---
 core/bin/snapshots_creator/src/main.rs               |  6 ++----
 core/lib/object_store/src/file.rs                    |  2 +-
 core/lib/object_store/src/gcs.rs                     |  2 +-
 core/lib/object_store/src/mock.rs                    |  2 +-
 core/lib/object_store/src/objects.rs                 |  2 +-
 core/lib/object_store/src/raw.rs                     |  6 +++---
 .../src/api_server/web3/namespaces/snapshots.rs      | 12 ++++++++++--
 7 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 5653ee54dced..54d2f5ca831e 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -23,7 +23,6 @@ use zksync_object_store::{ObjectStore, ObjectStoreFactory};
 use zksync_types::snapshots::{
     SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey,
 };
-use zksync_types::zkevm_test_harness::zk_evm::zkevm_opcode_defs::decoding::AllowedPcOrImm;
 use zksync_types::{L1BatchNumber, MiniblockNumber};
 use zksync_utils::ceil_div;
 
@@ -80,6 +79,7 @@ async fn process_storage_logs_single_chunk(
         .get_storage_logs_chunk(miniblock_number, &min_hashed_key, &max_hashed_key)
         .await
         .context("Error fetching storage logs count")?;
+    drop(conn);
     let storage_logs_chunk = SnapshotStorageLogsChunk { storage_logs: logs };
     let key = SnapshotStorageLogsStorageKey {
         l1_batch_number,
@@ -98,7 +98,6 @@ async fn process_storage_logs_single_chunk(
         "Finished storage logs chunk {}/{chunks_count}, step took {elapsed_ms}ms, output stored in {output_filepath}",
         chunk_id + 1
     );
-    drop(conn);
     Ok(output_filepath)
 }
 
@@ -160,7 +159,6 @@ async fn run(
     }
     drop(master_conn);
 
-    // snapshots always
     let last_miniblock_number_in_batch = conn
         .blocks_dal()
         .get_miniblock_range_of_l1_batch(l1_batch_number)
@@ -243,7 +241,7 @@ async fn run(
         )
         .await?;
 
-    METRICS.snapshot_l1_batch.set(l1_batch_number.0.as_u64());
+    METRICS.snapshot_l1_batch.set(l1_batch_number.0 as u64);
 
     let elapsed_sec = latency.observe().as_secs();
     tracing::info!("snapshot_generation_duration: {elapsed_sec}s");
diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs
index 6cfb93309a43..2d77366a952c 100644
--- a/core/lib/object_store/src/file.rs
+++ b/core/lib/object_store/src/file.rs
@@ -71,7 +71,7 @@ impl ObjectStore for FileBackedObjectStore {
         fs::remove_file(filename).await.map_err(From::from)
     }
 
-    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String {
+    fn storage_prefix_raw(&self, bucket: Bucket) -> String {
         format!("{}/{}", self.base_dir, bucket)
     }
 }
diff --git a/core/lib/object_store/src/gcs.rs b/core/lib/object_store/src/gcs.rs
index ecc9d7d26d7c..1db2e26398dd 100644
--- a/core/lib/object_store/src/gcs.rs
+++ b/core/lib/object_store/src/gcs.rs
@@ -208,7 +208,7 @@ impl ObjectStore for GoogleCloudStorage {
         self.remove_inner(bucket.as_str(), key).await
     }
 
-    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String {
+    fn storage_prefix_raw(&self, bucket: Bucket) -> String {
         format!(
             "https://storage.googleapis.com/{}/{}",
             self.bucket_prefix.clone(),
diff --git a/core/lib/object_store/src/mock.rs b/core/lib/object_store/src/mock.rs
index f170de5e7533..f7ee7119c7a3 100644
--- a/core/lib/object_store/src/mock.rs
+++ b/core/lib/object_store/src/mock.rs
@@ -46,7 +46,7 @@ impl ObjectStore for MockStore {
         Ok(())
     }
 
-    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String {
+    fn storage_prefix_raw(&self, bucket: Bucket) -> String {
         bucket.to_string()
     }
 }
diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs
index bf036adaff62..e03bce47aa95 100644
--- a/core/lib/object_store/src/objects.rs
+++ b/core/lib/object_store/src/objects.rs
@@ -304,7 +304,7 @@ impl dyn ObjectStore + '_ {
     }
 
     pub fn get_storage_prefix<V: StoredObject>(&self) -> String {
-        self.get_storage_prefix_raw(V::BUCKET)
+        self.storage_prefix_raw(V::BUCKET)
     }
 }
 
diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs
index 75a6c63b01c5..72e582deeb2b 100644
--- a/core/lib/object_store/src/raw.rs
+++ b/core/lib/object_store/src/raw.rs
@@ -116,7 +116,7 @@ pub trait ObjectStore: fmt::Debug + Send + Sync {
     /// Returns an error if removal fails.
     async fn remove_raw(&self, bucket: Bucket, key: &str) -> Result<(), ObjectStoreError>;
 
-    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String;
+    fn storage_prefix_raw(&self, bucket: Bucket) -> String;
 }
 
 #[async_trait]
@@ -138,8 +138,8 @@ impl<T: ObjectStore + ?Sized> ObjectStore for Arc<T> {
         (**self).remove_raw(bucket, key).await
     }
 
-    fn get_storage_prefix_raw(&self, bucket: Bucket) -> String {
-        (**self).get_storage_prefix_raw(bucket)
+    fn storage_prefix_raw(&self, bucket: Bucket) -> String {
+        (**self).storage_prefix_raw(bucket)
     }
 }
 
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
index a49211532103..29c785beac78 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -1,6 +1,8 @@
 use crate::api_server::web3::backend_jsonrpc::error::internal_error;
+use crate::api_server::web3::metrics::API_METRICS;
 use crate::api_server::web3::state::RpcState;
 use crate::l1_gas_price::L1GasPriceProvider;
+use actix_web::web::method;
 use zksync_types::snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata};
 use zksync_types::L1BatchNumber;
 use zksync_web3_decl::error::Web3Error;
@@ -23,6 +25,7 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
     }
     pub async fn get_all_snapshots_impl(&self) -> Result<AllSnapshots, Web3Error> {
         let method_name = "get_all_snapshots";
+        let method_latency = API_METRICS.start_call(method_name);
         let mut storage_processor = self
             .state
             .connection_pool
@@ -30,10 +33,12 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
             .await
             .map_err(|err| internal_error(method_name, err))?;
         let mut snapshots_dal = storage_processor.snapshots_dal();
-        snapshots_dal
+        let response = snapshots_dal
             .get_all_snapshots()
             .await
-            .map_err(|err| internal_error(method_name, err))
+            .map_err(|err| internal_error(method_name, err));
+        method_latency.observe();
+        response
     }
 
     pub async fn get_snapshot_by_l1_batch_number_impl(
@@ -41,6 +46,7 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
         l1_batch_number: L1BatchNumber,
     ) -> Result<Option<SnapshotHeader>, Web3Error> {
         let method_name = "get_snapshot_by_l1_batch_number";
+        let method_latency = API_METRICS.start_call(method_name);
         let mut storage_processor = self
             .state
             .connection_pool
@@ -75,6 +81,7 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
                 .map_err(|err| internal_error(method_name, err))?
                 .unwrap()
                 .1;
+            method_latency.observe();
             Ok(Some(SnapshotHeader {
                 l1_batch_number: snapshot_metadata.l1_batch_number,
                 miniblock_number,
@@ -83,6 +90,7 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
                 factory_deps_filepath: snapshot_metadata.factory_deps_filepath,
             }))
         } else {
+            method_latency.observe();
             Ok(None)
         }
     }
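
Editorial note: two details of this patch are worth spelling out. First, `drop(conn)` now releases the pooled Postgres connection before the slow blob-store upload, so an in-flight chunk no longer pins a pool slot while waiting on storage. Second, the RPC methods bind the DAL result before calling `observe()`, so latency is recorded on the error path as well. A self-contained sketch of the second pattern, with a hypothetical stand-in for the guard returned by `API_METRICS.start_call(..)`:

```rust
use std::time::{Duration, Instant};

// Stand-in for the guard returned by `API_METRICS.start_call(..)`.
struct MethodLatency(Instant);

impl MethodLatency {
    fn start() -> Self {
        Self(Instant::now())
    }

    fn observe(self) -> Duration {
        self.0.elapsed()
    }
}

fn get_all_snapshots() -> Result<Vec<u64>, String> {
    let latency = MethodLatency::start();
    // Bind the result instead of returning it directly: this way
    // `observe()` runs on both the `Ok` and the `Err` path.
    let response = fetch_from_db();
    latency.observe();
    response
}

fn fetch_from_db() -> Result<Vec<u64>, String> {
    Ok(vec![42])
}
```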

From aa75ef77ddd0d1f047600fd81517d351bc6552b2 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Tue, 5 Dec 2023 17:30:30 +0100
Subject: [PATCH 30/43] fix: PR feedback

---
 core/bin/snapshots_creator/src/main.rs | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 54d2f5ca831e..6ee3fc135e65 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -93,9 +93,9 @@ async fn process_storage_logs_single_chunk(
     let output_filepath_prefix = blob_store.get_storage_prefix::<SnapshotStorageLogsChunk>();
     let output_filepath = format!("{output_filepath_prefix}/{filename}");
 
-    let elapsed_ms = latency.observe().as_millis();
+    let elapsed = latency.observe();
     tracing::info!(
-        "Finished storage logs chunk {}/{chunks_count}, step took {elapsed_ms}ms, output stored in {output_filepath}",
+        "Finished storage logs chunk {}/{chunks_count}, step took {elapsed:?}, output stored in {output_filepath}",
         chunk_id + 1
     );
     Ok(output_filepath)
@@ -121,9 +121,9 @@ async fn process_factory_deps(
         .context("Error storing factory deps in blob store")?;
     let output_filepath_prefix = blob_store.get_storage_prefix::<SnapshotFactoryDependencies>();
     let output_filepath = format!("{output_filepath_prefix}/{filename}");
-    let elapsed_ms = latency.observe().as_millis();
+    let elapsed = latency.observe();
     tracing::info!(
-        "Finished factory dependencies, step took {elapsed_ms}ms , output stored in {}",
+        "Finished factory dependencies, step took {elapsed:?} , output stored in {}",
         output_filepath
     );
     Ok(output_filepath)
@@ -243,8 +243,8 @@ async fn run(
 
     METRICS.snapshot_l1_batch.set(l1_batch_number.0 as u64);
 
-    let elapsed_sec = latency.observe().as_secs();
-    tracing::info!("snapshot_generation_duration: {elapsed_sec}s");
+    let elapsed_ = latency.observe();
+    tracing::info!("snapshot_generation_duration: {elapsed:?}");
     tracing::info!("snapshot_l1_batch: {}", METRICS.snapshot_l1_batch.get());
     tracing::info!(
         "storage_logs_chunks_count: {}",

From ed0f6ecffbaa77fe243c6bad2caad1233764bf27 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Tue, 5 Dec 2023 18:52:31 +0100
Subject: [PATCH 31/43] fix: remove import

---
 core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
index 29c785beac78..2c68aec80fbc 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -2,7 +2,6 @@ use crate::api_server::web3::backend_jsonrpc::error::internal_error;
 use crate::api_server::web3::metrics::API_METRICS;
 use crate::api_server::web3::state::RpcState;
 use crate::l1_gas_price::L1GasPriceProvider;
-use actix_web::web::method;
 use zksync_types::snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata};
 use zksync_types::L1BatchNumber;
 use zksync_web3_decl::error::Web3Error;

From c202ec515d57aac1a6ab486a1147c8f426c40f9d Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Tue, 5 Dec 2023 19:06:09 +0100
Subject: [PATCH 32/43] fix: invalid variable name

---
 core/bin/snapshots_creator/src/main.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 6ee3fc135e65..399ace9a0be5 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -243,7 +243,7 @@ async fn run(
 
     METRICS.snapshot_l1_batch.set(l1_batch_number.0 as u64);
 
-    let elapsed_ = latency.observe();
+    let elapsed = latency.observe();
     tracing::info!("snapshot_generation_duration: {elapsed:?}");
     tracing::info!("snapshot_l1_batch: {}", METRICS.snapshot_l1_batch.get());
     tracing::info!(

From c02223aea19e62f1293bcb29c2d4db5ef261a0e9 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 6 Dec 2023 10:55:49 +0100
Subject: [PATCH 33/43] fix: remove unwrap()

---
 core/bin/snapshots_creator/src/main.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 399ace9a0be5..64804cda3d5e 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -220,7 +220,7 @@ async fn run(
                 chunks_count,
                 tasks.len()
             );
-            storage_logs_output_files.push(result.unwrap());
+            storage_logs_output_files.push(result.context("Chunk task failed")?);
             METRICS
                 .storage_logs_chunks_left_to_process
                 .set(chunks_count - last_chunk_id - tasks.len() as u64);
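
Editorial note: replacing `unwrap()` with `context(..)?` turns a failed chunk task into an error propagated out of `run()` instead of a panic in the middle of the scheduling loop. A minimal sketch of the idiom, assuming only the `anyhow` crate the binary already uses:

```rust
use anyhow::Context as _;

// Collect task results, attaching a message to the first failure instead
// of panicking on it.
fn collect_outputs(results: Vec<anyhow::Result<String>>) -> anyhow::Result<Vec<String>> {
    let mut files = Vec::with_capacity(results.len());
    for result in results {
        files.push(result.context("Chunk task failed")?);
    }
    Ok(files)
}
```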

From c822280b64483efed51ce2ee6a61f77cd38c3c56 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 6 Dec 2023 13:30:49 +0100
Subject: [PATCH 34/43] fix: PR feedback

---
 core/bin/snapshots_creator/src/chunking.rs |  16 +++-
 core/bin/snapshots_creator/src/main.rs     |   4 +-
 core/lib/dal/sqlx-data.json                | 104 ++++++++++-----------
 core/lib/dal/src/snapshots_creator_dal.rs  |  28 +++---
 4 files changed, 81 insertions(+), 71 deletions(-)

diff --git a/core/bin/snapshots_creator/src/chunking.rs b/core/bin/snapshots_creator/src/chunking.rs
index 68db97fd73cb..2dce348d935c 100644
--- a/core/bin/snapshots_creator/src/chunking.rs
+++ b/core/bin/snapshots_creator/src/chunking.rs
@@ -1,6 +1,7 @@
 use std::cmp::min;
+use zksync_types::H256;
 
-pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> ([u8; 2], [u8; 2]) {
+pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> std::ops::Range<H256> {
     //we don't need whole [u8; 32] range of H256, first two bytes are already enough to evenly divide work
     // as two bytes = 65536 buckets and the chunks count would go in thousands
     let buckets = (u16::MAX as u64) + 1;
@@ -15,7 +16,14 @@ pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> ([u8; 2]
     let chunk_start = chunk_id * chunk_size + min(chunk_id, buckets % chunks_count);
     let chunk_end = (chunk_id + 1) * chunk_size + min(chunk_id + 1, buckets % chunks_count) - 1;
 
-    let start_bytes = (chunk_start as u16).to_be_bytes();
-    let end_bytes = (chunk_end as u16).to_be_bytes();
-    (start_bytes, end_bytes)
+    let mut start_bytes = (chunk_start as u16).to_be_bytes().to_vec();
+    let mut end_bytes = (chunk_end as u16).to_be_bytes().to_vec();
+
+    start_bytes.resize(32, 0);
+    end_bytes.resize(32, 0);
+
+    std::ops::Range {
+        start: H256::from_slice(&start_bytes),
+        end: H256::from_slice(&end_bytes),
+    }
 }
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 64804cda3d5e..f6608eb71a32 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -71,12 +71,12 @@ async fn process_storage_logs_single_chunk(
     chunk_id: u64,
     chunks_count: u64,
 ) -> anyhow::Result<String> {
-    let (min_hashed_key, max_hashed_key) = get_chunk_hashed_keys_range(chunk_id, chunks_count);
+    let hashed_keys_range = get_chunk_hashed_keys_range(chunk_id, chunks_count);
     let latency = METRICS.storage_logs_processing_duration.start();
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let logs = conn
         .snapshots_creator_dal()
-        .get_storage_logs_chunk(miniblock_number, &min_hashed_key, &max_hashed_key)
+        .get_storage_logs_chunk(miniblock_number, hashed_keys_range)
         .await
         .context("Error fetching storage logs count")?;
     drop(conn);
diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json
index dc5f7c2f7281..e1e978d56f26 100644
--- a/core/lib/dal/sqlx-data.json
+++ b/core/lib/dal/sqlx-data.json
@@ -5485,58 +5485,6 @@
     },
     "query": "INSERT INTO miniblocks ( number, timestamp, hash, l1_tx_count, l2_tx_count, base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, protocol_version, virtual_blocks, created_at, updated_at ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, now(), now())"
   },
-  "6c68925cc6eb422d8c9f04cd353990c995e948f7031da654739852621d14fcea": {
-    "describe": {
-      "columns": [
-        {
-          "name": "key",
-          "ordinal": 0,
-          "type_info": "Bytea"
-        },
-        {
-          "name": "value",
-          "ordinal": 1,
-          "type_info": "Bytea"
-        },
-        {
-          "name": "address",
-          "ordinal": 2,
-          "type_info": "Bytea"
-        },
-        {
-          "name": "miniblock_number",
-          "ordinal": 3,
-          "type_info": "Int8"
-        },
-        {
-          "name": "l1_batch_number",
-          "ordinal": 4,
-          "type_info": "Int8"
-        },
-        {
-          "name": "index",
-          "ordinal": 5,
-          "type_info": "Int8"
-        }
-      ],
-      "nullable": [
-        true,
-        true,
-        true,
-        true,
-        true,
-        true
-      ],
-      "parameters": {
-        "Left": [
-          "Int8",
-          "Bytea",
-          "Bytea"
-        ]
-      }
-    },
-    "query": "\n            SELECT storage_logs.key,\n                   storage_logs.value,\n                   storage_logs.address,\n                   storage_logs.miniblock_number,\n                   initial_writes.l1_batch_number,\n                   initial_writes.index\n            FROM (SELECT hashed_key,\n                         max(ARRAY [miniblock_number, operation_number]::int[]) AS op\n                  FROM storage_logs\n                  WHERE miniblock_number <= $1 and hashed_key >= $2 and hashed_key < $3\n                  GROUP BY hashed_key\n                  ORDER BY hashed_key) AS keys\n                     INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n                AND storage_logs.miniblock_number = keys.op[1]\n                AND storage_logs.operation_number = keys.op[2]\n                     INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key;\n             "
-  },
   "6d142503d0d8682992a0353bae4a6b25ec82e7cadf0b2bbadcfd23c27f646bae": {
     "describe": {
       "columns": [],
@@ -10104,6 +10052,58 @@
     },
     "query": "SELECT l1_batch_number FROM witness_inputs WHERE length(merkle_tree_paths) <> 0 ORDER BY l1_batch_number DESC LIMIT $1"
   },
+  "dd650c06788a1c47b201e768382320fded2b8950ab836b2e5660f15b71dd11a0": {
+    "describe": {
+      "columns": [
+        {
+          "name": "key!",
+          "ordinal": 0,
+          "type_info": "Bytea"
+        },
+        {
+          "name": "value!",
+          "ordinal": 1,
+          "type_info": "Bytea"
+        },
+        {
+          "name": "address!",
+          "ordinal": 2,
+          "type_info": "Bytea"
+        },
+        {
+          "name": "miniblock_number!",
+          "ordinal": 3,
+          "type_info": "Int8"
+        },
+        {
+          "name": "l1_batch_number!",
+          "ordinal": 4,
+          "type_info": "Int8"
+        },
+        {
+          "name": "index",
+          "ordinal": 5,
+          "type_info": "Int8"
+        }
+      ],
+      "nullable": [
+        true,
+        true,
+        true,
+        true,
+        true,
+        true
+      ],
+      "parameters": {
+        "Left": [
+          "Int8",
+          "Bytea",
+          "Bytea"
+        ]
+      }
+    },
+    "query": "\n            SELECT storage_logs.key as \"key!\",\n                   storage_logs.value as \"value!\",\n                   storage_logs.address as \"address!\",\n                   storage_logs.miniblock_number as \"miniblock_number!\",\n                   initial_writes.l1_batch_number as \"l1_batch_number!\",\n                   initial_writes.index\n            FROM (SELECT hashed_key,\n                         max(ARRAY [miniblock_number, operation_number]::int[]) AS op\n                  FROM storage_logs\n                  WHERE miniblock_number <= $1 and hashed_key >= $2 and hashed_key < $3\n                  GROUP BY hashed_key\n                  ORDER BY hashed_key) AS keys\n                     INNER JOIN storage_logs ON keys.hashed_key = storage_logs.hashed_key\n                AND storage_logs.miniblock_number = keys.op[1]\n                AND storage_logs.operation_number = keys.op[2]\n                     INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key;\n             "
+  },
   "dd8aa1c9d4dcea22c9a13cca5ae45e951cf963b0608046b88be40309d7379ec2": {
     "describe": {
       "columns": [],
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 800e4f8a4d92..87c373952b28 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -34,16 +34,15 @@ impl SnapshotsCreatorDal<'_, '_> {
     pub async fn get_storage_logs_chunk(
         &mut self,
         miniblock_number: MiniblockNumber,
-        min_hashed_key: &[u8],
-        max_hashed_key: &[u8],
+        hashed_keys_range: std::ops::Range<H256>,
     ) -> sqlx::Result<Vec<SnapshotStorageLog>> {
         let storage_logs = sqlx::query!(
             r#"
-            SELECT storage_logs.key,
-                   storage_logs.value,
-                   storage_logs.address,
-                   storage_logs.miniblock_number,
-                   initial_writes.l1_batch_number,
+            SELECT storage_logs.key as "key!",
+                   storage_logs.value as "value!",
+                   storage_logs.address as "address!",
+                   storage_logs.miniblock_number as "miniblock_number!",
+                   initial_writes.l1_batch_number as "l1_batch_number!",
                    initial_writes.index
             FROM (SELECT hashed_key,
                          max(ARRAY [miniblock_number, operation_number]::int[]) AS op
@@ -57,21 +56,24 @@ impl SnapshotsCreatorDal<'_, '_> {
                      INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key;
              "#,
             miniblock_number.0 as i64,
-            min_hashed_key,
-            max_hashed_key,
+            hashed_keys_range.start.0.as_slice(),
+            hashed_keys_range.end.0.as_slice(),
         )
         .instrument("get_storage_logs_chunk")
+        .with_arg("miniblock_number", &miniblock_number)
+        .with_arg("min_hashed_key", &hashed_keys_range.start)
+        .with_arg("max_hashed_key", &hashed_keys_range.end)
         .report_latency()
         .fetch_all(self.storage.conn())
         .await?
         .iter()
         .map(|row| SnapshotStorageLog {
             key: StorageKey::new(
-                AccountTreeId::new(Address::from_slice(row.address.as_ref().unwrap())),
-                H256::from_slice(row.key.as_ref().unwrap()),
+                AccountTreeId::new(Address::from_slice(&row.address)),
+                H256::from_slice(&row.key),
             ),
-            value: H256::from_slice(row.value.as_ref().unwrap()),
-            l1_batch_number_of_initial_write: L1BatchNumber(row.l1_batch_number.unwrap() as u32),
+            value: H256::from_slice(&row.value),
+            l1_batch_number_of_initial_write: L1BatchNumber(row.l1_batch_number as u32),
             enumeration_index: row.index.unwrap() as u64,
         })
         .collect();
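
Editorial note: two things change in this patch. The chunk boundaries become a proper `Range<H256>`, with the two-byte prefixes padded out to 32 bytes, and the sqlx columns are aliased to `"key!"` and friends, which is sqlx's column type-override syntax: the trailing `!` asserts the joined column is non-null, letting the row-mapping code drop its `unwrap()` calls. The bucket arithmetic itself is unchanged; here is a self-contained illustration of it, mirroring the 31-buckets/4-chunks worked example from the code comments:

```rust
// Illustration of the bucket split used by `get_chunk_hashed_keys_range`
// at this point in the series: the first `buckets % chunks_count` chunks
// are one bucket larger, so the chunk sizes always sum to the bucket count.
fn chunk_bounds(chunk_id: u64, chunks_count: u64, buckets: u64) -> (u64, u64) {
    let chunk_size = buckets / chunks_count;
    let larger_chunks = buckets % chunks_count;
    let start = chunk_id * chunk_size + chunk_id.min(larger_chunks);
    let end = (chunk_id + 1) * chunk_size + (chunk_id + 1).min(larger_chunks) - 1;
    (start, end)
}

fn main() {
    // 31 buckets split into 4 chunks: sizes 8, 8, 8, 7.
    let bounds: Vec<_> = (0..4).map(|id| chunk_bounds(id, 4, 31)).collect();
    assert_eq!(bounds, [(0, 7), (8, 15), (16, 23), (24, 30)]);
}
```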

From 700018dae48e3ecc5e9397e09168c01a0c42db64 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Wed, 6 Dec 2023 14:34:14 +0100
Subject: [PATCH 35/43] fix: PR feedback

---
 core/lib/zksync_core/src/api_server/web3/mod.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs
index 7bffb43518a5..f066333059de 100644
--- a/core/lib/zksync_core/src/api_server/web3/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/mod.rs
@@ -368,7 +368,7 @@ impl<G: 'static + Send + Sync + L1GasPriceProvider> ApiBuilder<G> {
         }
         if namespaces.contains(&Namespace::Snapshots) {
             rpc.merge(SnapshotsNamespace::new(rpc_state).into_rpc())
-                .expect("Can't merge debug namespace");
+                .expect("Can't merge snapshots namespace");
         }
         rpc
     }

From 633951b3a790f06ce0594c4b64eebe54cf80dfd8 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Thu, 7 Dec 2023 16:48:58 +0100
Subject: [PATCH 36/43] fix: PR feedback

---
 core/bin/snapshots_creator/src/chunking.rs    |  5 +-
 core/bin/snapshots_creator/src/main.rs        | 70 ++++++++-----------
 core/lib/config/src/configs/mod.rs            |  2 +-
 core/lib/object_store/src/objects.rs          |  3 +-
 .../zksync_core/src/api_server/web3/mod.rs    | 16 -----
 .../api_server/web3/namespaces/snapshots.rs   |  3 +-
 .../tests/api/snapshots-creator.test.ts       | 21 +++---
 7 files changed, 45 insertions(+), 75 deletions(-)

diff --git a/core/bin/snapshots_creator/src/chunking.rs b/core/bin/snapshots_creator/src/chunking.rs
index 2dce348d935c..149919ee00de 100644
--- a/core/bin/snapshots_creator/src/chunking.rs
+++ b/core/bin/snapshots_creator/src/chunking.rs
@@ -22,8 +22,5 @@ pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> std::ops
     start_bytes.resize(32, 0);
     end_bytes.resize(32, 0);
 
-    std::ops::Range {
-        start: H256::from_slice(&start_bytes),
-        end: H256::from_slice(&end_bytes),
-    }
+    H256::from_slice(&start_bytes)..H256::from_slice(&end_bytes)
 }
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index f6608eb71a32..80367875cf5e 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -1,15 +1,10 @@
 mod chunking;
 
 use anyhow::Context as _;
-use futures::stream::FuturesUnordered;
-use futures::StreamExt;
 use prometheus_exporter::PrometheusExporterConfig;
 use std::cmp::max;
-use std::future::Future;
-use std::pin::Pin;
 use std::time::Duration;
-use tokio::sync::watch;
-use tokio::sync::watch::Receiver;
+use tokio::sync::{watch, Semaphore};
 use vise::Unit;
 use vise::{Buckets, Gauge, Histogram, Metrics};
 use zksync_config::configs::PrometheusConfig;
@@ -47,7 +42,9 @@ struct SnapshotsCreatorMetrics {
 #[vise::register]
 pub(crate) static METRICS: vise::Global<SnapshotsCreatorMetrics> = vise::Global::new();
 
-async fn maybe_enable_prometheus_metrics(stop_receiver: Receiver<bool>) -> anyhow::Result<()> {
+async fn maybe_enable_prometheus_metrics(
+    stop_receiver: watch::Receiver<bool>,
+) -> anyhow::Result<()> {
     let prometheus_config = PrometheusConfig::from_env().ok();
     if let Some(prometheus_config) = prometheus_config {
         let exporter_config = PrometheusExporterConfig::push(
@@ -66,11 +63,13 @@ async fn maybe_enable_prometheus_metrics(stop_receiver: Receiver<bool>) -> anyho
 async fn process_storage_logs_single_chunk(
     blob_store: &dyn ObjectStore,
     pool: &ConnectionPool,
+    semaphore: &Semaphore,
     miniblock_number: MiniblockNumber,
     l1_batch_number: L1BatchNumber,
     chunk_id: u64,
     chunks_count: u64,
 ) -> anyhow::Result<String> {
+    let _permit = semaphore.acquire().await?;
     let hashed_keys_range = get_chunk_hashed_keys_range(chunk_id, chunks_count);
     let latency = METRICS.storage_logs_processing_duration.start();
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
@@ -94,10 +93,13 @@ async fn process_storage_logs_single_chunk(
     let output_filepath = format!("{output_filepath_prefix}/{filename}");
 
     let elapsed = latency.observe();
+    let tasks_left = METRICS.storage_logs_chunks_left_to_process.dec_by(1) - 1;
     tracing::info!(
-        "Finished storage logs chunk {}/{chunks_count}, step took {elapsed:?}, output stored in {output_filepath}",
-        chunk_id + 1
-    );
+                "Finished chunk number {chunk_id}, overall_progress {}/{}, step took {elapsed:?}, output stored in {output_filepath}",
+                chunks_count - tasks_left,
+                chunks_count
+            );
+
     Ok(output_filepath)
 }
 
@@ -192,46 +194,32 @@ async fn run(
     )
     .await?;
 
-    let mut storage_logs_output_files = vec![];
-
     METRICS
         .storage_logs_chunks_left_to_process
         .set(chunks_count);
-    let mut tasks =
-        FuturesUnordered::<Pin<Box<dyn Future<Output = anyhow::Result<String>>>>>::new();
-    let mut last_chunk_id = 0;
-    while last_chunk_id < chunks_count || !tasks.is_empty() {
-        while (tasks.len() as u32) < config.concurrent_queries_count && last_chunk_id < chunks_count
-        {
-            tasks.push(Box::pin(process_storage_logs_single_chunk(
-                &*blob_store,
-                &replica_pool,
-                last_miniblock_number_in_batch,
-                l1_batch_number,
-                last_chunk_id,
-                chunks_count,
-            )));
-            last_chunk_id += 1;
-        }
-        if let Some(result) = tasks.next().await {
-            tracing::info!(
-                "Completed chunk {}/{}, {} chunks are still in progress",
-                last_chunk_id - tasks.len() as u64,
-                chunks_count,
-                tasks.len()
-            );
-            storage_logs_output_files.push(result.context("Chunk task failed")?);
-            METRICS
-                .storage_logs_chunks_left_to_process
-                .set(chunks_count - last_chunk_id - tasks.len() as u64);
-        }
-    }
+
+    let semaphore = Semaphore::new(config.concurrent_queries_count as usize);
+    let tasks = (0..chunks_count).map(|chunk_id| {
+        process_storage_logs_single_chunk(
+            &*blob_store,
+            &replica_pool,
+            &semaphore,
+            last_miniblock_number_in_batch,
+            l1_batch_number,
+            chunk_id,
+            chunks_count,
+        )
+    });
+    let mut storage_logs_output_files = futures::future::try_join_all(tasks).await?;
     tracing::info!("Finished generating snapshot, storing progress in db");
 
     let mut master_conn = master_pool
         .access_storage_tagged("snapshots_creator")
         .await?;
 
+    storage_logs_output_files.sort();
+    // Sanity check: every chunk task must have produced exactly one output file.
+    assert_eq!(storage_logs_output_files.len(), chunks_count as usize);
     master_conn
         .snapshots_dal()
         .add_snapshot(
diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs
index 88fbff206946..9acfe9025620 100644
--- a/core/lib/config/src/configs/mod.rs
+++ b/core/lib/config/src/configs/mod.rs
@@ -20,9 +20,9 @@ pub use self::{
     proof_data_handler::ProofDataHandlerConfig,
     prover::{ProverConfig, ProverConfigs},
     prover_group::ProverGroupConfig,
+    snapshots_creator::SnapshotsCreatorConfig,
     utils::PrometheusConfig,
     witness_generator::WitnessGeneratorConfig,
-    snapshots_creator::SnapshotsCreatorConfig,
 };
 
 pub mod alerts;
diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs
index e03bce47aa95..424aa6681d04 100644
--- a/core/lib/object_store/src/objects.rs
+++ b/core/lib/object_store/src/objects.rs
@@ -2,12 +2,11 @@
 
 use flate2::{read::GzDecoder, write::GzEncoder, Compression};
 use std::io::Read;
-use zksync_types::aggregated_operations::L1BatchProofForL1;
-use zksync_types::snapshots::{SnapshotFactoryDependencies, SnapshotStorageLogsStorageKey};
 use zksync_types::{
     aggregated_operations::L1BatchProofForL1,
     proofs::{AggregationRound, PrepareBasicCircuitsJob},
     snapshots::SnapshotStorageLogsChunk,
+    snapshots::{SnapshotFactoryDependencies, SnapshotStorageLogsStorageKey},
     storage::witness_block_state::WitnessBlockState,
     zkevm_test_harness::{
         abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit,
diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs
index f066333059de..a606e8b47bce 100644
--- a/core/lib/zksync_core/src/api_server/web3/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/mod.rs
@@ -64,22 +64,6 @@ pub mod state;
 #[cfg(test)]
 pub(crate) mod tests;
 
-use self::backend_jsonrpc::{
-    batch_limiter_middleware::{LimitMiddleware, Transport},
-    error::internal_error,
-    namespaces::{
-        debug::DebugNamespaceT, en::EnNamespaceT, eth::EthNamespaceT, net::NetNamespaceT,
-        web3::Web3NamespaceT, zks::ZksNamespaceT,
-    },
-    pub_sub::Web3PubSub,
-};
-use self::metrics::API_METRICS;
-use self::namespaces::{
-    DebugNamespace, EnNamespace, EthNamespace, NetNamespace, Web3Namespace, ZksNamespace,
-};
-use self::pubsub::{EthSubscribe, PubSubEvent};
-use self::state::{Filters, InternalApiConfig, RpcState, SealedMiniblockNumber};
-
 /// Timeout for graceful shutdown logic within API servers.
 const GRACEFUL_SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(5);
 
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
index 2c68aec80fbc..76b3c168910e 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -8,7 +8,7 @@ use zksync_web3_decl::error::Web3Error;
 
 #[derive(Debug)]
 pub struct SnapshotsNamespace<G> {
-    pub state: RpcState<G>,
+    state: RpcState<G>,
 }
 
 impl<G> Clone for SnapshotsNamespace<G> {
@@ -22,6 +22,7 @@ impl<G: L1GasPriceProvider> SnapshotsNamespace<G> {
     pub fn new(state: RpcState<G>) -> Self {
         Self { state }
     }
+
     pub async fn get_all_snapshots_impl(&self) -> Result<AllSnapshots, Web3Error> {
         let method_name = "get_all_snapshots";
         let method_latency = API_METRICS.start_call(method_name);
diff --git a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
index df23ce2a5771..1938a53e80a5 100644
--- a/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
+++ b/core/tests/ts-integration/tests/api/snapshots-creator.test.ts
@@ -2,6 +2,7 @@ import { TestMaster } from '../../src/index';
 import fs from 'fs';
 import * as zlib from 'zlib';
 import { snapshots_creator } from 'zk/build/run/run';
+
 describe('Snapshots API tests', () => {
     let testMaster: TestMaster;
 
@@ -46,15 +47,15 @@ describe('Snapshots API tests', () => {
         });
     }
     async function createAndValidateSnapshot() {
-        let existingBatchNumbers = (await getAllSnapshots()).snapshotsL1BatchNumbers as number[];
+        const existingBatchNumbers = (await getAllSnapshots()).snapshotsL1BatchNumbers as number[];
         await runCreator();
-        let newBatchNumbers = (await getAllSnapshots()).snapshotsL1BatchNumbers as number[];
-        let addedSnapshots = newBatchNumbers.filter((x) => existingBatchNumbers.indexOf(x) === -1);
+        const newBatchNumbers = (await getAllSnapshots()).snapshotsL1BatchNumbers as number[];
+        const addedSnapshots = newBatchNumbers.filter((x) => existingBatchNumbers.indexOf(x) === -1);
         expect(addedSnapshots.length).toEqual(1);
 
-        let l1BatchNumber = addedSnapshots[0];
-        let fullSnapshot = await getSnapshot(l1BatchNumber);
-        let miniblockNumber = fullSnapshot.miniblockNumber;
+        const l1BatchNumber = addedSnapshots[0];
+        const fullSnapshot = await getSnapshot(l1BatchNumber);
+        const miniblockNumber = fullSnapshot.miniblockNumber;
 
         expect(fullSnapshot.l1BatchNumber).toEqual(l1BatchNumber);
         for (let chunkMetadata of fullSnapshot.storageLogsChunks) {
@@ -65,10 +66,10 @@ describe('Snapshots API tests', () => {
             expect(output['storageLogs'].length > 0);
 
             for (const storageLog of output['storageLogs'] as any[]) {
-                let snapshotAccountAddress = storageLog['key']['account']['address'];
-                let snapshotKey = storageLog['key']['key'];
-                let snapshotValue = storageLog['value'];
-                let snapshotL1BatchNumber = storageLog['l1BatchNumberOfInitialWrite'];
+                const snapshotAccountAddress = storageLog['key']['account']['address'];
+                const snapshotKey = storageLog['key']['key'];
+                const snapshotValue = storageLog['value'];
+                const snapshotL1BatchNumber = storageLog['l1BatchNumberOfInitialWrite'];
                 const valueOnBlockchain = await testMaster
                     .mainAccount()
                     .provider.getStorageAt(snapshotAccountAddress, snapshotKey, miniblockNumber);
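
Editorial note: the manual `FuturesUnordered` scheduling loop is gone. Every chunk future is now created up front, a `tokio::sync::Semaphore` caps how many do real work concurrently, and `futures::future::try_join_all` both collects the outputs in order of creation and fails fast on the first error. A runnable sketch of the scheme with stand-in work (the filename format is made up for the example):

```rust
use anyhow::Context as _;
use tokio::sync::Semaphore;

async fn process_chunk(chunk_id: u64, semaphore: &Semaphore) -> anyhow::Result<String> {
    // The permit is held until the end of the function, so at most
    // `concurrent_queries_count` chunks do real work at any moment.
    let _permit = semaphore.acquire().await.context("semaphore closed")?;
    // ... query Postgres and upload the chunk here ...
    Ok(format!("storage_logs_chunk_{chunk_id}.json.gzip"))
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let chunks_count = 16;
    let semaphore = Semaphore::new(4); // plays the role of `concurrent_queries_count`
    let tasks = (0..chunks_count).map(|id| process_chunk(id, &semaphore));
    // Fails fast on the first error; otherwise yields all chunk outputs.
    let mut output_files = futures::future::try_join_all(tasks).await?;
    output_files.sort();
    assert_eq!(output_files.len(), chunks_count as usize);
    Ok(())
}
```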

From 45f37a9c499f0e9faf09eafa56d1e1534fa18652 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Fri, 8 Dec 2023 11:20:00 +0100
Subject: [PATCH 37/43] fix: PR feedback

---
 core/bin/snapshots_creator/src/chunking.rs | 40 ++++++++++------------
 core/lib/dal/src/snapshots_creator_dal.rs  | 10 +++---
 2 files changed, 23 insertions(+), 27 deletions(-)

diff --git a/core/bin/snapshots_creator/src/chunking.rs b/core/bin/snapshots_creator/src/chunking.rs
index 149919ee00de..52206ecca0d9 100644
--- a/core/bin/snapshots_creator/src/chunking.rs
+++ b/core/bin/snapshots_creator/src/chunking.rs
@@ -1,26 +1,22 @@
 use std::cmp::min;
-use zksync_types::H256;
+use std::ops;
+use zksync_types::{H256, U256};
+use zksync_utils::u256_to_h256;
 
-pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> std::ops::Range<H256> {
-    //we don't need whole [u8; 32] range of H256, first two bytes are already enough to evenly divide work
-    // as two bytes = 65536 buckets and the chunks count would go in thousands
-    let buckets = (u16::MAX as u64) + 1;
-    assert!(chunks_count <= buckets);
+pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> ops::RangeInclusive<H256> {
+    assert!(chunks_count > 0);
+    let mut stride = U256::MAX / chunks_count;
+    let stride_minus_one = if stride < U256::MAX {
+        stride += U256::one();
+        stride - 1
+    } else {
+        stride // `stride` is really 1 << 256 == U256::MAX + 1
+    };
 
-    //some of the chunks will be exactly this size, some may need to be exactly 1 larger
-    let chunk_size = buckets / chunks_count;
-    // first (buckets % chunks_count) chunks are bigger by 1, rest are of size chunk_size
-    // for instance, if there were 31 buckets and 4 chunks
-    // chunk_size would equal 7, first 31 % 4 = 3, first 3 chunks would be of size 8, last one of 7
-    // 8 + 8 + 8 + 7 = 31
-    let chunk_start = chunk_id * chunk_size + min(chunk_id, buckets % chunks_count);
-    let chunk_end = (chunk_id + 1) * chunk_size + min(chunk_id + 1, buckets % chunks_count) - 1;
-
-    let mut start_bytes = (chunk_start as u16).to_be_bytes().to_vec();
-    let mut end_bytes = (chunk_end as u16).to_be_bytes().to_vec();
-
-    start_bytes.resize(32, 0);
-    end_bytes.resize(32, 0);
-
-    H256::from_slice(&start_bytes)..H256::from_slice(&end_bytes)
+    let start = stride * chunk_id;
+    let (mut end, is_overflow) = stride_minus_one.overflowing_add(start);
+    if is_overflow {
+        end = U256::MAX;
+    }
+    u256_to_h256(start)..=u256_to_h256(end)
 }
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 87c373952b28..4384b743c5cb 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -34,7 +34,7 @@ impl SnapshotsCreatorDal<'_, '_> {
     pub async fn get_storage_logs_chunk(
         &mut self,
         miniblock_number: MiniblockNumber,
-        hashed_keys_range: std::ops::Range<H256>,
+        hashed_keys_range: std::ops::RangeInclusive<H256>,
     ) -> sqlx::Result<Vec<SnapshotStorageLog>> {
         let storage_logs = sqlx::query!(
             r#"
@@ -56,13 +56,13 @@ impl SnapshotsCreatorDal<'_, '_> {
                      INNER JOIN initial_writes ON keys.hashed_key = initial_writes.hashed_key;
              "#,
             miniblock_number.0 as i64,
-            hashed_keys_range.start.0.as_slice(),
-            hashed_keys_range.end.0.as_slice(),
+            hashed_keys_range.start().0.as_slice(),
+            hashed_keys_range.end().0.as_slice(),
         )
         .instrument("get_storage_logs_chunk")
         .with_arg("miniblock_number", &miniblock_number)
-        .with_arg("min_hashed_key", &hashed_keys_range.start)
-        .with_arg("max_hashed_key", &hashed_keys_range.end)
+        .with_arg("min_hashed_key", &hashed_keys_range.start())
+        .with_arg("max_hashed_key", &hashed_keys_range.end())
         .report_latency()
         .fetch_all(self.storage.conn())
         .await?
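
Editorial note: the stride-based computation divides the full 2^256 key space instead of 65536 two-byte buckets. Effectively `stride = ceil(2^256 / chunks_count)` (computed with overflow care, since 2^256 itself does not fit in a `U256`), chunk `i` covers `[i * stride, (i + 1) * stride - 1]`, and the top of the last chunk is clamped to `U256::MAX`. A sanity test one might add for this, assuming the `h256_to_u256` helper from `zksync_utils` (the inverse of the `u256_to_h256` the patch already imports):

```rust
#[cfg(test)]
mod tests {
    use zksync_types::{H256, U256};
    use zksync_utils::{h256_to_u256, u256_to_h256};

    use super::get_chunk_hashed_keys_range;

    #[test]
    fn chunk_ranges_are_contiguous_and_cover_the_key_space() {
        for chunks_count in [1, 2, 3, 256, 1_000] {
            let first = get_chunk_hashed_keys_range(0, chunks_count);
            assert_eq!(*first.start(), H256::zero());
            let last = get_chunk_hashed_keys_range(chunks_count - 1, chunks_count);
            assert_eq!(*last.end(), u256_to_h256(U256::MAX));
            for chunk_id in 1..chunks_count {
                let prev = get_chunk_hashed_keys_range(chunk_id - 1, chunks_count);
                let current = get_chunk_hashed_keys_range(chunk_id, chunks_count);
                // Each chunk starts exactly one key after the previous one ends.
                assert_eq!(
                    h256_to_u256(*current.start()),
                    h256_to_u256(*prev.end()) + U256::one()
                );
            }
        }
    }
}
```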

From 8df22d0cb139e5f43b5fcb88ea485e9295e9860e Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Fri, 8 Dec 2023 11:21:38 +0100
Subject: [PATCH 38/43] fix: zk fmt

---
 core/bin/snapshots_creator/src/chunking.rs    |  2 +-
 core/bin/snapshots_creator/src/main.rs        | 25 +++++++++----------
 core/lib/dal/src/snapshots_creator_dal.rs     | 10 +++++---
 core/lib/dal/src/snapshots_dal.rs             | 13 ++++++----
 core/lib/env_config/src/snapshots_creator.rs  |  3 ++-
 core/lib/object_store/src/objects.rs          |  8 +++---
 core/lib/types/src/snapshots.rs               |  4 +--
 .../lib/web3_decl/src/namespaces/snapshots.rs |  6 +++--
 .../backend_jsonrpc/namespaces/snapshots.rs   |  7 +++---
 .../backend_jsonrpsee/namespaces/snapshots.rs | 17 +++++++------
 .../api_server/web3/namespaces/snapshots.rs   | 17 ++++++++-----
 11 files changed, 65 insertions(+), 47 deletions(-)

diff --git a/core/bin/snapshots_creator/src/chunking.rs b/core/bin/snapshots_creator/src/chunking.rs
index 52206ecca0d9..248f2de53597 100644
--- a/core/bin/snapshots_creator/src/chunking.rs
+++ b/core/bin/snapshots_creator/src/chunking.rs
@@ -1,5 +1,5 @@
-use std::cmp::min;
 use std::ops;
+
 use zksync_types::{H256, U256};
 use zksync_utils::u256_to_h256;
 
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index 80367875cf5e..300b12572f99 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -1,26 +1,25 @@
 mod chunking;
 
+use std::{cmp::max, time::Duration};
+
 use anyhow::Context as _;
 use prometheus_exporter::PrometheusExporterConfig;
-use std::cmp::max;
-use std::time::Duration;
 use tokio::sync::{watch, Semaphore};
-use vise::Unit;
-use vise::{Buckets, Gauge, Histogram, Metrics};
-use zksync_config::configs::PrometheusConfig;
-use zksync_config::{PostgresConfig, SnapshotsCreatorConfig};
-
-use crate::chunking::get_chunk_hashed_keys_range;
+use vise::{Buckets, Gauge, Histogram, Metrics, Unit};
+use zksync_config::{configs::PrometheusConfig, PostgresConfig, SnapshotsCreatorConfig};
 use zksync_dal::ConnectionPool;
-use zksync_env_config::object_store::SnapshotsObjectStoreConfig;
-use zksync_env_config::FromEnv;
+use zksync_env_config::{object_store::SnapshotsObjectStoreConfig, FromEnv};
 use zksync_object_store::{ObjectStore, ObjectStoreFactory};
-use zksync_types::snapshots::{
-    SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey,
+use zksync_types::{
+    snapshots::{
+        SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey,
+    },
+    L1BatchNumber, MiniblockNumber,
 };
-use zksync_types::{L1BatchNumber, MiniblockNumber};
 use zksync_utils::ceil_div;
 
+use crate::chunking::get_chunk_hashed_keys_range;
+
 #[derive(Debug, Metrics)]
 #[metrics(prefix = "snapshots_creator")]
 struct SnapshotsCreatorMetrics {
diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs
index 4384b743c5cb..b87003602598 100644
--- a/core/lib/dal/src/snapshots_creator_dal.rs
+++ b/core/lib/dal/src/snapshots_creator_dal.rs
@@ -1,7 +1,9 @@
-use crate::instrument::InstrumentExt;
-use crate::StorageProcessor;
-use zksync_types::snapshots::{SnapshotFactoryDependency, SnapshotStorageLog};
-use zksync_types::{AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, H256};
+use zksync_types::{
+    snapshots::{SnapshotFactoryDependency, SnapshotStorageLog},
+    AccountTreeId, Address, L1BatchNumber, MiniblockNumber, StorageKey, H256,
+};
+
+use crate::{instrument::InstrumentExt, StorageProcessor};
 
 #[derive(Debug)]
 pub struct SnapshotsCreatorDal<'a, 'c> {
diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs
index f5b44c121438..0e031b31d142 100644
--- a/core/lib/dal/src/snapshots_dal.rs
+++ b/core/lib/dal/src/snapshots_dal.rs
@@ -1,7 +1,9 @@
-use crate::instrument::InstrumentExt;
-use crate::StorageProcessor;
-use zksync_types::snapshots::{AllSnapshots, SnapshotMetadata};
-use zksync_types::L1BatchNumber;
+use zksync_types::{
+    snapshots::{AllSnapshots, SnapshotMetadata},
+    L1BatchNumber,
+};
+
+use crate::{instrument::InstrumentExt, StorageProcessor};
 
 #[derive(Debug)]
 pub struct SnapshotsDal<'a, 'c> {
@@ -68,9 +70,10 @@ impl SnapshotsDal<'_, '_> {
 
 #[cfg(test)]
 mod tests {
-    use crate::ConnectionPool;
     use zksync_types::L1BatchNumber;
 
+    use crate::ConnectionPool;
+
     #[tokio::test]
     async fn adding_snapshot() {
         let pool = ConnectionPool::test_pool().await;
diff --git a/core/lib/env_config/src/snapshots_creator.rs b/core/lib/env_config/src/snapshots_creator.rs
index b79ecfb4f450..6ed80e3780ce 100644
--- a/core/lib/env_config/src/snapshots_creator.rs
+++ b/core/lib/env_config/src/snapshots_creator.rs
@@ -1,6 +1,7 @@
-use crate::{envy_load, FromEnv};
 use zksync_config::SnapshotsCreatorConfig;
 
+use crate::{envy_load, FromEnv};
+
 impl FromEnv for SnapshotsCreatorConfig {
     fn from_env() -> anyhow::Result<Self> {
         envy_load("snapshots_creator", "SNAPSHOTS_CREATOR_")
diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs
index 424aa6681d04..89241c7edf7c 100644
--- a/core/lib/object_store/src/objects.rs
+++ b/core/lib/object_store/src/objects.rs
@@ -1,12 +1,14 @@
 //! Stored objects.
 
-use flate2::{read::GzDecoder, write::GzEncoder, Compression};
 use std::io::Read;
+
+use flate2::{read::GzDecoder, write::GzEncoder, Compression};
 use zksync_types::{
     aggregated_operations::L1BatchProofForL1,
     proofs::{AggregationRound, PrepareBasicCircuitsJob},
-    snapshots::SnapshotStorageLogsChunk,
-    snapshots::{SnapshotFactoryDependencies, SnapshotStorageLogsStorageKey},
+    snapshots::{
+        SnapshotFactoryDependencies, SnapshotStorageLogsChunk, SnapshotStorageLogsStorageKey,
+    },
     storage::witness_block_state::WitnessBlockState,
     zkevm_test_harness::{
         abstract_zksync_circuit::concrete_circuits::ZkSyncCircuit,
diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs
index 3b3b20d13fe3..794480ea550c 100644
--- a/core/lib/types/src/snapshots.rs
+++ b/core/lib/types/src/snapshots.rs
@@ -1,8 +1,8 @@
-use crate::commitment::L1BatchWithMetadata;
-use crate::{StorageKey, StorageValue};
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::{L1BatchNumber, MiniblockNumber};
 
+use crate::{commitment::L1BatchWithMetadata, StorageKey, StorageValue};
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct AllSnapshots {
diff --git a/core/lib/web3_decl/src/namespaces/snapshots.rs b/core/lib/web3_decl/src/namespaces/snapshots.rs
index 08bb65b6968e..02f9aa6b36d2 100644
--- a/core/lib/web3_decl/src/namespaces/snapshots.rs
+++ b/core/lib/web3_decl/src/namespaces/snapshots.rs
@@ -1,6 +1,8 @@
 use jsonrpsee::{core::RpcResult, proc_macros::rpc};
-use zksync_types::snapshots::{AllSnapshots, SnapshotHeader};
-use zksync_types::L1BatchNumber;
+use zksync_types::{
+    snapshots::{AllSnapshots, SnapshotHeader},
+    L1BatchNumber,
+};
 
 #[cfg_attr(
     all(feature = "client", feature = "server"),
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs
index 2b9bf30f0457..aa542320af2c 100644
--- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/snapshots.rs
@@ -3,13 +3,14 @@
 // External uses
 use jsonrpc_core::{BoxFuture, Result};
 use jsonrpc_derive::rpc;
+use zksync_types::{
+    snapshots::{AllSnapshots, SnapshotHeader},
+    L1BatchNumber,
+};
 
 // Workspace uses
 use crate::api_server::web3::backend_jsonrpc::error::into_jsrpc_error;
 use crate::l1_gas_price::L1GasPriceProvider;
-use zksync_types::snapshots::{AllSnapshots, SnapshotHeader};
-use zksync_types::L1BatchNumber;
-
 // Local uses
 use crate::web3::namespaces::SnapshotsNamespace;
 
diff --git a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs
index 88fd28de9756..5a60fafa9dc5 100644
--- a/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/backend_jsonrpsee/namespaces/snapshots.rs
@@ -1,11 +1,14 @@
-use crate::api_server::web3::backend_jsonrpsee::into_jsrpc_error;
-use crate::api_server::web3::namespaces::SnapshotsNamespace;
-use crate::l1_gas_price::L1GasPriceProvider;
 use async_trait::async_trait;
-use zksync_types::snapshots::{AllSnapshots, SnapshotHeader};
-use zksync_types::L1BatchNumber;
-use zksync_web3_decl::jsonrpsee::core::RpcResult;
-use zksync_web3_decl::namespaces::SnapshotsNamespaceServer;
+use zksync_types::{
+    snapshots::{AllSnapshots, SnapshotHeader},
+    L1BatchNumber,
+};
+use zksync_web3_decl::{jsonrpsee::core::RpcResult, namespaces::SnapshotsNamespaceServer};
+
+use crate::{
+    api_server::web3::{backend_jsonrpsee::into_jsrpc_error, namespaces::SnapshotsNamespace},
+    l1_gas_price::L1GasPriceProvider,
+};
 
 #[async_trait]
 impl<G: L1GasPriceProvider + Send + Sync + 'static> SnapshotsNamespaceServer
diff --git a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
index 76b3c168910e..02dd3b18b22d 100644
--- a/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
+++ b/core/lib/zksync_core/src/api_server/web3/namespaces/snapshots.rs
@@ -1,11 +1,16 @@
-use crate::api_server::web3::backend_jsonrpc::error::internal_error;
-use crate::api_server::web3::metrics::API_METRICS;
-use crate::api_server::web3::state::RpcState;
-use crate::l1_gas_price::L1GasPriceProvider;
-use zksync_types::snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata};
-use zksync_types::L1BatchNumber;
+use zksync_types::{
+    snapshots::{AllSnapshots, SnapshotHeader, SnapshotStorageLogsChunkMetadata},
+    L1BatchNumber,
+};
 use zksync_web3_decl::error::Web3Error;
 
+use crate::{
+    api_server::web3::{
+        backend_jsonrpc::error::internal_error, metrics::API_METRICS, state::RpcState,
+    },
+    l1_gas_price::L1GasPriceProvider,
+};
+
 #[derive(Debug)]
 pub struct SnapshotsNamespace<G> {
     state: RpcState<G>,

From a827d72164d70e9ed80efd8980c1f4be5b90d4b2 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Fri, 8 Dec 2023 16:53:04 +0100
Subject: [PATCH 39/43] fix: disable snapshots namespace by default on EN

---
 core/bin/external_node/src/config/mod.rs        |  2 +-
 core/lib/zksync_core/src/api_server/web3/mod.rs | 17 ++++-------------
 .../src/api_server/web3/tests/mod.rs            |  2 +-
 core/lib/zksync_core/src/lib.rs                 | 13 +++++++------
 4 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs
index aea48bc0aeb7..b84c6ce59bb6 100644
--- a/core/bin/external_node/src/config/mod.rs
+++ b/core/bin/external_node/src/config/mod.rs
@@ -338,7 +338,7 @@ impl OptionalENConfig {
     pub fn api_namespaces(&self) -> Vec<Namespace> {
         self.api_namespaces
             .clone()
-            .unwrap_or_else(|| Namespace::NON_DEBUG.to_vec())
+            .unwrap_or_else(|| Namespace::DEFAULT.to_vec())
     }
 
     pub fn max_response_body_size(&self) -> usize {
diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs
index a606e8b47bce..c2549c4aded5 100644
--- a/core/lib/zksync_core/src/api_server/web3/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/mod.rs
@@ -104,26 +104,17 @@ pub enum Namespace {
 }
 
 impl Namespace {
-    pub const ALL: &'static [Namespace] = &[
+    pub const DEFAULT: &'static [Namespace] = &[
         Namespace::Eth,
         Namespace::Net,
         Namespace::Web3,
-        Namespace::Debug,
         Namespace::Zks,
         Namespace::En,
         Namespace::Pubsub,
-        Namespace::Snapshots,
     ];
 
-    pub const NON_DEBUG: &'static [Namespace] = &[
-        Namespace::Eth,
-        Namespace::Net,
-        Namespace::Web3,
-        Namespace::Zks,
-        Namespace::En,
-        Namespace::Pubsub,
-        Namespace::Snapshots,
-    ];
+    pub const DEBUG: &'static [Namespace] = &[Namespace::Debug];
+    pub const SNAPSHOTS: &'static [Namespace] = &[Namespace::Snapshots];
 }
 
 /// Handles to the initialized API server.
@@ -367,7 +358,7 @@ impl<G: 'static + Send + Sync + L1GasPriceProvider> ApiBuilder<G> {
 
         if self.namespaces.is_none() {
             tracing::warn!("debug_ API namespace will be disabled by default in ApiBuilder");
-            self.namespaces = Some(Namespace::NON_DEBUG.to_vec());
+            self.namespaces = Some(Namespace::DEFAULT.to_vec());
         }
 
         if self
diff --git a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs
index 8743330710cb..2f7fad8c6e10 100644
--- a/core/lib/zksync_core/src/api_server/web3/tests/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/tests/mod.rs
@@ -132,7 +132,7 @@ async fn spawn_server(
         .with_threads(1)
         .with_tx_sender(tx_sender, vm_barrier)
         .with_pub_sub_events(pub_sub_events_sender)
-        .enable_api_namespaces(Namespace::NON_DEBUG.to_vec())
+        .enable_api_namespaces(Namespace::DEFAULT.to_vec())
         .build(stop_receiver)
         .await
         .expect("Failed spawning JSON-RPC server");
diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs
index f9192bf295cb..d60262114e3b 100644
--- a/core/lib/zksync_core/src/lib.rs
+++ b/core/lib/zksync_core/src/lib.rs
@@ -1129,11 +1129,12 @@ async fn run_http_api<G: L1GasPriceProvider + Send + Sync + 'static>(
     )
     .await;
 
-    let namespaces = if with_debug_namespace {
-        Namespace::ALL.to_vec()
-    } else {
-        Namespace::NON_DEBUG.to_vec()
-    };
+    let mut namespaces = Namespace::DEFAULT.to_vec();
+    if with_debug_namespace {
+        namespaces.extend(Namespace::DEBUG.to_vec());
+    }
+    namespaces.extend(Namespace::SNAPSHOTS.to_vec());
+
     let last_miniblock_pool = ConnectionPool::singleton(postgres_config.replica_url()?)
         .build()
         .await
@@ -1202,7 +1203,7 @@ async fn run_ws_api<G: L1GasPriceProvider + Send + Sync + 'static>(
             .with_threads(api_config.web3_json_rpc.ws_server_threads())
             .with_tree_api(api_config.web3_json_rpc.tree_api_url())
             .with_tx_sender(tx_sender, vm_barrier)
-            .enable_api_namespaces(Namespace::NON_DEBUG.to_vec());
+            .enable_api_namespaces(Namespace::DEFAULT.to_vec());
 
     if with_logs_request_translator_enabled {
         api_builder = api_builder.enable_request_translator();

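The behavioral takeaway: after this patch, neither debug_ nor snapshots_ is served by a node that relies on the default namespace set; the main node re-adds Snapshots explicitly in run_http_api, so only external nodes lose it by default. A minimal sanity-check sketch (the test name and a PartialEq derive on Namespace are assumptions):

    #[test]
    fn default_namespaces_exclude_debug_and_snapshots() {
        assert!(!Namespace::DEFAULT.contains(&Namespace::Debug));
        assert!(!Namespace::DEFAULT.contains(&Namespace::Snapshots));
    }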
From a0f8974dc4ac0474946bfb1543819cacb6b8fef0 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Fri, 8 Dec 2023 17:48:02 +0100
Subject: [PATCH 40/43] fix: add snapshots namespace to WS API as well

---
 core/lib/zksync_core/src/api_server/web3/mod.rs | 4 +++-
 core/lib/zksync_core/src/lib.rs                 | 5 ++++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs
index c2549c4aded5..0a87131e7b9d 100644
--- a/core/lib/zksync_core/src/api_server/web3/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/mod.rs
@@ -357,7 +357,9 @@ impl<G: 'static + Send + Sync + L1GasPriceProvider> ApiBuilder<G> {
         }
 
         if self.namespaces.is_none() {
-            tracing::warn!("debug_ API namespace will be disabled by default in ApiBuilder");
+            tracing::warn!(
+                "debug_ and snapshots_ API namespaces will be disabled by default in ApiBuilder"
+            );
             self.namespaces = Some(Namespace::DEFAULT.to_vec());
         }
 
diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs
index d60262114e3b..03c7aaac72a2 100644
--- a/core/lib/zksync_core/src/lib.rs
+++ b/core/lib/zksync_core/src/lib.rs
@@ -1186,6 +1186,9 @@ async fn run_ws_api<G: L1GasPriceProvider + Send + Sync + 'static>(
         .await
         .context("failed to build last_miniblock_pool")?;
 
+    let mut namespaces = Namespace::DEFAULT.to_vec();
+    namespaces.extend(Namespace::SNAPSHOTS.to_vec());
+
     let mut api_builder =
         web3::ApiBuilder::jsonrpc_backend(internal_api.clone(), replica_connection_pool)
             .ws(api_config.web3_json_rpc.ws_port)
@@ -1203,7 +1206,7 @@ async fn run_ws_api<G: L1GasPriceProvider + Send + Sync + 'static>(
             .with_threads(api_config.web3_json_rpc.ws_server_threads())
             .with_tree_api(api_config.web3_json_rpc.tree_api_url())
             .with_tx_sender(tx_sender, vm_barrier)
-            .enable_api_namespaces(Namespace::DEFAULT.to_vec());
+            .enable_api_namespaces(namespaces);
 
     if with_logs_request_translator_enabled {
         api_builder = api_builder.enable_request_translator();

From d31288855c7c6d756eb1c01fa4454cf9fddf7840 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Fri, 8 Dec 2023 18:34:35 +0100
Subject: [PATCH 41/43] fix: remove SNAPSHOTS and DEBUG namespace groups

---
 core/lib/zksync_core/src/api_server/web3/mod.rs | 3 ---
 core/lib/zksync_core/src/lib.rs                 | 6 +++---
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/core/lib/zksync_core/src/api_server/web3/mod.rs b/core/lib/zksync_core/src/api_server/web3/mod.rs
index 0a87131e7b9d..6ee53088ecff 100644
--- a/core/lib/zksync_core/src/api_server/web3/mod.rs
+++ b/core/lib/zksync_core/src/api_server/web3/mod.rs
@@ -112,9 +112,6 @@ impl Namespace {
         Namespace::En,
         Namespace::Pubsub,
     ];
-
-    pub const DEBUG: &'static [Namespace] = &[Namespace::Debug];
-    pub const SNAPSHOTS: &'static [Namespace] = &[Namespace::Snapshots];
 }
 
 /// Handles to the initialized API server.
diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs
index 03c7aaac72a2..74192beecb6c 100644
--- a/core/lib/zksync_core/src/lib.rs
+++ b/core/lib/zksync_core/src/lib.rs
@@ -1131,9 +1131,9 @@ async fn run_http_api<G: L1GasPriceProvider + Send + Sync + 'static>(
 
     let mut namespaces = Namespace::DEFAULT.to_vec();
     if with_debug_namespace {
-        namespaces.extend(Namespace::DEBUG.to_vec());
+        namespaces.push(Namespace::Debug);
     }
-    namespaces.extend(Namespace::SNAPSHOTS.to_vec());
+    namespaces.push(Namespace::Snapshots);
 
     let last_miniblock_pool = ConnectionPool::singleton(postgres_config.replica_url()?)
         .build()
@@ -1187,7 +1187,7 @@ async fn run_ws_api<G: L1GasPriceProvider + Send + Sync + 'static>(
         .context("failed to build last_miniblock_pool")?;
 
     let mut namespaces = Namespace::DEFAULT.to_vec();
-    namespaces.extend(Namespace::SNAPSHOTS.to_vec());
+    namespaces.push(Namespace::Snapshots);
 
     let mut api_builder =
         web3::ApiBuilder::jsonrpc_backend(internal_api.clone(), replica_connection_pool)

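With the single-element DEBUG and SNAPSHOTS slices gone, call sites push enum variants directly. As an illustrative sketch, a deployment that wants everything the old Namespace::ALL constant offered would now write:

    let mut namespaces = Namespace::DEFAULT.to_vec();
    namespaces.push(Namespace::Debug);
    namespaces.push(Namespace::Snapshots);
    // Same contents as the Namespace::ALL slice removed in patch 39.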
From 4be869909ff7c14cfdaff26bcc7ead8d598b3b74 Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Fri, 8 Dec 2023 18:36:50 +0100
Subject: [PATCH 42/43] fix: update sqlx-data.json

---
 core/lib/dal/sqlx-data.json | 96 +++++++++++++++++++++++++++++++++++++
 1 file changed, 96 insertions(+)

diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json
index e1e978d56f26..67943be6bb91 100644
--- a/core/lib/dal/sqlx-data.json
+++ b/core/lib/dal/sqlx-data.json
@@ -340,6 +340,20 @@
     },
     "query": "INSERT INTO eth_txs_history (eth_tx_id, base_fee_per_gas, priority_fee_per_gas, tx_hash, signed_raw_tx, created_at, updated_at, confirmed_at) VALUES ($1, 0, 0, $2, '\\x00', now(), now(), $3) RETURNING id"
   },
+  "07bb6aa5f4ffe0b753cca8ac92c65bd7618db908250e5bf9e835f54b1dd04755": {
+    "describe": {
+      "columns": [],
+      "nullable": [],
+      "parameters": {
+        "Left": [
+          "Int8",
+          "TextArray",
+          "Text"
+        ]
+      }
+    },
+    "query": "INSERT INTO snapshots (l1_batch_number, storage_logs_filepaths, factory_deps_filepath, created_at, updated_at) VALUES ($1, $2, $3, NOW(), NOW())"
+  },
   "09768b376996b96add16a02d1a59231cb9b525cd5bd19d22a76149962d4c91c2": {
     "describe": {
       "columns": [],
@@ -644,6 +658,38 @@
     },
     "query": "SELECT l1_address FROM tokens WHERE market_volume > $1"
   },
+  "1658e6fce121904c1353e51663fc307b01e02bc412ee46ac17e0f5acacd0b5c4": {
+    "describe": {
+      "columns": [
+        {
+          "name": "l1_batch_number",
+          "ordinal": 0,
+          "type_info": "Int8"
+        },
+        {
+          "name": "factory_deps_filepath",
+          "ordinal": 1,
+          "type_info": "Text"
+        },
+        {
+          "name": "storage_logs_filepaths",
+          "ordinal": 2,
+          "type_info": "TextArray"
+        }
+      ],
+      "nullable": [
+        false,
+        false,
+        false
+      ],
+      "parameters": {
+        "Left": [
+          "Int8"
+        ]
+      }
+    },
+    "query": "SELECT l1_batch_number, factory_deps_filepath, storage_logs_filepaths FROM snapshots WHERE l1_batch_number = $1"
+  },
   "16bca6f4258ff3db90a26a8550c5fc35e666fb698960486528fceba3e452fd62": {
     "describe": {
       "columns": [
@@ -8172,6 +8218,26 @@
     },
     "query": "INSERT INTO basic_witness_input_producer_jobs (l1_batch_number, status, created_at, updated_at) VALUES ($1, $2, now(), now()) ON CONFLICT (l1_batch_number) DO NOTHING"
   },
+  "a190719309378ee1912ffedd8180c151aacf17c3ca3bfca8563fa404d587edc8": {
+    "describe": {
+      "columns": [
+        {
+          "name": "index",
+          "ordinal": 0,
+          "type_info": "Int8"
+        }
+      ],
+      "nullable": [
+        false
+      ],
+      "parameters": {
+        "Left": [
+          "Int8"
+        ]
+      }
+    },
+    "query": "\n            SELECT index\n            FROM initial_writes\n            WHERE l1_batch_number <= $1\n            ORDER BY l1_batch_number DESC , index DESC \n            LIMIT 1;\n            "
+  },
   "a19b7137403c5cdf1be5f5122ce4d297ed661fa8bdb3bc91f8a81fe9da47469e": {
     "describe": {
       "columns": [
@@ -9182,6 +9248,36 @@
     },
     "query": "INSERT INTO prover_protocol_versions\n                    (id, timestamp, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash,\n                        recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, verifier_address, created_at)\n                VALUES ($1, $2, $3, $4, $5, $6, $7, now())\n                "
   },
+  "b707b6247c76a50bda3be8076aafb77de60cfc5a0cc61c7dd60e4330eabc28d7": {
+    "describe": {
+      "columns": [
+        {
+          "name": "l1_batch_number",
+          "ordinal": 0,
+          "type_info": "Int8"
+        },
+        {
+          "name": "factory_deps_filepath",
+          "ordinal": 1,
+          "type_info": "Text"
+        },
+        {
+          "name": "storage_logs_filepaths",
+          "ordinal": 2,
+          "type_info": "TextArray"
+        }
+      ],
+      "nullable": [
+        false,
+        false,
+        false
+      ],
+      "parameters": {
+        "Left": []
+      }
+    },
+    "query": "SELECT l1_batch_number, factory_deps_filepath, storage_logs_filepaths FROM snapshots"
+  },
   "b944df7af612ec911170a43be846eb2f6e27163b0d3983672de2b8d5d60af640": {
     "describe": {
       "columns": [

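Each entry in sqlx-data.json is keyed by the hash of a query used in a sqlx::query! invocation, so the cache lets the DAL compile in offline mode without a live Postgres; the SELECT on initial_writes, for instance, backs the lookup of the last enumeration index assigned at or before a given batch. A minimal sketch of the write path behind the new INSERT entry (the method name, receiver, and error type are assumptions; the query string must match the cached text exactly for the offline build to resolve it):

    pub async fn add_snapshot(
        &mut self,
        l1_batch_number: L1BatchNumber,
        storage_logs_filepaths: &[String],
        factory_deps_filepath: &str,
    ) -> sqlx::Result<()> {
        sqlx::query!(
            "INSERT INTO snapshots (l1_batch_number, storage_logs_filepaths, factory_deps_filepath, created_at, updated_at) VALUES ($1, $2, $3, NOW(), NOW())",
            l1_batch_number.0 as i64,
            storage_logs_filepaths,
            factory_deps_filepath,
        )
        .execute(self.storage.conn())
        .await?;
        Ok(())
    }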
From 81300ddf47ed994fceed9f62a0825735d939053d Mon Sep 17 00:00:00 2001
From: tomg10 <lemures64@gmail.com>
Date: Mon, 11 Dec 2023 12:35:28 +0100
Subject: [PATCH 43/43] fix: bash -> sh

---
 .githooks/pre-commit | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.githooks/pre-commit b/.githooks/pre-commit
index 81081e7dddfe..1f0c6b945b65 100755
--- a/.githooks/pre-commit
+++ b/.githooks/pre-commit
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 #
 # Pre-commit hook verifying that inappropriate code will not be committed.