From b4cc6176ac7c3c99154d4db069c08e63e9e97631 Mon Sep 17 00:00:00 2001
From: tomg10
Date: Tue, 5 Dec 2023 12:24:39 +0100
Subject: [PATCH] fix: PR feedback

---
 core/bin/snapshots_creator/README.md       |  4 +-
 core/bin/snapshots_creator/src/chunking.rs |  2 +-
 core/bin/snapshots_creator/src/main.rs     | 32 +++++-----
 core/lib/dal/sqlx-data.json                | 70 ++++++++++------
 4 files changed, 49 insertions(+), 59 deletions(-)

diff --git a/core/bin/snapshots_creator/README.md b/core/bin/snapshots_creator/README.md
index 10202a6df51b..03167b803592 100644
--- a/core/bin/snapshots_creator/README.md
+++ b/core/bin/snapshots_creator/README.md
@@ -16,8 +16,8 @@ Snapshot contents can be stored based on blob_store config either in local files
 ## Snapshots format
 
 Each snapshot consists of three types of objects (see
-[snapshots.rs](https://github.com/matter-labs/zksync-era/core/lib/types/src/snapshots.rs)) : header, storage logs chunks
-and factory deps:
+[snapshots.rs](https://github.com/matter-labs/zksync-era/blob/main/core/lib/types/src/snapshots.rs)) : header, storage
+logs chunks and factory deps:
 
 - Snapshot Header (currently returned by snapshots namespace of JSON-RPC API)
 - Snapshot Storage logs chunks (most likely to be stored in gzipped protobuf files, but this part is still WIP) :
diff --git a/core/bin/snapshots_creator/src/chunking.rs b/core/bin/snapshots_creator/src/chunking.rs
index ebb88b56649e..68db97fd73cb 100644
--- a/core/bin/snapshots_creator/src/chunking.rs
+++ b/core/bin/snapshots_creator/src/chunking.rs
@@ -17,5 +17,5 @@ pub fn get_chunk_hashed_keys_range(chunk_id: u64, chunks_count: u64) -> ([u8; 2]
     let start_bytes = (chunk_start as u16).to_be_bytes();
     let end_bytes = (chunk_end as u16).to_be_bytes();
 
-    return (start_bytes, end_bytes);
+    (start_bytes, end_bytes)
 }
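
For reference, the chunking helper touched above splits the space of two-byte
hashed-key prefixes (65536 values) into `chunks_count` contiguous ranges, so
storage-log chunks can be dumped concurrently. A minimal sketch of that idea,
assuming even division with the last chunk absorbing the remainder
(`chunk_prefix_range` is an illustrative name, not the exact body of
chunking.rs):

fn chunk_prefix_range(chunk_id: u64, chunks_count: u64) -> ([u8; 2], [u8; 2]) {
    // Sketch assumption: at most 2^16 chunks, so every chunk is non-empty.
    assert!(chunks_count > 0 && chunks_count <= 1 << 16 && chunk_id < chunks_count);
    let keyspace = u16::MAX as u64 + 1; // 65536 possible two-byte prefixes
    let chunk_start = chunk_id * keyspace / chunks_count;
    // The last chunk ends at 0xffff so the whole keyspace is covered.
    let chunk_end = if chunk_id + 1 == chunks_count {
        u16::MAX as u64
    } else {
        (chunk_id + 1) * keyspace / chunks_count - 1
    };
    ((chunk_start as u16).to_be_bytes(), (chunk_end as u16).to_be_bytes())
}

fn main() {
    // Four chunks: [00, 00]..=[3f, ff], [40, 00]..=[7f, ff], and so on.
    for chunk_id in 0..4 {
        let (start, end) = chunk_prefix_range(chunk_id, 4);
        println!("chunk {chunk_id}: {start:02x?}..={end:02x?}");
    }
}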
diff --git a/core/bin/snapshots_creator/src/main.rs b/core/bin/snapshots_creator/src/main.rs
index c11220df56f5..5653ee54dced 100644
--- a/core/bin/snapshots_creator/src/main.rs
+++ b/core/bin/snapshots_creator/src/main.rs
@@ -10,6 +10,7 @@ use std::pin::Pin;
 use std::time::Duration;
 use tokio::sync::watch;
 use tokio::sync::watch::Receiver;
+use vise::Unit;
 use vise::{Buckets, Gauge, Histogram, Metrics};
 use zksync_config::configs::PrometheusConfig;
 use zksync_config::{PostgresConfig, SnapshotsCreatorConfig};
@@ -25,7 +26,6 @@ use zksync_types::snapshots::{
 use zksync_types::zkevm_test_harness::zk_evm::zkevm_opcode_defs::decoding::AllowedPcOrImm;
 use zksync_types::{L1BatchNumber, MiniblockNumber};
 use zksync_utils::ceil_div;
-use zksync_utils::time::seconds_since_epoch;
 
 #[derive(Debug, Metrics)]
 #[metrics(prefix = "snapshots_creator")]
@@ -34,15 +34,16 @@ struct SnapshotsCreatorMetrics {
     storage_logs_chunks_left_to_process: Gauge<u64>,
 
-    snapshot_generation_duration: Gauge<u64>,
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    snapshot_generation_duration: Histogram<Duration>,
 
     snapshot_l1_batch: Gauge<u64>,
 
-    #[metrics(buckets = Buckets::LATENCIES)]
-    storage_logs_processing_durations: Histogram<Duration>,
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    storage_logs_processing_duration: Histogram<Duration>,
 
-    #[metrics(buckets = Buckets::LATENCIES)]
-    factory_deps_processing_durations: Histogram<Duration>,
+    #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)]
+    factory_deps_processing_duration: Histogram<Duration>,
 }
 
 #[vise::register]
 pub(crate) static METRICS: vise::Global<SnapshotsCreatorMetrics> = vise::Global::new();
@@ -72,7 +73,7 @@ async fn process_storage_logs_single_chunk(
     chunks_count: u64,
 ) -> anyhow::Result<String> {
     let (min_hashed_key, max_hashed_key) = get_chunk_hashed_keys_range(chunk_id, chunks_count);
-    let latency = METRICS.storage_logs_processing_durations.start();
+    let latency = METRICS.storage_logs_processing_duration.start();
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let logs = conn
         .snapshots_creator_dal()
@@ -107,7 +108,7 @@ async fn process_factory_deps(
     miniblock_number: MiniblockNumber,
     l1_batch_number: L1BatchNumber,
 ) -> anyhow::Result<String> {
-    let latency = METRICS.factory_deps_processing_durations.start();
+    let latency = METRICS.factory_deps_processing_duration.start();
     let mut conn = pool.access_storage_tagged("snapshots_creator").await?;
     let factory_deps = conn
         .snapshots_creator_dal()
@@ -134,12 +135,13 @@ async fn run(
     replica_pool: ConnectionPool,
     master_pool: ConnectionPool,
 ) -> anyhow::Result<()> {
+    let latency = METRICS.snapshot_generation_duration.start();
+
     let config = SnapshotsCreatorConfig::from_env().context("SnapshotsCreatorConfig::from_env")?;
 
     let mut conn = replica_pool
         .access_storage_tagged("snapshots_creator")
         .await?;
-    let start_time = seconds_since_epoch();
 
     // we subtract 1 so that after restore, EN node has at least one l1 batch to fetch
     let l1_batch_number = conn.blocks_dal().get_sealed_l1_batch_number().await? - 1;
@@ -200,7 +202,7 @@ async fn run(
     let mut tasks =
         FuturesUnordered::<Pin<Box<dyn Future<Output = anyhow::Result<String>>>>>::new();
     let mut last_chunk_id = 0;
-    while last_chunk_id < chunks_count || tasks.len() != 0 {
+    while last_chunk_id < chunks_count || !tasks.is_empty() {
         while (tasks.len() as u32) < config.concurrent_queries_count && last_chunk_id < chunks_count
         {
             tasks.push(Box::pin(process_storage_logs_single_chunk(
@@ -242,15 +244,9 @@ async fn run(
         .await?;
 
     METRICS.snapshot_l1_batch.set(l1_batch_number.0.as_u64());
 
-    METRICS
-        .snapshot_generation_duration
-        .set(seconds_since_epoch() - start_time);
-    tracing::info!("Run metrics:");
-    tracing::info!(
-        "snapshot_generation_duration: {}s",
-        METRICS.snapshot_generation_duration.get()
-    );
+    let elapsed_sec = latency.observe().as_secs();
+    tracing::info!("snapshot_generation_duration: {elapsed_sec}s");
     tracing::info!("snapshot_l1_batch: {}", METRICS.snapshot_l1_batch.get());
     tracing::info!(
         "storage_logs_chunks_count: {}",
diff --git a/core/lib/dal/sqlx-data.json b/core/lib/dal/sqlx-data.json
index c2e6468cdc34..33ad1902342b 100644
--- a/core/lib/dal/sqlx-data.json
+++ b/core/lib/dal/sqlx-data.json
@@ -276,19 +276,19 @@
         }
       ],
       "nullable": [
-        false,
-        false,
-        false,
-        false,
-        false,
-        false,
-        false,
-        false,
         true,
-        false,
-        false,
-        false,
-        false
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true,
+        true
       ],
       "parameters": {
         "Left": [
@@ -5196,6 +5196,26 @@
     },
     "query": "INSERT INTO events_queue (l1_batch_number, serialized_events_queue) VALUES ($1, $2)"
   },
+  "61cc5a1564918a34b4235290c421f04c40ef935f72f2c72744a5b741439a966a": {
+    "describe": {
+      "columns": [
+        {
+          "name": "bytecode",
+          "ordinal": 0,
+          "type_info": "Bytea"
+        }
+      ],
+      "nullable": [
+        false
+      ],
+      "parameters": {
+        "Left": [
+          "Int8"
+        ]
+      }
+    },
+    "query": "SELECT bytecode FROM factory_deps WHERE miniblock_number <= $1"
+  },
   "6317155050a5dae24ea202cfd54d1e58cc7aeb0bfd4d95aa351f85cff04d3bff": {
     "describe": {
       "columns": [
@@ -8759,32 +8779,6 @@
     },
     "query": "INSERT INTO prover_fri_protocol_versions (id, recursion_scheduler_level_vk_hash, recursion_node_level_vk_hash, recursion_leaf_level_vk_hash, recursion_circuits_set_vks_hash, created_at) VALUES ($1, $2, $3, $4, $5, now()) ON CONFLICT(id) DO NOTHING"
   },
"a2ac8d74aec70c50cd1919c9d532965bfc38d89bf6379bf649318e55071f1b41": { - "describe": { - "columns": [ - { - "name": "bytecode", - "ordinal": 0, - "type_info": "Bytea" - }, - { - "name": "bytecode_hash", - "ordinal": 1, - "type_info": "Bytea" - } - ], - "nullable": [ - false, - false - ], - "parameters": { - "Left": [ - "Int8" - ] - } - }, - "query": "SELECT bytecode, bytecode_hash FROM factory_deps WHERE miniblock_number <= $1" - }, "a39f760d2cd879a78112e57d8611d7099802b03b7cc4933cafb4c47e133ad543": { "describe": { "columns": [