From b22fd824aa59e82a285cadce9202b0aef005563f Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 9 Sep 2024 16:01:19 -0700 Subject: [PATCH 1/9] add do_atomically_for_col --- beacon_node/store/src/database/interface.rs | 11 +++++ .../store/src/database/leveldb_impl.rs | 40 +++++++++++++++++ beacon_node/store/src/database/redb_impl.rs | 43 +++++++++++++++++++ beacon_node/store/src/garbage_collection.rs | 20 ++++++--- beacon_node/store/src/hot_cold_store.rs | 13 ++++++ beacon_node/store/src/lib.rs | 3 ++ beacon_node/store/src/memory_store.rs | 30 +++++++++++++ lighthouse/Cargo.toml | 2 +- 8 files changed, 155 insertions(+), 7 deletions(-) diff --git a/beacon_node/store/src/database/interface.rs b/beacon_node/store/src/database/interface.rs index ce02fbde6bb..62689fbdf78 100644 --- a/beacon_node/store/src/database/interface.rs +++ b/beacon_node/store/src/database/interface.rs @@ -101,6 +101,17 @@ impl KeyValueStore for BeaconNodeBackend { } } + fn do_atomically_for_col(&self, col: &str, batch: Vec) -> Result<(), Error> { + match self { + #[cfg(feature = "leveldb")] + BeaconNodeBackend::LevelDb(txn) => { + leveldb_impl::LevelDB::do_atomically_for_col(txn, col, batch) + } + #[cfg(feature = "redb")] + BeaconNodeBackend::Redb(txn) => redb_impl::Redb::do_atomically_for_col(txn, col, batch), + } + } + fn do_atomically(&self, batch: Vec) -> Result<(), Error> { match self { #[cfg(feature = "leveldb")] diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 1d706592e69..621b60c8347 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -145,6 +145,46 @@ impl LevelDB { .map_err(Into::into) } + pub fn do_atomically_for_col( + &self, + col: &str, + ops_batch: Vec, + ) -> Result<(), Error> { + let mut leveldb_batch = Writebatch::new(); + for op in ops_batch { + match op { + KeyValueStoreOp::PutKeyValue(column, key, value) => { + if col != column { + // TODO return error + todo!() + } + let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[&column], + value.len() as u64, + ); + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[&column]); + let column_key = get_key_for_col(&column, &key); + leveldb_batch.put(BytesKey::from_vec(column_key), &value); + } + + KeyValueStoreOp::DeleteKey(column, key) => { + if col != column { + // TODO return error + todo!() + } + let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[&column]); + let column_key = get_key_for_col(&column, &key); + leveldb_batch.delete(BytesKey::from_vec(column_key)); + } + } + } + self.db.write(self.write_options().into(), &leveldb_batch)?; + Ok(()) + } + pub fn do_atomically(&self, ops_batch: Vec) -> Result<(), Error> { let mut leveldb_batch = Writebatch::new(); for op in ops_batch { diff --git a/beacon_node/store/src/database/redb_impl.rs b/beacon_node/store/src/database/redb_impl.rs index 3643e704127..31302e0bdac 100644 --- a/beacon_node/store/src/database/redb_impl.rs +++ b/beacon_node/store/src/database/redb_impl.rs @@ -163,6 +163,49 @@ impl Redb { tx.commit().map_err(Into::into) } + pub fn do_atomically_for_col( + &self, + col: &str, + ops_batch: Vec, + ) -> Result<(), Error> { + let open_db = self.db.read(); + let mut tx = open_db.begin_write()?; + tx.set_durability(self.write_options().into()); + let table_definition: TableDefinition<'_, &[u8], &[u8]> = 
TableDefinition::new(col); + let mut table = tx.open_table(table_definition)?; + + for op in ops_batch { + match op { + KeyValueStoreOp::PutKeyValue(column, key, value) => { + if col != column { + // TODO return error + todo!() + } + let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); + metrics::inc_counter_vec_by( + &metrics::DISK_DB_WRITE_BYTES, + &[&column], + value.len() as u64, + ); + metrics::inc_counter_vec(&metrics::DISK_DB_WRITE_COUNT, &[&column]); + table.insert(key.as_slice(), value.as_slice())?; + } + KeyValueStoreOp::DeleteKey(column, key) => { + if col != column { + // TODO return error + todo!() + } + metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[&column]); + let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); + table.remove(key.as_slice())?; + } + } + } + drop(table); + tx.commit()?; + Ok(()) + } + pub fn do_atomically(&self, ops_batch: Vec) -> Result<(), Error> { let open_db = self.db.read(); let mut tx = open_db.begin_write()?; diff --git a/beacon_node/store/src/garbage_collection.rs b/beacon_node/store/src/garbage_collection.rs index 75c24cd7682..26f16993533 100644 --- a/beacon_node/store/src/garbage_collection.rs +++ b/beacon_node/store/src/garbage_collection.rs @@ -1,7 +1,7 @@ //! Garbage collection process that runs at start-up to clean up the database. use crate::database::interface::BeaconNodeBackend; use crate::hot_cold_store::HotColdDB; -use crate::{Error, StoreOp}; +use crate::{DBColumn, Error, StoreOp}; use slog::debug; use types::EthSpec; @@ -18,22 +18,30 @@ where /// Delete the temporary states that were leftover by failed block imports. pub fn delete_temp_states(&self) -> Result<(), Error> { - let delete_ops = + let delete_state_ops = self.iter_temporary_state_roots()? .try_fold(vec![], |mut ops, state_root| { let state_root = state_root?; ops.push(StoreOp::DeleteState(state_root, None)); + Result::<_, Error>::Ok(ops) + })?; + let delete_temp_state_ops = + self.iter_temporary_state_roots()? 
+ .try_fold(vec![], |mut ops, state_root| { + let state_root = state_root?; ops.push(StoreOp::DeleteStateTemporaryFlag(state_root)); Result::<_, Error>::Ok(ops) })?; - - if !delete_ops.is_empty() { + if !delete_state_ops.is_empty() || !delete_temp_state_ops.is_empty() { debug!( self.log, "Garbage collecting {} temporary states", - delete_ops.len() / 2 + (delete_state_ops.len() / 2) + (delete_temp_state_ops.len() / 2) ); - self.do_atomically_with_block_and_blobs_cache(delete_ops)?; + let state_col: &str = DBColumn::BeaconStateSummary.into(); + let temp_state_col: &str = DBColumn::BeaconStateTemporary.into(); + self.do_atomically_for_garbage_collection(state_col, delete_state_ops)?; + self.do_atomically_for_garbage_collection(temp_state_col, delete_temp_state_ops)?; } Ok(()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index 45813b4f691..355ae43b0be 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1048,6 +1048,19 @@ impl, Cold: ItemStore> HotColdDB Ok(key_value_batch) } + pub fn do_atomically_for_garbage_collection( + &self, + col: &str, + batch: Vec>, + ) -> Result<(), Error> { + match self.convert_to_kv_batch(batch) { + Ok(kv_store_ops) => self.hot_db.do_atomically_for_col(col, kv_store_ops)?, + Err(e) => return Err(e), + }; + + Ok(()) + } + pub fn do_atomically_with_block_and_blobs_cache( &self, batch: Vec>, diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 6c56dc13117..e962bfba9f0 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -72,6 +72,9 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { /// Removes `key` from `column`. fn key_delete(&self, column: &str, key: &[u8]) -> Result<(), Error>; + /// Execute either all of the operations in `batch` for a given `col` or none at all, returning an error. + fn do_atomically_for_col(&self, col: &str, batch: Vec) -> Result<(), Error>; + /// Execute either all of the operations in `batch` or none at all, returning an error. 
fn do_atomically(&self, batch: Vec) -> Result<(), Error>; diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 170856ca09f..bfc06c104bf 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -63,6 +63,36 @@ impl KeyValueStore for MemoryStore { Ok(()) } + // TODO(modularize-backend) do atomcally for col impl + fn do_atomically_for_col(&self, col: &str, batch: Vec) -> Result<(), Error> { + for op in batch { + match op { + KeyValueStoreOp::PutKeyValue(column, key, value) => { + if col != column { + // TODO(modularize-backend) + // raise error + todo!() + } + let column_key = get_key_for_col(&column, &key); + self.db + .write() + .insert(BytesKey::from_vec(column_key), value); + } + + KeyValueStoreOp::DeleteKey(column, key) => { + if col != column { + // TODO(modularize-backend) + // raise error + todo!() + } + let column_key = get_key_for_col(&column, &key); + self.db.write().remove(&BytesKey::from_vec(column_key)); + } + } + } + Ok(()) + } + fn do_atomically(&self, batch: Vec) -> Result<(), Error> { for op in batch { match op { diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 967bd047935..3abaab9c955 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -7,7 +7,7 @@ autotests = false rust-version = "1.80.0" [features] -default = ["slasher-lmdb", "beacon-node-leveldb"] +default = ["slasher-lmdb", "beacon-node-redb"] # Writes debugging .ssz files to /tmp during block processing. write_ssz_files = ["beacon_node/write_ssz_files"] # Compiles the BLS crypto code so that the binary is portable across machines. From 6d219a8fc5d06bf3fd4c9bdc4a9f81bba6abb98c Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 16 Sep 2024 14:22:36 -0700 Subject: [PATCH 2/9] add error handling --- .../store/src/database/leveldb_impl.rs | 19 ++++++++++++++----- beacon_node/store/src/database/redb_impl.rs | 18 +++++++++++++----- 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/beacon_node/store/src/database/leveldb_impl.rs b/beacon_node/store/src/database/leveldb_impl.rs index 621b60c8347..8ed682f2b81 100644 --- a/beacon_node/store/src/database/leveldb_impl.rs +++ b/beacon_node/store/src/database/leveldb_impl.rs @@ -1,7 +1,8 @@ use crate::hot_cold_store::{BytesKey, HotColdDBError}; use crate::Key; use crate::{ - get_key_for_col, metrics, ColumnIter, ColumnKeyIter, DBColumn, Error, KeyValueStoreOp, + errors::Error as DBError, get_key_for_col, metrics, ColumnIter, ColumnKeyIter, DBColumn, Error, + KeyValueStoreOp, }; use leveldb::{ compaction::Compaction, @@ -155,8 +156,12 @@ impl LevelDB { match op { KeyValueStoreOp::PutKeyValue(column, key, value) => { if col != column { - // TODO return error - todo!() + return Err(DBError::DBError { + message: format!( + "Attempted to mutate unexpected column: {}. Expected: {}, ", + column, col + ), + }); } let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); metrics::inc_counter_vec_by( @@ -171,8 +176,12 @@ impl LevelDB { KeyValueStoreOp::DeleteKey(column, key) => { if col != column { - // TODO return error - todo!() + return Err(DBError::DBError { + message: format!( + "Attempted to mutate unexpected column: {}. 
Expected: {}, ", + column, col + ), + }); } let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[&column]); diff --git a/beacon_node/store/src/database/redb_impl.rs b/beacon_node/store/src/database/redb_impl.rs index 31302e0bdac..fea9703a1a4 100644 --- a/beacon_node/store/src/database/redb_impl.rs +++ b/beacon_node/store/src/database/redb_impl.rs @@ -1,5 +1,5 @@ +use crate::{errors::Error as DBError, DBColumn, Error, KeyValueStoreOp}; use crate::{metrics, ColumnIter, ColumnKeyIter, Key}; -use crate::{DBColumn, Error, KeyValueStoreOp}; use parking_lot::{Mutex, MutexGuard, RwLock}; use redb::TableDefinition; use std::{borrow::BorrowMut, marker::PhantomData, path::Path}; @@ -178,8 +178,12 @@ impl Redb { match op { KeyValueStoreOp::PutKeyValue(column, key, value) => { if col != column { - // TODO return error - todo!() + return Err(DBError::DBError { + message: format!( + "Attempted to mutate unexpected column: {}. Expected: {}, ", + column, col + ), + }); } let _timer = metrics::start_timer(&metrics::DISK_DB_WRITE_TIMES); metrics::inc_counter_vec_by( @@ -192,8 +196,12 @@ impl Redb { } KeyValueStoreOp::DeleteKey(column, key) => { if col != column { - // TODO return error - todo!() + return Err(DBError::DBError { + message: format!( + "Attempted to mutate unexpected column: {}. Expected: {}, ", + column, col + ), + }); } metrics::inc_counter_vec(&metrics::DISK_DB_DELETE_COUNT, &[&column]); let _timer = metrics::start_timer(&metrics::DISK_DB_DELETE_TIMES); From f75b6dfde26c3d1c030d83134eaefa438009dc87 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 16 Sep 2024 14:42:55 -0700 Subject: [PATCH 3/9] revert back to leveldb as default --- lighthouse/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 3abaab9c955..967bd047935 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -7,7 +7,7 @@ autotests = false rust-version = "1.80.0" [features] -default = ["slasher-lmdb", "beacon-node-redb"] +default = ["slasher-lmdb", "beacon-node-leveldb"] # Writes debugging .ssz files to /tmp during block processing. write_ssz_files = ["beacon_node/write_ssz_files"] # Compiles the BLS crypto code so that the binary is portable across machines. From a37e14c2d82932bffffe5d5846b8e69d2e3d07c3 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 16 Sep 2024 14:53:34 -0700 Subject: [PATCH 4/9] remove todos --- beacon_node/store/src/memory_store.rs | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index bfc06c104bf..735b5d150a3 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,6 +1,6 @@ use crate::{ get_key_for_col, hot_cold_store::BytesKey, ColumnIter, ColumnKeyIter, DBColumn, Error, - ItemStore, Key, KeyValueStore, KeyValueStoreOp, + ItemStore, Key, KeyValueStore, KeyValueStoreOp, errors::Error as DBError }; use parking_lot::{Mutex, MutexGuard, RwLock}; use std::collections::BTreeMap; @@ -69,9 +69,12 @@ impl KeyValueStore for MemoryStore { match op { KeyValueStoreOp::PutKeyValue(column, key, value) => { if col != column { - // TODO(modularize-backend) - // raise error - todo!() + return Err(DBError::DBError { + message: format!( + "Attempted to mutate unexpected column: {}. 
Expected: {}, ", + column, col + ), + }); } let column_key = get_key_for_col(&column, &key); self.db @@ -81,9 +84,12 @@ impl KeyValueStore for MemoryStore { KeyValueStoreOp::DeleteKey(column, key) => { if col != column { - // TODO(modularize-backend) - // raise error - todo!() + return Err(DBError::DBError { + message: format!( + "Attempted to mutate unexpected column: {}. Expected: {}, ", + column, col + ), + }); } let column_key = get_key_for_col(&column, &key); self.db.write().remove(&BytesKey::from_vec(column_key)); From 6dc91ba18cf4b8009dad485dd71cf460f175c731 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 16 Sep 2024 14:58:53 -0700 Subject: [PATCH 5/9] fmt --- beacon_node/store/src/memory_store.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 735b5d150a3..bb39c6f9578 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,6 +1,6 @@ use crate::{ - get_key_for_col, hot_cold_store::BytesKey, ColumnIter, ColumnKeyIter, DBColumn, Error, - ItemStore, Key, KeyValueStore, KeyValueStoreOp, errors::Error as DBError + errors::Error as DBError, get_key_for_col, hot_cold_store::BytesKey, ColumnIter, ColumnKeyIter, + DBColumn, Error, ItemStore, Key, KeyValueStore, KeyValueStoreOp, }; use parking_lot::{Mutex, MutexGuard, RwLock}; use std::collections::BTreeMap; From 95d825f22d1797ad189e3fd678c6e6fdd007877e Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Mon, 16 Sep 2024 21:50:17 -0700 Subject: [PATCH 6/9] fix error --- beacon_node/beacon_chain/src/migrate.rs | 2 +- beacon_node/http_api/tests/tests.rs | 1 - beacon_node/store/src/garbage_collection.rs | 38 +++++++++++---------- beacon_node/store/src/hot_cold_store.rs | 32 ++++++++++++++--- beacon_node/store/src/lib.rs | 5 ++- 5 files changed, 52 insertions(+), 26 deletions(-) diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index f83df7b4468..d3d17c10b94 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -682,7 +682,7 @@ impl, Cold: ItemStore> BackgroundMigrator Result<(), Error> { - let delete_state_ops = - self.iter_temporary_state_roots()? - .try_fold(vec![], |mut ops, state_root| { - let state_root = state_root?; - ops.push(StoreOp::DeleteState(state_root, None)); - Result::<_, Error>::Ok(ops) - })?; - let delete_temp_state_ops = - self.iter_temporary_state_roots()? 
- .try_fold(vec![], |mut ops, state_root| { - let state_root = state_root?; - ops.push(StoreOp::DeleteStateTemporaryFlag(state_root)); - Result::<_, Error>::Ok(ops) - })?; - if !delete_state_ops.is_empty() || !delete_temp_state_ops.is_empty() { + let mut delete_state_ops = vec![]; + let mut delete_summary_ops = vec![]; + let mut delete_temporary_state_ops = vec![]; + let mut delete_states = false; + self.iter_temporary_state_roots()?.for_each( + |state_root| { + if let Ok(state_root) = state_root { + delete_state_ops.push(StoreOp::DeleteState(state_root)); + delete_summary_ops.push(StoreOp::DeleteSummary(state_root)); + delete_temporary_state_ops.push(StoreOp::DeleteStateTemporaryFlag(state_root)); + delete_states = true + } + } + ); + if delete_states { debug!( self.log, "Garbage collecting {} temporary states", - (delete_state_ops.len() / 2) + (delete_temp_state_ops.len() / 2) + delete_state_ops.len() ); - let state_col: &str = DBColumn::BeaconStateSummary.into(); + let state_col: &str = DBColumn::BeaconState.into(); + let summary_col: &str = DBColumn::BeaconStateSummary.into(); let temp_state_col: &str = DBColumn::BeaconStateTemporary.into(); self.do_atomically_for_garbage_collection(state_col, delete_state_ops)?; - self.do_atomically_for_garbage_collection(temp_state_col, delete_temp_state_ops)?; + self.do_atomically_for_garbage_collection(summary_col, delete_summary_ops)?; + self.do_atomically_for_garbage_collection(temp_state_col, delete_temporary_state_ops)?; } Ok(()) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index dc51941de88..f398e7e7fec 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -1010,7 +1010,7 @@ impl, Cold: ItemStore> HotColdDB /// (which are frozen, and won't be deleted), or valid descendents of the finalized checkpoint /// (which will be deleted by this function but shouldn't be). 
pub fn delete_state(&self, state_root: &Hash256, slot: Slot) -> Result<(), Error> { - self.do_atomically_with_block_and_blobs_cache(vec![StoreOp::DeleteState( + self.do_atomically_with_block_and_blobs_cache(vec![StoreOp::DeleteStateAndSummary( *state_root, Some(slot), )]) @@ -1183,7 +1183,23 @@ impl, Cold: ItemStore> HotColdDB } } - StoreOp::DeleteState(state_root, slot) => { + StoreOp::DeleteSummary(state_root) => { + let column_name: &str = DBColumn::BeaconStateSummary.into(); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + column_name.to_owned(), + state_root.as_slice().to_vec(), + )); + } + + StoreOp::DeleteState(state_root) => { + let column_name: &str = DBColumn::BeaconState.into(); + key_value_batch.push(KeyValueStoreOp::DeleteKey( + column_name.to_owned(), + state_root.as_slice().to_vec(), + )); + } + + StoreOp::DeleteStateAndSummary(state_root, slot) => { let column_name: &str = DBColumn::BeaconStateSummary.into(); key_value_batch.push(KeyValueStoreOp::DeleteKey( column_name.to_owned(), @@ -1363,10 +1379,16 @@ impl, Cold: ItemStore> HotColdDB self.state_cache.lock().delete_block_states(&block_root); } - StoreOp::DeleteState(state_root, _) => { + StoreOp::DeleteStateAndSummary(state_root, _) => { self.state_cache.lock().delete_state(&state_root) } + StoreOp::DeleteState(state_root) => { + self.state_cache.lock().delete_state(&state_root) + } + + StoreOp::DeleteSummary(_) => (), + StoreOp::DeleteBlobs(_) => (), StoreOp::DeleteDataColumns(_, _) => (), @@ -2968,7 +2990,7 @@ impl, Cold: ItemStore> HotColdDB "slot" => summary.slot, "reason" => reason, ); - state_delete_batch.push(StoreOp::DeleteState(state_root, Some(summary.slot))); + state_delete_batch.push(StoreOp::DeleteStateAndSummary(state_root, Some(summary.slot))); } } } @@ -3065,7 +3087,7 @@ pub fn migrate_database, Cold: ItemStore>( } // Delete the old summary, and the full state if we lie on an epoch boundary. - hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); + hot_db_ops.push(StoreOp::DeleteStateAndSummary(state_root, Some(slot))); // Store the block root for this slot in the linear array of frozen block roots. 
block_root_writer.set(slot.as_usize(), block_root, &mut cold_db_ops)?; diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index fcbc054e1c4..6de4444e736 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -250,9 +250,12 @@ pub enum StoreOp<'a, E: EthSpec> { DeleteBlock(Hash256), DeleteBlobs(Hash256), DeleteDataColumns(Hash256, Vec), - DeleteState(Hash256, Option), + // Delete Summary and delete state if state is on an epoch boundary + DeleteStateAndSummary(Hash256, Option), + DeleteState(Hash256), DeleteExecutionPayload(Hash256), DeleteSyncCommitteeBranch(Hash256), + DeleteSummary(Hash256), KeyValueOp(KeyValueStoreOp), } From a4c46b450fb4eb3ff3b8239d6bde87dd871d8a21 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Tue, 17 Sep 2024 15:42:33 -0700 Subject: [PATCH 7/9] fmt --- beacon_node/beacon_chain/src/migrate.rs | 8 +++----- beacon_node/store/src/garbage_collection.rs | 16 +++++++--------- beacon_node/store/src/hot_cold_store.rs | 5 ++++- 3 files changed, 14 insertions(+), 15 deletions(-) diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index d3d17c10b94..cf98b1cb9d1 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -679,11 +679,9 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> HotColdDB "slot" => summary.slot, "reason" => reason, ); - state_delete_batch.push(StoreOp::DeleteStateAndSummary(state_root, Some(summary.slot))); + state_delete_batch.push(StoreOp::DeleteStateAndSummary( + state_root, + Some(summary.slot), + )); } } } From 967eaa9713603637044beb7958fed72e5c285b67 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 18 Sep 2024 18:12:12 -0700 Subject: [PATCH 8/9] never flush the cache during garbage collection --- beacon_node/store/src/database/redb_impl.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/store/src/database/redb_impl.rs b/beacon_node/store/src/database/redb_impl.rs index fea9703a1a4..1dd88c9eb25 100644 --- a/beacon_node/store/src/database/redb_impl.rs +++ b/beacon_node/store/src/database/redb_impl.rs @@ -170,7 +170,7 @@ impl Redb { ) -> Result<(), Error> { let open_db = self.db.read(); let mut tx = open_db.begin_write()?; - tx.set_durability(self.write_options().into()); + tx.set_durability(redb::Durability::None); let table_definition: TableDefinition<'_, &[u8], &[u8]> = TableDefinition::new(col); let mut table = tx.open_table(table_definition)?; From 7757f7d7a8403efcb1f8d53f9385f6dc5efa4f3e Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Wed, 18 Sep 2024 18:39:30 -0700 Subject: [PATCH 9/9] further optimizations --- beacon_node/store/src/database/redb_impl.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/beacon_node/store/src/database/redb_impl.rs b/beacon_node/store/src/database/redb_impl.rs index 1dd88c9eb25..0ae97bfeb3a 100644 --- a/beacon_node/store/src/database/redb_impl.rs +++ b/beacon_node/store/src/database/redb_impl.rs @@ -33,7 +33,9 @@ impl Redb { } else { path.to_path_buf() }; - let db = redb::Database::create(path)?; + let db = redb::Builder::new() + .set_cache_size(2 * 1024 * 1024 * 1024) + .create(path)?; let transaction_mutex = Mutex::new(()); for column in DBColumn::iter() {
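Note on usage (not part of the series): a minimal sketch of how a caller could drive the per-column atomic API introduced in PATCH 1. It assumes the trait method shape `fn do_atomically_for_col(&self, col: &str, batch: Vec<KeyValueStoreOp>) -> Result<(), Error>`, the store's usual `KeyValueStore<E: EthSpec>` bound, and that `KeyValueStoreOp::PutKeyValue` / `DeleteKey` carry their column as a `String`; the helper names below (`group_by_column`, `apply_per_column`) are illustrative only and do not exist in the store crate.

    use std::collections::HashMap;

    use crate::{Error, KeyValueStore, KeyValueStoreOp};
    use types::EthSpec;

    // Partition a mixed batch by column so that every group satisfies the
    // single-column invariant enforced by the leveldb/redb/memory backends.
    fn group_by_column(batch: Vec<KeyValueStoreOp>) -> HashMap<String, Vec<KeyValueStoreOp>> {
        let mut groups: HashMap<String, Vec<KeyValueStoreOp>> = HashMap::new();
        for op in batch {
            let column = match &op {
                KeyValueStoreOp::PutKeyValue(column, _, _) => column.clone(),
                KeyValueStoreOp::DeleteKey(column, _) => column.clone(),
            };
            groups.entry(column).or_default().push(op);
        }
        groups
    }

    // Apply a mixed batch as one atomic write per column touched. Ops within a
    // column commit together; ops in different columns commit independently,
    // which is the trade-off delete_temp_states accepts in PATCH 6.
    fn apply_per_column<E: EthSpec, S: KeyValueStore<E>>(
        store: &S,
        batch: Vec<KeyValueStoreOp>,
    ) -> Result<(), Error> {
        for (column, ops) in group_by_column(batch) {
            // Every op in `ops` targets `column`, so the backends' "unexpected
            // column" check added in PATCH 2 cannot trip here.
            store.do_atomically_for_col(&column, ops)?;
        }
        Ok(())
    }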