diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index 4fcebb123d9e3..bc0e02d9c6412 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -75,6 +75,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, + delayed_canonicalization: Some(32), chain_spec: spec, wasm_method: WasmExecutionMethod::Compiled { instantiation_strategy: WasmtimeInstantiationStrategy::PoolingCopyOnWrite, diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index a8839642ddc26..c2f9699b1bb54 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -69,6 +69,7 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { trie_cache_maximum_size: Some(64 * 1024 * 1024), state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, + delayed_canonicalization: Some(32), chain_spec: spec, wasm_method: WasmExecutionMethod::Interpreted, // NOTE: we enforce the use of the native runtime to make the errors more debuggable diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 59f1fa94c9b20..eda49d5991575 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -393,6 +393,7 @@ impl BenchDb { state_pruning: Some(PruningMode::ArchiveAll), source: database_type.into_settings(dir.into()), blocks_pruning: sc_client_db::BlocksPruning::KeepAll, + delayed_canonicalization: None, }; let task_executor = TaskExecutor::new(); diff --git a/client/cli/src/commands/chain_info_cmd.rs b/client/cli/src/commands/chain_info_cmd.rs index cbc22cc4d52d9..52514a0f5a626 100644 --- a/client/cli/src/commands/chain_info_cmd.rs +++ b/client/cli/src/commands/chain_info_cmd.rs @@ -77,6 +77,7 @@ impl ChainInfoCmd 
{ state_pruning: config.state_pruning.clone(), source: config.database.clone(), blocks_pruning: config.blocks_pruning, + delayed_canonicalization: config.delayed_canonicalization, }; let backend = sc_service::new_db_backend::(db_config)?; let info: ChainInfo = backend.blockchain().info().into(); diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 77689708a231f..c1ed63bef64ba 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -42,6 +42,10 @@ pub(crate) const NODE_NAME_MAX_LENGTH: usize = 64; /// Default sub directory to store network config. pub(crate) const DEFAULT_NETWORK_CONFIG_PATH: &str = "network"; +/// The blocks that were supposed to get pruned at the finalization N +/// will get pruned at N + 32. +pub(crate) const DELAYED_CANONICALIZATION: u32 = 32; + /// The recommended open file descriptor limit to be configured for the process. const RECOMMENDED_OPEN_FILE_DESCRIPTOR_LIMIT: u64 = 10_000; @@ -248,6 +252,17 @@ pub trait CliConfiguration: Sized { .unwrap_or_else(|| Ok(Default::default())) } + /// Get the delayed canonicalization mode. + /// + /// By default this is retrieved from `delayed_canonicalization` if it is available. + /// Otherwise the mode is active. + fn delayed_canonicalization(&self) -> Result> { + Ok(self + .pruning_params() + .map(|x| x.delayed_canonicalization()) + .unwrap_or(Some(DELAYED_CANONICALIZATION))) + } + /// Get the block pruning mode. /// /// By default this is retrieved from `block_pruning` if it is available. 
Otherwise its @@ -530,6 +545,7 @@ pub trait CliConfiguration: Sized { trie_cache_maximum_size: self.trie_cache_maximum_size()?, state_pruning: self.state_pruning()?, blocks_pruning: self.blocks_pruning()?, + delayed_canonicalization: self.delayed_canonicalization()?, wasm_method: self.wasm_method()?, wasm_runtime_overrides: self.wasm_runtime_overrides(), execution_strategies: self.execution_strategies(is_dev, is_validator)?, diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 2da1de919771c..968c2287fa383 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use crate::error; +use crate::{config::DELAYED_CANONICALIZATION, error}; use clap::Args; use sc_service::{BlocksPruning, PruningMode}; @@ -40,6 +40,14 @@ pub struct PruningParams { /// NOTE: only finalized blocks are subject for removal! #[arg(alias = "keep-blocks", long, value_name = "COUNT")] pub blocks_pruning: Option, + /// Specify the delayed canonicalization of blocks. + /// + /// The blocks that were supposed to get pruned at the finalization N + /// will get pruned after a number of finalizations. + /// + /// This option is enabled by default. + #[clap(alias = "delayed-pruning", long)] + pub delayed_canonicalization: Option, } impl PruningParams { @@ -76,4 +84,13 @@ impl PruningParams { None => Ok(BlocksPruning::KeepFinalized), } } + + /// Get the block delayed canonicalization value from the parameters. 
+ pub fn delayed_canonicalization(&self) -> Option { + if self.delayed_canonicalization.unwrap_or(true) { + Some(DELAYED_CANONICALIZATION) + } else { + None + } + } } diff --git a/client/db/benches/state_access.rs b/client/db/benches/state_access.rs index bab79fe7c90db..009f433a84fdc 100644 --- a/client/db/benches/state_access.rs +++ b/client/db/benches/state_access.rs @@ -122,6 +122,7 @@ fn create_backend(config: BenchmarkConfig, temp_dir: &TempDir) -> Backend state_pruning: Some(PruningMode::ArchiveAll), source: DatabaseSource::ParityDb { path }, blocks_pruning: BlocksPruning::KeepAll, + delayed_canonicalization: None, }; Backend::new(settings, 100).expect("Creates backend") diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index fc031e2aaba59..c34cc48f32152 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -317,6 +317,8 @@ pub struct DatabaseSettings { /// /// NOTE: only finalized blocks are subject for removal! pub blocks_pruning: BlocksPruning, + /// The pruning of blocks is delayed for a number of finalizations. + pub delayed_canonicalization: Option, } /// Block pruning settings. 
@@ -481,10 +483,14 @@ pub struct BlockchainDb { leaves: RwLock>>, header_metadata_cache: Arc>, header_cache: Mutex>>, + delay_canonicalization: Option, } impl BlockchainDb { - fn new(db: Arc>) -> ClientResult { + fn new( + db: Arc>, + delay_canonicalization: Option, + ) -> ClientResult { let meta = read_meta::(&*db, columns::HEADER)?; let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; Ok(BlockchainDb { @@ -493,6 +499,7 @@ impl BlockchainDb { meta: Arc::new(RwLock::new(meta)), header_metadata_cache: Arc::new(HeaderMetadataCache::default()), header_cache: Default::default(), + delay_canonicalization, }) } @@ -671,8 +678,17 @@ impl sc_client_api::blockchain::Backend for BlockchainDb, + mut block_number: NumberFor, ) -> ClientResult> { + if let Some(delayed) = self.delay_canonicalization { + // No displaced leaves + if block_number < delayed.into() { + return Ok(Default::default()) + } + + block_number = block_number.saturating_sub(delayed.into()); + } + Ok(self .leaves .read() @@ -1045,6 +1061,7 @@ pub struct Backend { offchain_storage: offchain::LocalStorage, blockchain: BlockchainDb, canonicalization_delay: u64, + delayed_canonicalization: Option, import_lock: Arc>, is_archive: bool, blocks_pruning: BlocksPruning, @@ -1080,7 +1097,11 @@ impl Backend { /// Create new memory-backed client backend for tests. #[cfg(any(test, feature = "test-helpers"))] pub fn new_test(blocks_pruning: u32, canonicalization_delay: u64) -> Self { - Self::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), canonicalization_delay) + Self::new_test_with_tx_storage( + BlocksPruning::Some(blocks_pruning), + canonicalization_delay, + None, + ) } /// Create new memory-backed client backend for tests. 
@@ -1088,6 +1109,7 @@ impl Backend { pub fn new_test_with_tx_storage( blocks_pruning: BlocksPruning, canonicalization_delay: u64, + delayed_canonicalization: Option, ) -> Self { let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); let db = sp_database::as_database(db); @@ -1101,6 +1123,7 @@ impl Backend { state_pruning: Some(state_pruning), source: DatabaseSource::Custom { db, require_create_flag: true }, blocks_pruning, + delayed_canonicalization, }; Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") @@ -1123,6 +1146,62 @@ impl Backend { self.storage.clone() } + /// Ensure that the gap between the canonicalized and + /// finalized block is filled by canonicalizing all the blocks + /// up to the finalized block. + /// + /// The canonicalized block could be behind the finalized block + /// in the case of delayed pruning. This inconsistency could + /// cause the database to misbehave if started without this + /// option. + fn remove_canonicalization_gap( + &self, + transaction: &mut Transaction, + canonicalized_num: u64, + finalized_num: u64, + ) -> ClientResult<()> { + trace!(target: "db", "Last canonicalized block #{} and last finalized #{}", canonicalized_num, finalized_num); + + // Canonicalize every block from the last canonicalized + // to the finalized block. + for num in canonicalized_num + 1..=finalized_num { + self.canonicalize_block(transaction, num)?; + } + + Ok(()) + } + + /// Canonicalize the given block number. + fn canonicalize_block( + &self, + transaction: &mut Transaction, + number: u64, + ) -> ClientResult<()> { + let hash = sc_client_api::blockchain::HeaderBackend::hash( + &self.blockchain, + number.saturated_into(), + )?
+ .ok_or_else(|| { + sp_blockchain::Error::Backend(format!( + "Can't canonicalize missing block number #{} on startup", + number, + )) + })?; + + if !sc_client_api::Backend::have_state_at(self, hash, number.saturated_into()) { + return Ok(()) + } + + trace!(target: "db", "Canonicalize block #{} ({:?}) on startup ", number, hash); + let commit = self.storage.state_db.canonicalize_block(&hash).map_err( + sp_blockchain::Error::from_state_db::< + sc_state_db::Error, + >, + )?; + apply_state_commit(transaction, commit); + Ok(()) + } + fn from_database( db: Arc>, canonicalization_delay: u64, @@ -1146,8 +1225,9 @@ impl Backend { apply_state_commit(&mut db_init_transaction, state_db_init_commit_set); let state_pruning_used = state_db.pruning_mode(); + let canonicalized_num = state_db.best_canonical(); let is_archive_pruning = state_pruning_used.is_archive(); - let blockchain = BlockchainDb::new(db.clone())?; + let blockchain = BlockchainDb::new(db.clone(), config.delayed_canonicalization)?; let storage_db = StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() }; @@ -1159,6 +1239,7 @@ impl Backend { offchain_storage, blockchain, canonicalization_delay, + delayed_canonicalization: config.delayed_canonicalization, import_lock: Default::default(), is_archive: is_archive_pruning, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), @@ -1172,6 +1253,8 @@ impl Backend { // Older DB versions have no last state key. Check if the state is available and set it. 
let info = backend.blockchain.info(); + let finalized_num: NumberFor = info.finalized_number; + let finalized_num = finalized_num.saturated_into::(); if info.finalized_state.is_none() && info.finalized_hash != Default::default() && sc_client_api::Backend::have_state_at( @@ -1188,6 +1271,12 @@ impl Backend { }); } + backend.remove_canonicalization_gap( + &mut db_init_transaction, + canonicalized_num.unwrap_or(finalized_num), + finalized_num, + )?; + db.commit(db_init_transaction)?; Ok(backend) @@ -1691,18 +1780,35 @@ impl Backend { &self, transaction: &mut Transaction, f_header: &Block::Header, - f_hash: Block::Hash, + mut f_hash: Block::Hash, displaced: &mut Option>>, with_state: bool, ) -> ClientResult<()> { - let f_num = *f_header.number(); - + let mut f_num = *f_header.number(); let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash)?; if with_state { transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key.clone()); } transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + // Update the "finalized" number and hash for pruning of N - delay. + // This implies handling both cases: + // - pruning in the state-db via `canonicalize_block` + // - pruning in db via displaced leaves and `prune_blocks` + if let Some(delayed) = self.delayed_canonicalization { + // No blocks to prune in this window. 
+ if f_num < delayed.into() { + return Ok(()) + } + + let f_actual = f_num; + f_num = f_num.saturating_sub(delayed.into()); + debug!(target: "db", "Mark finalized #{} and canonicalize #{}", f_actual, f_num); + f_hash = self.blockchain.hash(f_num)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!("Unknown block number {}", f_num)) + })?; + } + if sc_client_api::Backend::have_state_at(self, f_hash, f_num) && self.storage .state_db @@ -2532,6 +2638,7 @@ pub(crate) mod tests { state_pruning: Some(PruningMode::blocks_pruning(1)), source: DatabaseSource::Custom { db: backing, require_create_flag: false }, blocks_pruning: BlocksPruning::KeepFinalized, + delayed_canonicalization: None, }, 0, ) @@ -3188,7 +3295,7 @@ pub(crate) mod tests { #[test] fn prune_blocks_on_finalize() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 0); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 0, None); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3224,7 +3331,7 @@ pub(crate) mod tests { #[test] fn prune_blocks_on_finalize_in_keep_all() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 0); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 0, None); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3259,7 +3366,7 @@ pub(crate) mod tests { #[test] fn prune_blocks_on_finalize_with_fork_in_keep_all() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::KeepAll, 10, None); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3329,7 +3436,7 @@ pub(crate) mod tests { #[test] fn prune_blocks_on_finalize_with_fork() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); + let backend = 
Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10, None); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..5 { @@ -3390,7 +3497,7 @@ pub(crate) mod tests { #[test] fn indexed_data_block_body() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10, None); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3434,7 +3541,7 @@ pub(crate) mod tests { #[test] fn index_invalid_size() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10, None); let x0 = ExtrinsicWrapper::from(0u64).encode(); let x1 = ExtrinsicWrapper::from(1u64).encode(); @@ -3469,7 +3576,7 @@ pub(crate) mod tests { #[test] fn renew_transaction_storage() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10, None); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); let x1 = ExtrinsicWrapper::from(0u64).encode(); @@ -3516,7 +3623,7 @@ pub(crate) mod tests { #[test] fn remove_leaf_block_works() { - let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10); + let backend = Backend::::new_test_with_tx_storage(BlocksPruning::Some(2), 10, None); let mut blocks = Vec::new(); let mut prev_hash = Default::default(); for i in 0..2 { @@ -3793,4 +3900,159 @@ pub(crate) mod tests { assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2]); assert_eq!(backend.blockchain().info().best_hash, block2); } + + #[test] + fn delayed_prune_blocks_on_finalize() { + let backend = + Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 0, Some(2)); + let ext = Default::default(); + let hash_0 = + insert_block(&backend, 0, Default::default(), None, ext, vec![0.into()], 
None).unwrap(); + let hash_1 = insert_block(&backend, 1, hash_0, None, ext, vec![1.into()], None).unwrap(); + + // Block tree: + // 0 -> 1 + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, hash_1).unwrap(); + op.mark_finalized(hash_0, None).unwrap(); + op.mark_finalized(hash_1, None).unwrap(); + backend.commit_operation(op).unwrap(); + + let bc = backend.blockchain(); + // Delayed pruning must keep both blocks around. + assert_eq!(Some(vec![0.into()]), bc.body(hash_0).unwrap()); + assert_eq!(Some(vec![1.into()]), bc.body(hash_1).unwrap()); + + // Block tree: + // 0 -> 1 -> 2 -> 3 -> 4 + let hash_2 = insert_block(&backend, 2, hash_1, None, ext, vec![2.into()], None).unwrap(); + let hash_3 = insert_block(&backend, 3, hash_2, None, ext, vec![3.into()], None).unwrap(); + let hash_4 = insert_block(&backend, 4, hash_3, None, ext, vec![4.into()], None).unwrap(); + + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, hash_4).unwrap(); + op.mark_finalized(hash_2, None).unwrap(); + op.mark_finalized(hash_3, None).unwrap(); + op.mark_finalized(hash_4, None).unwrap(); + backend.commit_operation(op).unwrap(); + + // We keep 3 blocks around: 1 from `BlocksPruning` mode and 2 from delayed pruning. 
+ assert!(bc.body(hash_0).unwrap().is_none()); + assert!(bc.body(hash_1).unwrap().is_none()); + assert_eq!(Some(vec![2.into()]), bc.body(hash_2).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(hash_3).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(hash_4).unwrap()); + } + + #[test] + fn delayed_prune_blocks_on_finalize_with_fork() { + let backend = + Backend::::new_test_with_tx_storage(BlocksPruning::Some(1), 10, Some(2)); + let mut blocks = Vec::new(); + let mut prev_hash = Default::default(); + + // Block tree: + // 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 + for i in 0..7 { + let hash = insert_block( + &backend, + i, + prev_hash, + None, + Default::default(), + vec![i.into()], + None, + ) + .unwrap(); + blocks.push(hash); + prev_hash = hash; + } + + // Insert a fork at the third block. + // Block tree: + // 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 + // 2 -> 3 + let fork_hash_root = + insert_block(&backend, 3, blocks[2], None, H256::random(), vec![31.into()], None) + .unwrap(); + + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_head(blocks[4]).unwrap(); + backend.commit_operation(op).unwrap(); + + // Mark blocks 0, 1, 2, 3 as finalized. + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[3]).unwrap(); + op.mark_finalized(blocks[0], None).unwrap(); + op.mark_finalized(blocks[1], None).unwrap(); + op.mark_finalized(blocks[2], None).unwrap(); + op.mark_finalized(blocks[3], None).unwrap(); + backend.commit_operation(op).unwrap(); + + let bc = backend.blockchain(); + // Block 0 is pruned. 
+ assert!(bc.body(blocks[0]).unwrap().is_none()); + assert_eq!(Some(vec![1.into()]), bc.body(blocks[1]).unwrap()); + assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!(Some(vec![6.into()]), bc.body(blocks[6]).unwrap()); + assert_eq!(Some(vec![31.into()]), bc.body(fork_hash_root).unwrap()); + + // Mark block 4 as finalized. + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[4]).unwrap(); + op.mark_finalized(blocks[4], None).unwrap(); + backend.commit_operation(op).unwrap(); + + // Block 1 is pruned. + assert!(bc.body(blocks[1]).unwrap().is_none()); + assert_eq!(Some(vec![2.into()]), bc.body(blocks[2]).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!(Some(vec![6.into()]), bc.body(blocks[6]).unwrap()); + assert_eq!(Some(vec![31.into()]), bc.body(fork_hash_root).unwrap()); + + // Mark block 5 as finalized. + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[5]).unwrap(); + op.mark_finalized(blocks[5], None).unwrap(); + backend.commit_operation(op).unwrap(); + + // Block 2 is pruned along with its fork. + assert!(bc.body(blocks[2]).unwrap().is_none()); + assert_eq!(Some(vec![31.into()]), bc.body(fork_hash_root).unwrap()); + assert_eq!(Some(vec![3.into()]), bc.body(blocks[3]).unwrap()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!(Some(vec![6.into()]), bc.body(blocks[6]).unwrap()); + + // Ensure the forked leaf 3 is properly stated here. 
+ let displaced = backend.blockchain().displaced_leaves_after_finalizing(6).unwrap(); + assert_eq!(1, displaced.len()); + assert_eq!(fork_hash_root, displaced[0]); + + // Mark block 6 as finalized. + // Because we delay prune by 2, when we finalize block 6 we are actually + // pruning at block 4. The displaced leaves for block 4 are computed + // at height (block number - 1) = 3. This is the time when the fork + // is picked up for pruning. + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, blocks[6]).unwrap(); + op.mark_finalized(blocks[6], None).unwrap(); + backend.commit_operation(op).unwrap(); + + assert!(bc.body(blocks[3]).unwrap().is_none()); + assert!(bc.body(fork_hash_root).unwrap().is_none()); + assert_eq!(Some(vec![4.into()]), bc.body(blocks[4]).unwrap()); + assert_eq!(Some(vec![5.into()]), bc.body(blocks[5]).unwrap()); + assert_eq!(Some(vec![6.into()]), bc.body(blocks[6]).unwrap()); + + // No leaves to report for theoretical node 7. + let displaced = backend.blockchain().displaced_leaves_after_finalizing(7).unwrap(); + assert!(displaced.is_empty()); + } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 3cb064ec814c5..8835e511e94f9 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -214,6 +214,7 @@ where state_pruning: config.state_pruning.clone(), source: config.database.clone(), blocks_pruning: config.blocks_pruning, + delayed_canonicalization: config.delayed_canonicalization, }; let backend = new_db_backend(db_config)?; @@ -277,7 +278,6 @@ where Block: BlockT, { const CANONICALIZATION_DELAY: u64 = 4096; - Ok(Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?)) } diff --git a/client/service/src/config.rs b/client/service/src/config.rs index bca0697bcbd08..d2a49c87aa27a 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -77,6 +77,8 @@ pub struct Configuration { /// /// NOTE: only finalized blocks are subject for
removal! pub blocks_pruning: BlocksPruning, + /// Enable the delayed pruning of blocks. + pub delayed_canonicalization: Option, /// Chain configuration. pub chain_spec: Box, /// Wasm execution method. diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 788f119130ac0..916ac4bb6d356 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1221,6 +1221,7 @@ fn doesnt_import_blocks_that_revert_finality() { state_pruning: Some(PruningMode::ArchiveAll), blocks_pruning: BlocksPruning::KeepAll, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, + delayed_canonicalization: None, }, u64::MAX, ) @@ -1447,6 +1448,7 @@ fn returns_status_for_pruned_blocks() { state_pruning: Some(PruningMode::blocks_pruning(1)), blocks_pruning: BlocksPruning::KeepFinalized, source: DatabaseSource::RocksDb { path: tmp.path().into(), cache_size: 1024 }, + delayed_canonicalization: None, }, u64::MAX, ) diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 5d29d34a3cbf2..cedd4b480a2fd 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -235,6 +235,7 @@ fn node_config< trie_cache_maximum_size: Some(16 * 1024 * 1024), state_pruning: Default::default(), blocks_pruning: BlocksPruning::KeepFinalized, + delayed_canonicalization: Some(32), chain_spec: Box::new((*spec).clone()), wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, wasm_runtime_overrides: Default::default(), diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index d3e71f0ad28d6..f836c704a06c2 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -102,8 +102,11 @@ impl /// Create new `TestClientBuilder` with default backend and storage chain mode pub fn with_tx_storage(blocks_pruning: u32) -> Self { - let backend = - Arc::new(Backend::new_test_with_tx_storage(BlocksPruning::Some(blocks_pruning), 0)); + let 
backend = Arc::new(Backend::new_test_with_tx_storage( + BlocksPruning::Some(blocks_pruning), + 0, + None, + )); Self::with_backend(backend) } }