diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs
index 21e4852e581d..1c40394fcb4c 100644
--- a/node/core/av-store/src/lib.rs
+++ b/node/core/av-store/src/lib.rs
@@ -253,7 +253,7 @@ fn delete_meta(tx: &mut DBTransaction, config: &Config, hash: &CandidateHash) {
 
 fn delete_unfinalized_height(tx: &mut DBTransaction, config: &Config, block_number: BlockNumber) {
 	let prefix = (UNFINALIZED_PREFIX, BEBlockNumber(block_number)).encode();
-	tx.delete_prefix(config.col_meta, &prefix);
+	tx.delete_prefix(config.col_meta_ordered, &prefix);
 }
 
 fn delete_unfinalized_inclusion(
@@ -266,7 +266,7 @@ fn delete_unfinalized_inclusion(
 	let key =
 		(UNFINALIZED_PREFIX, BEBlockNumber(block_number), block_hash, candidate_hash).encode();
 
-	tx.delete(config.col_meta, &key[..]);
+	tx.delete(config.col_meta_ordered, &key[..]);
 }
 
 fn delete_pruning_key(
@@ -276,7 +276,7 @@ fn delete_pruning_key(
 	h: &CandidateHash,
 ) {
 	let key = (PRUNE_BY_TIME_PREFIX, t.into(), h).encode();
-	tx.delete(config.col_meta, &key);
+	tx.delete(config.col_meta_ordered, &key);
 }
 
 fn write_pruning_key(
@@ -287,7 +287,7 @@ fn write_pruning_key(
 ) {
 	let t = t.into();
 	let key = (PRUNE_BY_TIME_PREFIX, t, h).encode();
-	tx.put(config.col_meta, &key, TOMBSTONE_VALUE);
+	tx.put(config.col_meta_ordered, &key, TOMBSTONE_VALUE);
 }
 
 fn finalized_block_range(finalized: BlockNumber) -> (Vec<u8>, Vec<u8>) {
@@ -306,7 +306,7 @@ fn write_unfinalized_block_contains(
 	ch: &CandidateHash,
 ) {
 	let key = (UNFINALIZED_PREFIX, BEBlockNumber(n), h, ch).encode();
-	tx.put(config.col_meta, &key, TOMBSTONE_VALUE);
+	tx.put(config.col_meta_ordered, &key, TOMBSTONE_VALUE);
 }
 
 fn pruning_range(now: impl Into<BETimestamp>) -> (Vec<u8>, Vec<u8>) {
@@ -424,6 +424,9 @@ pub struct Config {
 	pub col_data: u32,
 	/// The column family for availability store meta information.
 	pub col_meta: u32,
+	/// Second column family for availability store meta information,
+	/// content must be ordered.
+	pub col_meta_ordered: u32,
 }
 
 trait Clock: Send + Sync {
@@ -833,7 +836,7 @@ where
 	let batch_num = {
 		let mut iter = subsystem
 			.db
-			.iter_with_prefix(subsystem.config.col_meta, &start_prefix)
+			.iter_with_prefix(subsystem.config.col_meta_ordered, &start_prefix)
 			.take_while(|(k, _)| &k[..] < &end_prefix[..])
 			.peekable();
 
@@ -881,7 +884,7 @@ where
 
 	let iter = subsystem
 		.db
-		.iter_with_prefix(subsystem.config.col_meta, &start_prefix)
+		.iter_with_prefix(subsystem.config.col_meta_ordered, &start_prefix)
 		.take_while(|(k, _)| &k[..] < &end_prefix[..])
 		.peekable();
 
@@ -1228,11 +1231,11 @@ fn prune_all(db: &Arc<dyn Database>, config: &Config, clock: &dyn Clock) -> Resu
 	let mut tx = DBTransaction::new();
 	let iter = db
-		.iter_with_prefix(config.col_meta, &range_start[..])
+		.iter_with_prefix(config.col_meta_ordered, &range_start[..])
 		.take_while(|(k, _)| &k[..] < &range_end[..]);
 
 	for (k, _v) in iter {
-		tx.delete(config.col_meta, &k[..]);
+		tx.delete(config.col_meta_ordered, &k[..]);
 
 		let (_, candidate_hash) = match decode_pruning_key(&k[..]) {
 			Ok(m) => m,
diff --git a/node/core/av-store/src/tests.rs b/node/core/av-store/src/tests.rs
index e7029b4874b6..305f8090d399 100644
--- a/node/core/av-store/src/tests.rs
+++ b/node/core/av-store/src/tests.rs
@@ -41,10 +41,15 @@ use sp_keyring::Sr25519Keyring;
 mod columns {
 	pub const DATA: u32 = 0;
 	pub const META: u32 = 1;
-	pub const NUM_COLUMNS: u32 = 2;
+	pub const META_ORDERED: u32 = 2;
+	pub const NUM_COLUMNS: u32 = 3;
 }
 
-const TEST_CONFIG: Config = Config { col_data: columns::DATA, col_meta: columns::META };
+const TEST_CONFIG: Config = Config {
+	col_data: columns::DATA,
+	col_meta: columns::META,
+	col_meta_ordered: columns::META_ORDERED,
+};
 
 type VirtualOverseer = test_helpers::TestSubsystemContextHandle<AvailabilityStoreMessage>;
 
@@ -198,8 +203,10 @@ fn candidate_included(receipt: CandidateReceipt) -> CandidateEvent {
 #[cfg(test)]
 fn test_store() -> Arc<dyn Database> {
 	let db = kvdb_memorydb::create(columns::NUM_COLUMNS);
-	let db =
-		polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[columns::META]);
+	let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(
+		db,
+		&[columns::META_ORDERED],
+	);
 	Arc::new(db)
 }
 
diff --git a/node/core/chain-selection/src/db_backend/v1.rs b/node/core/chain-selection/src/db_backend/v1.rs
index 1e6f9fbbb7df..55cea77b12cc 100644
--- a/node/core/chain-selection/src/db_backend/v1.rs
+++ b/node/core/chain-selection/src/db_backend/v1.rs
@@ -190,6 +190,9 @@ impl From<BlockEntry> for crate::BlockEntry {
 pub struct Config {
 	/// The column where block metadata is stored.
 	pub col_data: u32,
+	/// The column where block metadata is stored,
+	/// ordered.
+	pub col_data_ordered: u32,
 }
 
 /// The database backend.
@@ -220,7 +223,7 @@ impl Backend for DbBackend {
 	fn load_stagnant_at(&self, timestamp: crate::Timestamp) -> Result<Vec<Hash>, Error> {
 		load_decode::<Vec<Hash>>(
 			&*self.inner,
-			self.config.col_data,
+			self.config.col_data_ordered,
 			&stagnant_at_key(timestamp.into()),
 		)
 		.map(|o| o.unwrap_or_default())
@@ -230,8 +233,9 @@ impl Backend for DbBackend {
 		&self,
 		up_to: crate::Timestamp,
 	) -> Result<Vec<(crate::Timestamp, Vec<Hash>)>, Error> {
-		let stagnant_at_iter =
-			self.inner.iter_with_prefix(self.config.col_data, &STAGNANT_AT_PREFIX[..]);
+		let stagnant_at_iter = self
+			.inner
+			.iter_with_prefix(self.config.col_data_ordered, &STAGNANT_AT_PREFIX[..]);
 
 		let val = stagnant_at_iter
 			.filter_map(|(k, v)| {
@@ -247,8 +251,9 @@ impl Backend for DbBackend {
 	}
 
 	fn load_first_block_number(&self) -> Result<Option<BlockNumber>, Error> {
-		let blocks_at_height_iter =
-			self.inner.iter_with_prefix(self.config.col_data, &BLOCK_HEIGHT_PREFIX[..]);
+		let blocks_at_height_iter = self
+			.inner
+			.iter_with_prefix(self.config.col_data_ordered, &BLOCK_HEIGHT_PREFIX[..]);
 
 		let val = blocks_at_height_iter
 			.filter_map(|(k, _)| decode_block_height_key(&k[..]))
@@ -258,8 +263,12 @@ impl Backend for DbBackend {
 	}
 
 	fn load_blocks_by_number(&self, number: BlockNumber) -> Result<Vec<Hash>, Error> {
-		load_decode::<Vec<Hash>>(&*self.inner, self.config.col_data, &block_height_key(number))
-			.map(|o| o.unwrap_or_default())
+		load_decode::<Vec<Hash>>(
+			&*self.inner,
+			self.config.col_data_ordered,
+			&block_height_key(number),
+		)
+		.map(|o| o.unwrap_or_default())
 	}
 
 	/// Atomically write the list of operations, with later operations taking precedence over prior.
@@ -280,10 +289,10 @@ impl Backend for DbBackend {
 				},
 				BackendWriteOp::WriteBlocksByNumber(block_number, v) =>
 					if v.is_empty() {
-						tx.delete(self.config.col_data, &block_height_key(block_number));
+						tx.delete(self.config.col_data_ordered, &block_height_key(block_number));
 					} else {
 						tx.put_vec(
-							self.config.col_data,
+							self.config.col_data_ordered,
 							&block_height_key(block_number),
 							v.encode(),
 						);
@@ -299,24 +308,24 @@ impl Backend for DbBackend {
 				BackendWriteOp::WriteStagnantAt(timestamp, stagnant_at) => {
 					let timestamp: Timestamp = timestamp.into();
 					if stagnant_at.is_empty() {
-						tx.delete(self.config.col_data, &stagnant_at_key(timestamp));
+						tx.delete(self.config.col_data_ordered, &stagnant_at_key(timestamp));
 					} else {
 						tx.put_vec(
-							self.config.col_data,
+							self.config.col_data_ordered,
 							&stagnant_at_key(timestamp),
 							stagnant_at.encode(),
 						);
 					}
 				},
 				BackendWriteOp::DeleteBlocksByNumber(block_number) => {
-					tx.delete(self.config.col_data, &block_height_key(block_number));
+					tx.delete(self.config.col_data_ordered, &block_height_key(block_number));
 				},
 				BackendWriteOp::DeleteBlockEntry(hash) => {
 					tx.delete(self.config.col_data, &block_entry_key(&hash));
 				},
 				BackendWriteOp::DeleteStagnantAt(timestamp) => {
 					let timestamp: Timestamp = timestamp.into();
-					tx.delete(self.config.col_data, &stagnant_at_key(timestamp));
+					tx.delete(self.config.col_data_ordered, &stagnant_at_key(timestamp));
 				},
 			}
 		}
@@ -389,7 +398,7 @@ mod tests {
 
 	#[cfg(test)]
 	fn test_db() -> Arc<dyn Database> {
-		let db = kvdb_memorydb::create(1);
+		let db = kvdb_memorydb::create(2);
 		let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, &[1]);
 		Arc::new(db)
 	}
@@ -433,7 +442,7 @@ mod tests {
 	#[test]
 	fn write_read_block_entry() {
 		let db = test_db();
-		let config = Config { col_data: 0 };
+		let config = Config { col_data: 0, col_data_ordered: 1 };
 
 		let mut backend = DbBackend::new(db, config);
@@ -463,7 +472,7 @@ mod tests {
 	#[test]
 	fn delete_block_entry() {
 		let db = test_db();
-		let config = Config { col_data: 0 };
+		let config = Config { col_data: 0, col_data_ordered: 1 };
 
 		let mut backend = DbBackend::new(db, config);
@@ -494,7 +503,7 @@ mod tests {
 	#[test]
 	fn earliest_block_number() {
 		let db = test_db();
-		let config = Config { col_data: 0 };
+		let config = Config { col_data: 0, col_data_ordered: 1 };
 
 		let mut backend = DbBackend::new(db, config);
@@ -523,7 +532,7 @@ mod tests {
 	#[test]
 	fn stagnant_at_up_to() {
 		let db = test_db();
-		let config = Config { col_data: 0 };
+		let config = Config { col_data: 0, col_data_ordered: 1 };
 
 		let mut backend = DbBackend::new(db, config);
@@ -579,7 +588,7 @@ mod tests {
 	#[test]
 	fn write_read_blocks_at_height() {
 		let db = test_db();
-		let config = Config { col_data: 0 };
+		let config = Config { col_data: 0, col_data_ordered: 1 };
 
 		let mut backend = DbBackend::new(db, config);
diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs
index 8e4cfb6ec240..ae6bc2d62ea8 100644
--- a/node/core/chain-selection/src/lib.rs
+++ b/node/core/chain-selection/src/lib.rs
@@ -299,6 +299,9 @@ impl StagnantCheckInterval {
 pub struct Config {
 	/// The column in the database that the storage should use.
 	pub col_data: u32,
+	/// The column in the database that the storage should use,
+	/// ordered.
+	pub col_data_ordered: u32,
 	/// How often to check for stagnant blocks.
 	pub stagnant_check_interval: StagnantCheckInterval,
 }
@@ -325,7 +328,10 @@ where
 	fn start(self, ctx: Context) -> SpawnedSubsystem {
 		let backend = crate::db_backend::v1::DbBackend::new(
 			self.db,
-			crate::db_backend::v1::Config { col_data: self.config.col_data },
+			crate::db_backend::v1::Config {
+				col_data: self.config.col_data,
+				col_data_ordered: self.config.col_data_ordered,
+			},
 		);
 
 		SpawnedSubsystem {
diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs
index 1fff19ee5846..3767f3b1c654 100644
--- a/node/service/src/lib.rs
+++ b/node/service/src/lib.rs
@@ -877,9 +877,17 @@ where
 		},
 	};
 
+	let col_meta_ordered = if parachains_db
+		.is_indexed_column(crate::parachains_db::REAL_COLUMNS.col_availability_meta)
+	{
+		crate::parachains_db::REAL_COLUMNS.col_availability_meta
+	} else {
+		crate::parachains_db::REAL_COLUMNS.col_availability_meta_ordered
+	};
 	let availability_config = AvailabilityConfig {
 		col_data: crate::parachains_db::REAL_COLUMNS.col_availability_data,
 		col_meta: crate::parachains_db::REAL_COLUMNS.col_availability_meta,
+		col_meta_ordered,
 	};
 
 	let approval_voting_config = ApprovalVotingConfig {
@@ -899,8 +907,17 @@ where
 		},
 	};
 
+	let col_data_ordered = if parachains_db
+		.is_indexed_column(crate::parachains_db::REAL_COLUMNS.col_chain_selection_data)
+	{
+		crate::parachains_db::REAL_COLUMNS.col_chain_selection_data
+	} else {
+		crate::parachains_db::REAL_COLUMNS.col_chain_selection_data_ordered
+	};
+
 	let chain_selection_config = ChainSelectionConfig {
 		col_data: crate::parachains_db::REAL_COLUMNS.col_chain_selection_data,
+		col_data_ordered,
 		stagnant_check_interval: chain_selection_subsystem::StagnantCheckInterval::never(),
 	};
 
diff --git a/node/service/src/parachains_db/mod.rs b/node/service/src/parachains_db/mod.rs
index e5905004d70e..893c55e973ef 100644
--- a/node/service/src/parachains_db/mod.rs
+++ b/node/service/src/parachains_db/mod.rs
@@ -26,14 +26,19 @@ pub(crate) mod columns {
 	pub mod v0 {
 		pub const NUM_COLUMNS: u32 = 3;
 	}
 
-	pub const NUM_COLUMNS: u32 = 5;
+	pub const NUM_COLUMNS: u32 = 7;
+	pub const NUM_COLUMNS_ROCKSDB: u32 = 5;
 	pub const COL_AVAILABILITY_DATA: u32 = 0;
 	pub const COL_AVAILABILITY_META: u32 = 1;
 	pub const COL_APPROVAL_DATA: u32 = 2;
 	pub const COL_CHAIN_SELECTION_DATA: u32 = 3;
 	pub const COL_DISPUTE_COORDINATOR_DATA: u32 = 4;
-	pub const ORDERED_COL: &[u32] = &[COL_AVAILABILITY_META, COL_CHAIN_SELECTION_DATA];
+	pub const COL_AVAILABILITY_META_ORDERED: u32 = 5;
+	pub const COL_CHAIN_SELECTION_DATA_ORDERED: u32 = 6;
+	pub const ORDERED_COL_ROCKSDB: &[u32] = &[COL_AVAILABILITY_META, COL_CHAIN_SELECTION_DATA];
+	pub const ORDERED_COL_PARITY_DB: &[u32] =
+		&[COL_AVAILABILITY_META_ORDERED, COL_CHAIN_SELECTION_DATA_ORDERED];
 }
 
 /// Columns used by different subsystems.
@@ -44,10 +49,16 @@ pub struct ColumnsConfig {
 	pub col_availability_data: u32,
 	/// The column used by the av-store for meta information.
 	pub col_availability_meta: u32,
+	/// Second column used by the av-store for meta information,
+	/// ordered.
+	pub col_availability_meta_ordered: u32,
 	/// The column used by approval voting for data.
 	pub col_approval_data: u32,
 	/// The column used by chain selection for data.
 	pub col_chain_selection_data: u32,
+	/// Second column used by chain selection for data,
+	/// ordered.
+	pub col_chain_selection_data_ordered: u32,
 	/// The column used by dispute coordinator for data.
 	pub col_dispute_coordinator_data: u32,
 }
 
@@ -57,8 +68,10 @@ pub struct ColumnsConfig {
 pub const REAL_COLUMNS: ColumnsConfig = ColumnsConfig {
 	col_availability_data: columns::COL_AVAILABILITY_DATA,
 	col_availability_meta: columns::COL_AVAILABILITY_META,
+	col_availability_meta_ordered: columns::COL_AVAILABILITY_META_ORDERED,
 	col_approval_data: columns::COL_APPROVAL_DATA,
 	col_chain_selection_data: columns::COL_CHAIN_SELECTION_DATA,
+	col_chain_selection_data_ordered: columns::COL_CHAIN_SELECTION_DATA_ORDERED,
 	col_dispute_coordinator_data: columns::COL_DISPUTE_COORDINATOR_DATA,
 };
 
@@ -94,7 +107,7 @@ pub fn open_creating_rocksdb(
 	let path = root.join("parachains").join("db");
 
-	let mut db_config = DatabaseConfig::with_columns(columns::NUM_COLUMNS);
+	let mut db_config = DatabaseConfig::with_columns(columns::NUM_COLUMNS_ROCKSDB);
 
 	let _ = db_config
 		.memory_budget
@@ -113,8 +126,10 @@ pub fn open_creating_rocksdb(
 	std::fs::create_dir_all(&path_str)?;
 	upgrade::try_upgrade_db(&path)?;
 	let db = Database::open(&db_config, &path_str)?;
-	let db =
-		polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(db, columns::ORDERED_COL);
+	let db = polkadot_node_subsystem_util::database::kvdb_impl::DbAdapter::new(
+		db,
+		columns::ORDERED_COL_ROCKSDB,
+	);
 
 	Ok(Arc::new(db))
 }
 
@@ -139,7 +154,7 @@ pub fn open_creating(root: PathBuf, _cache_sizes: CacheSizes) -> io::Result
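A note on why these columns must be ordered (illustrative sketch, not part of the patch): the pruning and finalization paths above iterate a column with iter_with_prefix and stop at an end key via take_while(|(k, _)| &k[..] < &end_prefix[..]), which is only correct when the column yields keys in ascending byte order. RocksDB column families iterate in key order, while the is_indexed_column checks in node/service/src/lib.rs route ParityDB to the dedicated *_ORDERED columns (presumably configured as indexed/btree columns on the ParityDB side). The sketch below reproduces that range-delete pattern with a BTreeMap standing in for an ordered column; prune_range, the key layout and the prefix are made up for illustration and are not part of the real Database API.

// Illustrative sketch only: a BTreeMap plays the role of an ordered column so the
// range-pruning pattern from `prune_all` can be shown in isolation. The real code
// goes through the `Database` trait, `DBTransaction` and SCALE-encoded keys.
use std::collections::BTreeMap;

fn prune_range(col: &mut BTreeMap<Vec<u8>, Vec<u8>>, start: &[u8], end: &[u8]) -> usize {
	// Seek to the start of the range, then stop at the end key. This mirrors
	// `iter_with_prefix(..).take_while(|(k, _)| &k[..] < &end_prefix[..])` and is
	// only correct because iteration is in ascending key order.
	let doomed: Vec<Vec<u8>> = col
		.range(start.to_vec()..)
		.take_while(|(k, _)| &k[..] < end)
		.map(|(k, _)| k.clone())
		.collect();
	for k in &doomed {
		// Corresponds to `tx.delete(config.col_meta_ordered, &k[..])`.
		col.remove(k);
	}
	doomed.len()
}

fn main() {
	let mut col = BTreeMap::new();
	// Keys shaped like (prefix, big-endian timestamp) so byte order matches numeric order.
	col.insert(b"prune_by_time\x00\x01".to_vec(), vec![]);
	col.insert(b"prune_by_time\x00\x05".to_vec(), vec![]);
	col.insert(b"prune_by_time\x00\x09".to_vec(), vec![]);
	// Prune everything scheduled strictly before timestamp 0x06.
	assert_eq!(prune_range(&mut col, b"prune_by_time", b"prune_by_time\x00\x06"), 2);
	assert_eq!(col.len(), 1);
}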