diff --git a/CHANGELOG.md b/CHANGELOG.md
index 22f5f80ea..3edb17907 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,7 @@ git
 # Deoxys Changelog

 ## Next release
+- refactor: refactor mc-db crate
 - feat(api_key): api key passed to FetchConfig correctly
 - feat(api_key): Added support for --gateway-api to avoid rate limit from the gateway
 - fix(latest): Retrieve latest synced block via internal client
diff --git a/Cargo.lock b/Cargo.lock
index 2b0557127..3e4dc3890 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5542,7 +5542,9 @@ dependencies = [
  "glob",
  "libc",
  "libz-sys",
+ "lz4-sys",
  "tikv-jemalloc-sys",
+ "zstd-sys",
 ]

 [[package]]
@@ -5849,14 +5851,13 @@ dependencies = [
 name = "mc-db"
 version = "0.1.0"
 dependencies = [
+ "anyhow",
  "bonsai-trie",
  "deoxys-runtime",
  "ethers",
- "kvdb",
- "kvdb-rocksdb",
  "log",
- "parity-db",
  "parity-scale-codec",
+ "rocksdb",
  "sc-client-db",
  "sp-database",
  "sp-runtime",
diff --git a/Cargo.toml b/Cargo.toml
index 7a88df59f..b735e1501 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -167,9 +167,7 @@ sp-version = { git = "https://github.com/massalabs/polkadot-sdk", branch = "rele
 ] }

 # Substrate client dependencies
-sc-client-db = { git = "https://github.com/massalabs/polkadot-sdk", branch = "release-polkadot-v1.3.0-std", features = [
-  "rocksdb",
-] }
+sc-client-db = { git = "https://github.com/massalabs/polkadot-sdk", branch = "release-polkadot-v1.3.0-std" }
 sc-consensus = { git = "https://github.com/massalabs/polkadot-sdk", branch = "release-polkadot-v1.3.0-std" }
 sc-network = { git = "https://github.com/massalabs/polkadot-sdk", branch = "release-polkadot-v1.3.0-std" }
 sc-network-common = { git = "https://github.com/massalabs/polkadot-sdk", branch = "release-polkadot-v1.3.0-std" }
diff --git a/crates/client/db/Cargo.toml b/crates/client/db/Cargo.toml
index e2b286737..22e0da9a5 100644
--- a/crates/client/db/Cargo.toml
+++ b/crates/client/db/Cargo.toml
@@ -17,7 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"]

 [dependencies]
 # Substrate crates
-parity-db = { version = "0.4.12", optional = true }
 parity-scale-codec = { workspace = true, default-features = true, features = [
   "derive",
 ] }
@@ -39,12 +38,11 @@ starknet_api = { workspace = true, default-features = true, features = [
 ] }

 # Other crates
+anyhow.workspace = true
 ethers = { workspace = true }
-kvdb = "0.13.0"
-kvdb-rocksdb = { version = "0.19.0", optional = true }
 log = { workspace = true, default-features = true }
+rocksdb = { version = "0.21", features = [
+  # "multi-threaded-cf",
+] }
 thiserror = { workspace = true }
 uuid = "1.4.1"
-
-[features]
-default = ["kvdb-rocksdb", "parity-db"]
diff --git a/crates/client/db/src/bonsai_db.rs b/crates/client/db/src/bonsai_db.rs
index 00deac4d7..05c1e9bba 100644
--- a/crates/client/db/src/bonsai_db.rs
+++ b/crates/client/db/src/bonsai_db.rs
@@ -1,265 +1,404 @@
+use std::collections::BTreeMap;
+use std::fmt;
+use std::ops::Deref;
 use std::sync::Arc;

-use bonsai_trie::id::{BasicId, Id};
+use bonsai_trie::id::BasicId;
 use bonsai_trie::{BonsaiDatabase, BonsaiPersistentDatabase, BonsaiStorage, BonsaiStorageConfig, DatabaseKey};
-use kvdb::{DBTransaction, KeyValueDB};
-use starknet_types_core::hash::{Pedersen, Poseidon};
+use rocksdb::{
+    Direction, IteratorMode, OptimisticTransactionOptions, ReadOptions, SnapshotWithThreadMode, Transaction,
+    WriteBatchWithTransaction, WriteOptions,
+};
+use starknet_types_core::hash::StarkHash;

-use crate::error::BonsaiDbError;
+use crate::{BonsaiDbError, Column, DatabaseExt, DB};

-#[derive(Debug)]
-pub enum TrieColumn {
-    Class,
-    Contract,
-    ContractStorage,
+pub type RocksDBTransaction = WriteBatchWithTransaction<true>;
+
+#[derive(Clone, Debug)]
+pub(crate) struct DatabaseKeyMapping {
+    pub(crate) flat: Column,
+    pub(crate) trie: Column,
+    pub(crate) trie_log: Column,
 }

-#[derive(Debug)]
-pub enum KeyType {
-    Trie,
-    Flat,
-    TrieLog,
+impl DatabaseKeyMapping {
+    pub(crate) fn map(&self, key: &DatabaseKey) -> Column {
+        match key {
+            DatabaseKey::Trie(_) => self.trie,
+            DatabaseKey::Flat(_) => self.flat,
+            DatabaseKey::TrieLog(_) => self.trie_log,
+        }
+    }
 }

-pub struct BonsaiConfigs {
-    pub contract: BonsaiStorage<BasicId, BonsaiDb, Pedersen>,
-    pub contract_storage: BonsaiStorage<BasicId, BonsaiDb, Pedersen>,
-    pub class: BonsaiStorage<BasicId, BonsaiDb, Poseidon>,
+/// This struct [`Deref`]s to a readonly [`BonsaiStorage`] for read access,
+/// and creates new [`BonsaiStorage`]s for write access.
+///
+/// If you want the commits to [`BonsaiStorage`] to be written into a local transaction without
+/// sending it to rocksdb, use [`BonsaiStorageAccess::transactional`]. You can then commit the
+/// changes to rocksdb with [`TransactionDb::commit_transaction`].
+pub struct BonsaiStorageAccess<H: StarkHash + Send + Sync> {
+    db: Arc<DB>,
+    column_mapping: DatabaseKeyMapping,
+    readonly: BonsaiStorage<BasicId, BonsaiDbForRead, H>,
 }

+impl<H: StarkHash + Send + Sync> BonsaiStorageAccess<H> {
+    pub(crate) fn new(db: Arc<DB>, column_mapping: DatabaseKeyMapping) -> Self {
+        let bonsai_db = BonsaiDbForRead::new(db.clone(), column_mapping.clone());
+        Self { db, column_mapping, readonly: Self::make_clone(bonsai_db) }
+    }

-impl BonsaiConfigs {
-    pub fn new(contract: BonsaiDb, contract_storage: BonsaiDb, class: BonsaiDb) -> Self {
-        let config = BonsaiStorageConfig::default();
+    fn make_clone<D: BonsaiDatabase>(bonsai_db: D) -> BonsaiStorage<BasicId, D, H> {
+        BonsaiStorage::new(bonsai_db, BonsaiStorageConfig { ..Default::default() })
+            .expect("failed to create bonsai storage")
+    }
+
+    /// Access the bonsai storage for writing.
+    pub fn writable(&self) -> BonsaiStorage<BasicId, BonsaiDb<'_>, H> {
+        Self::make_clone(BonsaiDb::new(&self.db, self.column_mapping.clone()))
+    }
+}

-        let contract =
-            BonsaiStorage::<_, _, Pedersen>::new(contract, config.clone()).expect("Failed to create bonsai storage");
+impl<H: StarkHash + Send + Sync> Deref for BonsaiStorageAccess<H> {
+    type Target = BonsaiStorage<BasicId, BonsaiDbForRead, H>;

-        let contract_storage = BonsaiStorage::<_, _, Pedersen>::new(contract_storage, config.clone())
-            .expect("Failed to create bonsai storage");
+    /// Access the bonsai storage for reading.
+    fn deref(&self) -> &Self::Target {
+        &self.readonly
+    }
+}
-        let class =
-            BonsaiStorage::<_, _, Poseidon>::new(class, config.clone()).expect("Failed to create bonsai storage");
+pub struct BonsaiDbForRead {
+    /// Database interface for key-value operations.
+    db: Arc<DB>,
+    /// Mapping from `DatabaseKey` => rocksdb column name
+    column_mapping: DatabaseKeyMapping,
+}

-        Self { contract, contract_storage, class }
+impl BonsaiDbForRead {
+    pub(crate) fn new(db: Arc<DB>, column_mapping: DatabaseKeyMapping) -> Self {
+        Self { db, column_mapping }
     }
 }

-impl TrieColumn {
-    pub fn to_index(&self, key_type: KeyType) -> u32 {
-        match self {
-            TrieColumn::Class => match key_type {
-                KeyType::Trie => crate::columns::TRIE_BONSAI_CLASSES,
-                KeyType::Flat => crate::columns::FLAT_BONSAI_CLASSES,
-                KeyType::TrieLog => crate::columns::LOG_BONSAI_CLASSES,
-            },
-            TrieColumn::Contract => match key_type {
-                KeyType::Trie => crate::columns::TRIE_BONSAI_CONTRACTS,
-                KeyType::Flat => crate::columns::FLAT_BONSAI_CONTRACTS,
-                KeyType::TrieLog => crate::columns::LOG_BONSAI_CONTRACTS,
-            },
-            TrieColumn::ContractStorage => match key_type {
-                KeyType::Trie => crate::columns::TRIE_BONSAI_CONTRACTS,
-                KeyType::Flat => crate::columns::FLAT_BONSAI_CONTRACTS,
-                KeyType::TrieLog => crate::columns::LOG_BONSAI_CONTRACTS,
-            },
-        }
+impl BonsaiDatabase for BonsaiDbForRead {
+    type Batch = RocksDBTransaction;
+    type DatabaseError = BonsaiDbError;
+
+    fn create_batch(&self) -> Self::Batch {
+        Self::Batch::default()
+    }
+
+    fn get(&self, key: &DatabaseKey) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
+        log::trace!("Getting from RocksDB: {:?}", key);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        Ok(self.db.get_cf(&handle, key.as_slice())?)
+    }
+
+    fn get_by_prefix(&self, prefix: &DatabaseKey) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::DatabaseError> {
+        log::trace!("Getting from RocksDB: {:?}", prefix);
+        let handle = self.db.get_column(self.column_mapping.map(prefix));
+        let iter = self.db.iterator_cf(&handle, IteratorMode::From(prefix.as_slice(), Direction::Forward));
+        Ok(iter
+            .map_while(|kv| {
+                if let Ok((key, value)) = kv {
+                    if key.starts_with(prefix.as_slice()) { Some((key.to_vec(), value.to_vec())) } else { None }
+                } else {
+                    None
+                }
+            })
+            .collect())
+    }
+
+    fn contains(&self, key: &DatabaseKey) -> Result<bool, Self::DatabaseError> {
+        log::trace!("Checking if RocksDB contains: {:?}", key);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        Ok(self.db.get_cf(&handle, key.as_slice()).map(|value| value.is_some())?)
+    }
+
+    fn insert(
+        &mut self,
+        _key: &DatabaseKey,
+        _value: &[u8],
+        _batch: Option<&mut Self::Batch>,
+    ) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
+        unimplemented!()
+    }
+    fn remove(
+        &mut self,
+        _key: &DatabaseKey,
+        _batch: Option<&mut Self::Batch>,
+    ) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
+        unimplemented!()
+    }
+    fn remove_by_prefix(&mut self, _prefix: &DatabaseKey) -> Result<(), Self::DatabaseError> {
+        unimplemented!()
+    }
+    fn write_batch(&mut self, _batch: Self::Batch) -> Result<(), Self::DatabaseError> {
+        unimplemented!()
     }
 }
-/// Represents a Bonsai database instance parameterized by a block type.
-pub struct BonsaiDb {
+/// [`Clone`] does not clone snapshot state.
+pub struct BonsaiDb<'db> {
     /// Database interface for key-value operations.
-    pub(crate) db: Arc<dyn KeyValueDB>,
-    /// Set current column to give trie context
-    pub(crate) current_column: TrieColumn,
+    db: &'db DB,
+    /// Mapping from `DatabaseKey` => rocksdb column name
+    column_mapping: DatabaseKeyMapping,
+    snapshots: BTreeMap<BasicId, SnapshotWithThreadMode<'db, DB>>,
 }

-pub fn key_type(key: &DatabaseKey) -> KeyType {
-    match key {
-        DatabaseKey::Trie(_) => KeyType::Trie,
-        DatabaseKey::Flat(_) => KeyType::Flat,
-        DatabaseKey::TrieLog(_) => KeyType::TrieLog,
+impl<'db> Clone for BonsaiDb<'db> {
+    fn clone(&self) -> Self {
+        Self { db: self.db, column_mapping: self.column_mapping.clone(), snapshots: Default::default() }
     }
 }

-impl BonsaiDatabase for BonsaiDb {
-    type Batch = DBTransaction;
+impl<'db> fmt::Debug for BonsaiDb<'db> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("BonsaiDb").field("db", &self.db).field("column_mapping", &self.column_mapping).finish()
+    }
+}
+
+impl<'db> BonsaiDb<'db> {
+    pub(crate) fn new(db: &'db DB, column_mapping: DatabaseKeyMapping) -> Self {
+        Self { db, column_mapping, snapshots: Default::default() }
+    }
+}
+
+impl<'db> BonsaiDatabase for BonsaiDb<'db> {
+    type Batch = RocksDBTransaction;
     type DatabaseError = BonsaiDbError;

-    /// Creates a new database transaction batch.
     fn create_batch(&self) -> Self::Batch {
-        DBTransaction::new()
+        Self::Batch::default()
     }

-    /// Retrieves a value by its database key.
     fn get(&self, key: &DatabaseKey) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
-        let key_type = key_type(key);
-        let column = self.current_column.to_index(key_type);
-        let key_slice = key.as_slice();
-        self.db.get(column, key_slice).map_err(Into::into)
+        log::trace!("Getting from RocksDB: {:?}", key);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        Ok(self.db.get_cf(&handle, key.as_slice())?)
+    }
+
+    fn get_by_prefix(&self, prefix: &DatabaseKey) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::DatabaseError> {
+        log::trace!("Getting from RocksDB: {:?}", prefix);
+        let handle = self.db.get_column(self.column_mapping.map(prefix));
+        let iter = self.db.iterator_cf(&handle, IteratorMode::From(prefix.as_slice(), Direction::Forward));
+        Ok(iter
+            .map_while(|kv| {
+                if let Ok((key, value)) = kv {
+                    if key.starts_with(prefix.as_slice()) { Some((key.to_vec(), value.to_vec())) } else { None }
+                } else {
+                    None
+                }
+            })
+            .collect())
+    }
+
+    fn contains(&self, key: &DatabaseKey) -> Result<bool, Self::DatabaseError> {
+        log::trace!("Checking if RocksDB contains: {:?}", key);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        Ok(self.db.get_cf(&handle, key.as_slice()).map(|value| value.is_some())?)
     }

-    /// Inserts a key-value pair into the database, optionally within a provided batch.
     fn insert(
         &mut self,
         key: &DatabaseKey,
         value: &[u8],
         batch: Option<&mut Self::Batch>,
     ) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
-        let key_type: KeyType = key_type(key);
-        let column = self.current_column.to_index(key_type);
-        let key_slice = key.as_slice();
-        let previous_value = self.db.get(column, key_slice)?;
-
+        log::trace!("Inserting into RocksDB: {:?} {:?}", key, value);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        let old_value = self.db.get_cf(&handle, key.as_slice())?;
         if let Some(batch) = batch {
-            batch.put(column, key_slice, value);
+            batch.put_cf(&handle, key.as_slice(), value);
         } else {
-            let mut transaction = self.create_batch();
-            transaction.put(column, key_slice, value);
-            self.db.write(transaction)?;
+            self.db.put_cf(&handle, key.as_slice(), value)?;
         }
-
-        Ok(previous_value)
-    }
-
-    /// Checks if a key exists in the database.
-    fn contains(&self, key: &DatabaseKey) -> Result<bool, Self::DatabaseError> {
-        let key_type = key_type(key);
-        let column = self.current_column.to_index(key_type);
-        let key_slice = key.as_slice();
-        self.db.has_key(column, key_slice).map_err(Into::into)
+        Ok(old_value)
     }

-    /// Retrieves all key-value pairs starting with a given prefix.
-    fn get_by_prefix(&self, prefix: &DatabaseKey) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::DatabaseError> {
-        let key_type = key_type(prefix);
-        let column = self.current_column.to_index(key_type);
-        let prefix_slice = prefix.as_slice();
-        let mut result = Vec::new();
-
-        for pair in self.db.iter_with_prefix(column, prefix_slice) {
-            let pair = pair.map_err(BonsaiDbError::from)?;
-            result.push((pair.0.into_vec(), pair.1));
-        }
-
-        Ok(result)
-    }
-
-    /// Removes a key-value pair from the database, optionally within a provided batch.
     fn remove(
         &mut self,
         key: &DatabaseKey,
         batch: Option<&mut Self::Batch>,
     ) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
-        let key_type = key_type(key);
-        let column = self.current_column.to_index(key_type);
-        let key_slice = key.as_slice();
-        let previous_value = self.db.get(column, key_slice)?;
-
+        log::trace!("Removing from RocksDB: {:?}", key);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        let old_value = self.db.get_cf(&handle, key.as_slice())?;
         if let Some(batch) = batch {
-            batch.delete(column, key_slice);
+            batch.delete_cf(&handle, key.as_slice());
         } else {
-            let mut transaction = self.create_batch();
-            transaction.delete(column, key_slice);
-            self.db.write(transaction)?;
+            self.db.delete_cf(&handle, key.as_slice())?;
         }
-
-        Ok(previous_value)
+        Ok(old_value)
     }

-    /// Removes all key-value pairs starting with a given prefix.
     fn remove_by_prefix(&mut self, prefix: &DatabaseKey) -> Result<(), Self::DatabaseError> {
-        let key_type = key_type(prefix);
-        let column = self.current_column.to_index(key_type);
-        let prefix_slice = prefix.as_slice();
-        let mut transaction = self.create_batch();
-        transaction.delete_prefix(column, prefix_slice);
-        self.db.write(transaction).map_err(Into::into)
+        log::trace!("Removing by prefix from RocksDB: {:?}", prefix);
+        let handle = self.db.get_column(self.column_mapping.map(prefix));
+        let iter = self.db.iterator_cf(&handle, IteratorMode::From(prefix.as_slice(), Direction::Forward));
+        let mut batch = self.create_batch();
+        for kv in iter {
+            if let Ok((key, _)) = kv {
+                if key.starts_with(prefix.as_slice()) {
+                    batch.delete_cf(&handle, &key);
+                } else {
+                    break;
+                }
+            } else {
+                break;
+            }
+        }
+        drop(handle);
+        self.write_batch(batch)?;
+        Ok(())
     }

-    /// Writes a batch of changes to the database.
     fn write_batch(&mut self, batch: Self::Batch) -> Result<(), Self::DatabaseError> {
-        self.db.write(batch).map_err(Into::into)
+        Ok(self.db.write(batch)?)
     }
 }

-/// A wrapper around a database transaction.
-pub struct TransactionWrapper {
-    /// The underlying database transaction.
-    _transaction: DBTransaction,
+pub struct BonsaiTransaction<'db> {
+    txn: Transaction<'db, DB>,
+    db: &'db DB,
+    column_mapping: DatabaseKeyMapping,
 }

-/// This implementation is a stub to mute BonsaiPersistentDatabase that is currently not needed.
-impl BonsaiDatabase for TransactionWrapper {
-    type Batch = DBTransaction;
+impl<'db> BonsaiDatabase for BonsaiTransaction<'db> {
+    type Batch = WriteBatchWithTransaction<true>;
     type DatabaseError = BonsaiDbError;

-    /// Creates a new database transaction batch.
     fn create_batch(&self) -> Self::Batch {
-        DBTransaction::new()
+        self.txn.get_writebatch()
     }

-    /// Retrieves a value by its database key.
-    fn get(&self, _key: &DatabaseKey) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
-        // Simulate database access without performing real operations
-        Ok(None)
+    fn get(&self, key: &DatabaseKey) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
+        log::trace!("Getting from RocksDB: {:?}", key);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        Ok(self.txn.get_cf(&handle, key.as_slice())?)
     }

-    /// Inserts a key-value pair into the database.
-    fn insert(
-        &mut self,
-        _key: &DatabaseKey,
-        _value: &[u8],
-        _batch: Option<&mut Self::Batch>,
-    ) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
-        Ok(None)
+    fn get_by_prefix(&self, prefix: &DatabaseKey) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::DatabaseError> {
+        log::trace!("Getting from RocksDB: {:?}", prefix);
+        let handle = self.db.get_column(self.column_mapping.map(prefix));
+        let iter = self.txn.iterator_cf(&handle, IteratorMode::From(prefix.as_slice(), Direction::Forward));
+        Ok(iter
+            .map_while(|kv| {
+                if let Ok((key, value)) = kv {
+                    if key.starts_with(prefix.as_slice()) { Some((key.to_vec(), value.to_vec())) } else { None }
+                } else {
+                    None
+                }
+            })
+            .collect())
     }

-    /// Checks if a key exists in the database.
-    fn contains(&self, _key: &DatabaseKey) -> Result<bool, Self::DatabaseError> {
-        Ok(false)
+    fn contains(&self, key: &DatabaseKey) -> Result<bool, Self::DatabaseError> {
+        log::trace!("Checking if RocksDB contains: {:?}", key);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        Ok(self.txn.get_cf(&handle, key.as_slice()).map(|value| value.is_some())?)
     }

-    /// Retrieves all key-value pairs starting with a given prefix.
-    fn get_by_prefix(&self, _prefix: &DatabaseKey) -> Result<Vec<(Vec<u8>, Vec<u8>)>, Self::DatabaseError> {
-        Ok(Vec::new())
+    fn insert(
+        &mut self,
+        key: &DatabaseKey,
+        value: &[u8],
+        batch: Option<&mut Self::Batch>,
+    ) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
+        log::trace!("Inserting into RocksDB: {:?} {:?}", key, value);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        let old_value = self.txn.get_cf(&handle, key.as_slice())?;
+        if let Some(batch) = batch {
+            batch.put_cf(&handle, key.as_slice(), value);
+        } else {
+            self.txn.put_cf(&handle, key.as_slice(), value)?;
+        }
+        Ok(old_value)
     }

-    /// Removes a key-value pair from the database.
     fn remove(
         &mut self,
-        _key: &DatabaseKey,
-        _batch: Option<&mut Self::Batch>,
+        key: &DatabaseKey,
+        batch: Option<&mut Self::Batch>,
     ) -> Result<Option<Vec<u8>>, Self::DatabaseError> {
-        Ok(None)
+        log::trace!("Removing from RocksDB: {:?}", key);
+        let handle = self.db.get_column(self.column_mapping.map(key));
+        let old_value = self.txn.get_cf(&handle, key.as_slice())?;
+        if let Some(batch) = batch {
+            batch.delete_cf(&handle, key.as_slice());
+        } else {
+            self.txn.delete_cf(&handle, key.as_slice())?;
+        }
+        Ok(old_value)
     }

-    /// Removes all key-value pairs starting with a given prefix.
-    fn remove_by_prefix(&mut self, _prefix: &DatabaseKey) -> Result<(), Self::DatabaseError> {
+    fn remove_by_prefix(&mut self, prefix: &DatabaseKey) -> Result<(), Self::DatabaseError> {
+        log::trace!("Removing by prefix from RocksDB: {:?}", prefix);
+        let handle = self.db.get_column(self.column_mapping.map(prefix));
+        let iter = self.txn.iterator_cf(&handle, IteratorMode::From(prefix.as_slice(), Direction::Forward));
+        let mut batch = self.create_batch();
+        for kv in iter {
+            if let Ok((key, _)) = kv {
+                if key.starts_with(prefix.as_slice()) {
+                    batch.delete_cf(&handle, &key);
+                } else {
+                    break;
+                }
+            } else {
+                break;
+            }
+        }
+        drop(handle);
+        self.write_batch(batch)?;
         Ok(())
     }

-    /// Writes a batch of changes to the database.
-    fn write_batch(&mut self, _batch: Self::Batch) -> Result<(), Self::DatabaseError> {
-        Ok(())
+    fn write_batch(&mut self, batch: Self::Batch) -> Result<(), Self::DatabaseError> {
+        Ok(self.txn.rebuild_from_writebatch(&batch)?)
     }
 }

-/// This implementation is a stub to mute any error but is is currently not used.
-impl<ID: Id> BonsaiPersistentDatabase<ID> for BonsaiDb {
-    type Transaction = TransactionWrapper;
+impl<'db> BonsaiPersistentDatabase<BasicId> for BonsaiDb<'db>
+where
+    Self: 'db,
+{
+    type Transaction = BonsaiTransaction<'db>;
     type DatabaseError = BonsaiDbError;

-    /// Snapshots the current database state, associated with an ID.
-    fn snapshot(&mut self, _id: ID) {}
+    fn snapshot(&mut self, id: BasicId) {
+        log::trace!("Generating RocksDB snapshot");
+        let snapshot = self.db.snapshot();
+        self.snapshots.insert(id, snapshot);
+        // TODO: snapshot limits
+        // if let Some(max_number_snapshot) = self.config.max_saved_snapshots {
+        //     while self.snapshots.len() > max_number_snapshot {
+        //         self.snapshots.pop_first();
+        //     }
+        // }
+    }
+
+    fn transaction(&self, id: BasicId) -> Option<Self::Transaction> {
+        log::trace!("Generating RocksDB transaction");
+        if let Some(snapshot) = self.snapshots.get(&id) {
+            let write_opts = WriteOptions::default();
+            let mut txn_opts = OptimisticTransactionOptions::default();
+            txn_opts.set_snapshot(true);
+            let txn = self.db.transaction_opt(&write_opts, &txn_opts);

-    /// Begins a new transaction associated with a snapshot ID.
-    fn transaction(&self, _id: ID) -> Option<Self::Transaction> {
-        None
+            let mut read_options = ReadOptions::default();
+            read_options.set_snapshot(snapshot);
+
+            Some(BonsaiTransaction { txn, db: self.db, column_mapping: self.column_mapping.clone() })
+        } else {
+            None
+        }
     }

-    /// Merges a completed transaction into the persistent database.
-    fn merge(&mut self, _transaction: Self::Transaction) -> Result<(), Self::DatabaseError> {
+    fn merge(&mut self, transaction: Self::Transaction) -> Result<(), Self::DatabaseError> {
+        transaction.txn.commit()?;
         Ok(())
     }
 }
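The rewritten `bonsai_db.rs` splits trie access into three roles: `BonsaiDbForRead` for shared read-only access, `BonsaiDb` for direct writes, and `BonsaiTransaction` for snapshot-backed transactional writes. A rough usage sketch of the read/write split follows (not part of the diff; the trie operations themselves are elided):

```rust
use starknet_types_core::hash::Pedersen;

use crate::bonsai_db::BonsaiStorageAccess;

fn sketch(access: &BonsaiStorageAccess<Pedersen>) {
    // Reads: the accessor derefs to a long-lived readonly BonsaiStorage.
    let readonly = &*access;
    let _ = readonly; // ... read-only bonsai-trie calls go here ...

    // Writes: each writer builds a fresh BonsaiStorage over a BonsaiDb that
    // talks to rocksdb directly (verify_l2 below does exactly this).
    let mut writable = access.writable();
    let _ = &mut writable; // ... inserts / commits go here ...
}
```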
diff --git a/crates/client/db/src/da_db.rs b/crates/client/db/src/da_db.rs
index cf9199ef5..3c01dcfb2 100644
--- a/crates/client/db/src/da_db.rs
+++ b/crates/client/db/src/da_db.rs
@@ -2,77 +2,76 @@ use std::sync::Arc;

 // Substrate
 use parity_scale_codec::{Decode, Encode};
-use sp_database::Database;
 // Starknet
 use starknet_api::block::BlockHash;
 use starknet_api::hash::StarkFelt;
 use starknet_api::state::ThinStateDiff;
 use uuid::Uuid;

-use crate::{DbError, DbHash};
+use crate::{Column, DatabaseExt, DbError, DB};

 // The fact db stores DA facts that need to be written to L1
 pub struct DaDb {
-    pub(crate) db: Arc<dyn Database<DbHash>>,
+    pub(crate) db: Arc<DB>,
 }

 // TODO: purge old cairo job keys
 impl DaDb {
+    pub(crate) fn new(db: Arc<DB>) -> Self {
+        Self { db }
+    }
+
     pub fn state_diff(&self, block_hash: &BlockHash) -> Result<ThinStateDiff, DbError> {
-        match self.db.get(crate::columns::DA, block_hash.0.bytes()) {
+        let column = self.db.get_column(Column::Da);
+
+        match self.db.get_cf(&column, block_hash.0.bytes())? {
             Some(raw) => Ok(ThinStateDiff::decode(&mut &raw[..])?),
-            None => Err(DbError::ValueNotInitialized(crate::columns::DA, block_hash.to_string())),
+            None => Err(DbError::ValueNotInitialized(Column::Da, block_hash.to_string())),
         }
     }

     pub fn store_state_diff(&self, block_hash: &BlockHash, diff: &ThinStateDiff) -> Result<(), DbError> {
-        let mut transaction = sp_database::Transaction::new();
-
-        transaction.set(crate::columns::DA, block_hash.0.bytes(), &diff.encode());
-
-        self.db.commit(transaction)?;
+        let column = self.db.get_column(Column::Da);
+        self.db.put_cf(&column, block_hash.0.bytes(), diff.encode())?;

         Ok(())
     }

     pub fn cairo_job(&self, block_hash: &BlockHash) -> Result<Option<Uuid>, DbError> {
-        match self.db.get(crate::columns::DA, block_hash.0.bytes()) {
+        let column = self.db.get_column(Column::Da);
+
+        match self.db.get_cf(&column, block_hash.0.bytes())? {
             Some(raw) => Ok(Some(Uuid::from_slice(&raw[..])?)),
             None => Ok(None),
         }
     }

     pub fn update_cairo_job(&self, block_hash: &BlockHash, job_id: Uuid) -> Result<(), DbError> {
-        let mut transaction = sp_database::Transaction::new();
-
-        transaction.set(crate::columns::DA, block_hash.0.bytes(), &job_id.into_bytes());
-
-        self.db.commit(transaction)?;
+        let column = self.db.get_column(Column::Da);
+        self.db.put_cf(&column, block_hash.0.bytes(), job_id.into_bytes())?;

         Ok(())
     }

     pub fn last_proved_block(&self) -> Result<BlockHash, DbError> {
-        match self.db.get(crate::columns::DA, crate::static_keys::LAST_PROVED_BLOCK) {
+        let column = self.db.get_column(Column::Da);
+
+        match self.db.get_cf(&column, crate::static_keys::LAST_PROVED_BLOCK)? {
             Some(raw) => {
                 let felt = StarkFelt::decode(&mut &raw[..])?;
                 Ok(BlockHash(felt))
             }
             None => Err(DbError::ValueNotInitialized(
-                crate::columns::DA,
-                // Safe coze `LAST_PROVED_BLOCK` is utf8
-                unsafe { std::str::from_utf8_unchecked(crate::static_keys::LAST_PROVED_BLOCK) }.to_string(),
+                Column::Da,
+                String::from_utf8(crate::static_keys::LAST_PROVED_BLOCK.to_vec()).expect("unreachable"),
             )),
         }
     }

     pub fn update_last_proved_block(&self, block_hash: &BlockHash) -> Result<(), DbError> {
-        let mut transaction = sp_database::Transaction::new();
-
-        transaction.set(crate::columns::DA, crate::static_keys::LAST_PROVED_BLOCK, &block_hash.0.encode());
-
-        self.db.commit(transaction)?;
+        let column = self.db.get_column(Column::Da);
+        self.db.put_cf(&column, crate::static_keys::LAST_PROVED_BLOCK, block_hash.0.encode())?;

         Ok(())
     }
 }
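`DaDb` shows the access pattern every per-domain store in this crate now follows: resolve the bound column-family handle through `DatabaseExt::get_column`, then call `get_cf`/`put_cf` directly instead of building an `sp_database::Transaction`. A minimal sketch of that shape (hypothetical `ExampleDb`; the `Column` variant is arbitrary):

```rust
use std::sync::Arc;

use crate::{Column, DatabaseExt, DbError, DB};

struct ExampleDb {
    db: Arc<DB>,
}

impl ExampleDb {
    fn read(&self, key: &[u8]) -> Result<Option<Vec<u8>>, DbError> {
        let column = self.db.get_column(Column::Meta);
        Ok(self.db.get_cf(&column, key)?)
    }

    fn write(&self, key: &[u8], value: &[u8]) -> Result<(), DbError> {
        let column = self.db.get_column(Column::Meta);
        self.db.put_cf(&column, key, value)?;
        Ok(())
    }
}
```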
diff --git a/crates/client/db/src/db_opening_utils/mod.rs b/crates/client/db/src/db_opening_utils/mod.rs
deleted file mode 100644
index 6315d243e..000000000
--- a/crates/client/db/src/db_opening_utils/mod.rs
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-// This file is part of Frontier.
-//
-// Copyright (c) 2020-2022 Parity Technologies (UK) Ltd.
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-pub mod parity_db_adapter;
-pub mod rock_db_adapter;
-
-use std::path::Path;
-use std::sync::Arc;
-
-use kvdb::KeyValueDB;
-use sp_database::Database;
-
-use crate::{DatabaseSettings, DatabaseSource, DbHash};
-
-#[allow(clippy::type_complexity)]
-pub(crate) fn open_database(
-    config: &DatabaseSettings,
-) -> Result<(Arc<dyn KeyValueDB>, Arc<dyn Database<DbHash>>), String> {
-    let dbs: (Arc<dyn KeyValueDB>, Arc<dyn Database<DbHash>>) = match &config.source {
-        // DatabaseSource::ParityDb { path } => open_parity_db(path).expect("Failed to open parity db"),
-        DatabaseSource::RocksDb { path, .. } => {
-            let dbs = open_kvdb_rocksdb(path, true)?;
-            (dbs.0, dbs.1)
-        }
-        DatabaseSource::Auto { paritydb_path, rocksdb_path, .. } => match open_kvdb_rocksdb(rocksdb_path, false) {
-            Ok(_) => {
-                let dbs = open_kvdb_rocksdb(paritydb_path, true)?;
-                (dbs.0, dbs.1)
-            }
-            Err(_) => Err("Missing feature flags `parity-db`".to_string())?,
-        },
-        _ => return Err("Missing feature flags `parity-db`".to_string()),
-    };
-    Ok(dbs)
-}
-
-#[allow(clippy::type_complexity)]
-pub fn open_kvdb_rocksdb(
-    path: &Path,
-    create: bool,
-) -> Result<(Arc<kvdb_rocksdb::Database>, Arc<dyn Database<DbHash>>), String> {
-    let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(crate::columns::NUM_COLUMNS);
-    db_config.create_if_missing = create;
-
-    let db_kvdb = kvdb_rocksdb::Database::open(&db_config, path).map_err(|err| format!("{}", err))?;
-    let x = Arc::new(db_kvdb);
-    let y = unsafe { std::mem::transmute::<_, Arc<rock_db_adapter::DbAdapter>>(x.clone()) };
-
-    Ok((x, y))
-}
diff --git a/crates/client/db/src/db_opening_utils/parity_db_adapter.rs b/crates/client/db/src/db_opening_utils/parity_db_adapter.rs
deleted file mode 100644
index 7a6cf7894..000000000
--- a/crates/client/db/src/db_opening_utils/parity_db_adapter.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-// This file is part of Frontier.
-//
-// Copyright (c) 2022 Parity Technologies (UK) Ltd.
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-use sp_database::error::DatabaseError;
-use sp_database::{Change, ColumnId, Database, Transaction};
-
-fn handle_err<T>(result: parity_db::Result<T>) -> T {
-    match result {
-        Ok(r) => r,
-        Err(e) => {
-            panic!("Critical database error: {:?}", e);
-        }
-    }
-}
-
-pub struct DbAdapter(pub parity_db::Db);
-
-impl<H: Clone + AsRef<[u8]>> Database<H> for DbAdapter {
-    fn commit(&self, transaction: Transaction<H>) -> Result<(), DatabaseError> {
-        handle_err(self.0.commit(transaction.0.into_iter().map(|change| match change {
-            Change::Set(col, key, value) => (col as u8, key, Some(value)),
-            Change::Remove(col, key) => (col as u8, key, None),
-            _ => unimplemented!(),
-        })));
-
-        Ok(())
-    }
-
-    fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>> {
-        handle_err(self.0.get(col as u8, key))
-    }
-
-    fn contains(&self, col: ColumnId, key: &[u8]) -> bool {
-        handle_err(self.0.get_size(col as u8, key)).is_some()
-    }
-
-    fn value_size(&self, col: ColumnId, key: &[u8]) -> Option<usize> {
-        handle_err(self.0.get_size(col as u8, key)).map(|s| s as usize)
-    }
-
-    fn supports_ref_counting(&self) -> bool {
-        true
-    }
-
-    fn sanitize_key(&self, key: &mut Vec<u8>) {
-        let _prefix = key.drain(0..key.len() - crate::DB_HASH_LEN);
-    }
-}
diff --git a/crates/client/db/src/db_opening_utils/rock_db_adapter.rs b/crates/client/db/src/db_opening_utils/rock_db_adapter.rs
deleted file mode 100644
index eba10db46..000000000
--- a/crates/client/db/src/db_opening_utils/rock_db_adapter.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-use kvdb::{DBTransaction, KeyValueDB};
-use sp_database::error::DatabaseError;
-use sp_database::{Change, ColumnId, Database, Transaction};
-
-#[repr(transparent)]
-pub struct DbAdapter(kvdb_rocksdb::Database);
-
-fn handle_err<T>(result: std::io::Result<T>) -> T {
-    match result {
-        Ok(r) => r,
-        Err(e) => {
-            panic!("Critical database error: {:?}", e);
-        }
-    }
-}
-
-impl DbAdapter {
-    // Returns counter key and counter value if it exists.
-    fn read_counter(&self, col: ColumnId, key: &[u8]) -> Result<(Vec<u8>, Option<u32>), DatabaseError> {
-        // Add a key suffix for the counter
-        let mut counter_key = key.to_vec();
-        counter_key.push(0);
-        Ok(match self.0.get(col, &counter_key).map_err(|e| DatabaseError(Box::new(e)))? {
-            Some(data) => {
-                let mut counter_data = [0; 4];
-                if data.len() != 4 {
-                    return Err(DatabaseError(Box::new(std::io::Error::new(
-                        std::io::ErrorKind::Other,
-                        format!("Unexpected counter len {}", data.len()),
-                    ))));
-                }
-                counter_data.copy_from_slice(&data);
-                let counter = u32::from_le_bytes(counter_data);
-                (counter_key, Some(counter))
-            }
-            None => (counter_key, None),
-        })
-    }
-}
-
-impl<H: Clone + AsRef<[u8]>> Database<H> for DbAdapter {
-    fn commit(&self, transaction: Transaction<H>) -> Result<(), DatabaseError> {
-        let mut tx = DBTransaction::new();
-        for change in transaction.0.into_iter() {
-            match change {
-                Change::Set(col, key, value) => tx.put_vec(col, &key, value),
-                Change::Remove(col, key) => tx.delete(col, &key),
-                Change::Store(col, key, value) => match self.read_counter(col, key.as_ref())? {
-                    (counter_key, Some(mut counter)) => {
-                        counter += 1;
-                        tx.put(col, &counter_key, &counter.to_le_bytes());
-                    }
-                    (counter_key, None) => {
-                        let d = 1u32.to_le_bytes();
-                        tx.put(col, &counter_key, &d);
-                        tx.put_vec(col, key.as_ref(), value);
-                    }
-                },
-                Change::Reference(col, key) => {
-                    if let (counter_key, Some(mut counter)) = self.read_counter(col, key.as_ref())? {
-                        counter += 1;
-                        tx.put(col, &counter_key, &counter.to_le_bytes());
-                    }
-                }
-                Change::Release(col, key) => {
-                    if let (counter_key, Some(mut counter)) = self.read_counter(col, key.as_ref())? {
-                        counter -= 1;
-                        if counter == 0 {
-                            tx.delete(col, &counter_key);
-                            tx.delete(col, key.as_ref());
-                        } else {
-                            tx.put(col, &counter_key, &counter.to_le_bytes());
-                        }
-                    }
-                }
-            }
-        }
-        self.0.write(tx).map_err(|e| DatabaseError(Box::new(e)))
-    }
-
-    fn get(&self, col: ColumnId, key: &[u8]) -> Option<Vec<u8>> {
-        handle_err(self.0.get(col, key))
-    }
-
-    fn contains(&self, col: ColumnId, key: &[u8]) -> bool {
-        handle_err(self.0.has_key(col, key))
-    }
-}
diff --git a/crates/client/db/src/error.rs b/crates/client/db/src/error.rs
index 0d17e4fd3..38dcb200d 100644
--- a/crates/client/db/src/error.rs
+++ b/crates/client/db/src/error.rs
@@ -1,44 +1,26 @@
-use std::{error, fmt};
-
 use bonsai_trie::DBError;
+use thiserror::Error;
+
+use crate::Column;

 #[derive(thiserror::Error, Debug)]
 pub enum DbError {
     #[error("Failed to commit DB Update: `{0}`")]
-    CommitError(#[from] sp_database::error::DatabaseError),
+    RocksDB(#[from] rocksdb::Error),
     #[error("Failed to deserialize DB Data: `{0}`")]
     DeserializeError(#[from] parity_scale_codec::Error),
     #[error("Failed to build Uuid: `{0}`")]
     Uuid(#[from] uuid::Error),
-    #[error("A value was queryied that was not initialized at column: `{0}` key: `{1}`")]
-    ValueNotInitialized(u32, String),
+    #[error("A value was queried that was not initialized at column: `{0}` key: `{1}`")]
+    ValueNotInitialized(Column, String),
 }

-#[derive(Debug)]
+#[derive(Debug, Error)]
 pub enum BonsaiDbError {
-    Io(std::io::Error),
-}
-
-impl fmt::Display for BonsaiDbError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            BonsaiDbError::Io(ref err) => write!(f, "IO error: {}", err),
-        }
-    }
-}
-
-impl error::Error for BonsaiDbError {
-    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
-        match *self {
-            BonsaiDbError::Io(ref err) => Some(err),
-        }
-    }
-}
-
-impl From<std::io::Error> for BonsaiDbError {
-    fn from(err: std::io::Error) -> BonsaiDbError {
-        BonsaiDbError::Io(err)
-    }
+    #[error("IO error: `{0}`")]
+    Io(#[from] std::io::Error),
+    #[error("Failed to commit DB Update: `{0}`")]
+    RocksDB(#[from] rocksdb::Error),
 }

 impl DBError for BonsaiDbError {}
diff --git a/crates/client/db/src/l1_handler_tx_fee.rs b/crates/client/db/src/l1_handler_tx_fee.rs
index 99ff7c35b..3d775ddef 100644
--- a/crates/client/db/src/l1_handler_tx_fee.rs
+++ b/crates/client/db/src/l1_handler_tx_fee.rs
@@ -1,31 +1,33 @@
 use std::sync::Arc;

 use parity_scale_codec::Encode;
-use sp_database::Database;
 use starknet_api::hash::StarkFelt;
 use starknet_api::transaction::Fee;

-use crate::{DbError, DbHash};
+use crate::{Column, DatabaseExt, DbError, DB};

 pub struct L1HandlerTxFeeDb {
-    pub(crate) db: Arc<dyn Database<DbHash>>,
+    pub(crate) db: Arc<DB>,
 }

 impl L1HandlerTxFeeDb {
+    pub(crate) fn new(db: Arc<DB>) -> Self {
+        Self { db }
+    }
+
     /// Store the fee paid on l1 for a specific L1Handler transaction
     pub fn store_fee_paid_for_l1_handler_tx(&self, tx_hash: StarkFelt, fee: Fee) -> Result<(), DbError> {
-        let mut transaction = sp_database::Transaction::new();
-
-        transaction.set(crate::columns::L1_HANDLER_PAID_FEE, &tx_hash.encode(), &fee.0.to_le_bytes());
-
-        self.db.commit(transaction)?;
+        let column = self.db.get_column(Column::L1HandlerPaidFee);
+        self.db.put_cf(&column, tx_hash.encode(), fee.0.to_le_bytes())?;

         Ok(())
     }

     /// Return the stored fee paid on l1 for a specific L1Handler transaction
     pub fn get_fee_paid_for_l1_handler_tx(&self, tx_hash: StarkFelt) -> Result<Fee, DbError> {
-        if let Some(bytes) = self.db.get(crate::columns::L1_HANDLER_PAID_FEE, &tx_hash.encode()) {
+        let column = self.db.get_column(Column::L1HandlerPaidFee);
+
+        if let Some(bytes) = self.db.get_cf(&column, tx_hash.encode())? {
             let mut buff = [0u8; 16];
             buff.copy_from_slice(&bytes);
@@ -33,7 +35,7 @@ impl L1HandlerTxFeeDb {

             Ok(Fee(fee))
         } else {
-            Err(DbError::ValueNotInitialized(crate::columns::L1_HANDLER_PAID_FEE, tx_hash.to_string()))
+            Err(DbError::ValueNotInitialized(Column::L1HandlerPaidFee, tx_hash.to_string()))
         }
     }
 }
diff --git a/crates/client/db/src/lib.rs b/crates/client/db/src/lib.rs
index 21c0def31..99fdd9166 100644
--- a/crates/client/db/src/lib.rs
+++ b/crates/client/db/src/lib.rs
@@ -11,37 +11,35 @@
 //! `paritydb` and `rocksdb` are both supported, behind the `kvdb-rocksd` and `parity-db` feature
 //! flags. Support for custom databases is possible but not supported yet.

-mod error;
-use bonsai_trie::id::BasicId;
-use bonsai_trie::BonsaiStorage;
-pub use error::{BonsaiDbError, DbError};
+use std::fmt;
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, OnceLock};
+
+use anyhow::{bail, Context, Result};
+use bonsai_db::{BonsaiStorageAccess, DatabaseKeyMapping};
+use da_db::DaDb;
+use l1_handler_tx_fee::L1HandlerTxFeeDb;
+use mapping_db::MappingDb;
+use messaging_db::MessagingDb;
+use meta_db::MetaDb;
+use sc_client_db::DatabaseSource;

+mod error;
 mod mapping_db;
-use kvdb::KeyValueDB;
-pub use mapping_db::MappingCommitment;
+use rocksdb::{BoundColumnFamily, ColumnFamilyDescriptor, MultiThreaded, OptimisticTransactionDB, Options};
 use sierra_classes_db::SierraClassesDb;
-use starknet_api::hash::StarkHash;
 mod da_db;
-mod db_opening_utils;
 mod messaging_db;
 mod sierra_classes_db;
-pub use messaging_db::LastSyncedEventBlock;
+use starknet_api::hash::StarkHash;
 use starknet_types_core::hash::{Pedersen, Poseidon};

 pub mod bonsai_db;
 mod l1_handler_tx_fee;
 mod meta_db;

-use std::path::{Path, PathBuf};
-use std::sync::{Arc, Mutex, OnceLock};
-
-use bonsai_db::{BonsaiConfigs, BonsaiDb, TrieColumn};
-use da_db::DaDb;
-use l1_handler_tx_fee::L1HandlerTxFeeDb;
-use mapping_db::MappingDb;
-use messaging_db::MessagingDb;
-use meta_db::MetaDb;
-use sc_client_db::DatabaseSource;
-use sp_database::Database;
+pub use error::{BonsaiDbError, DbError};
+pub use mapping_db::MappingCommitment;
+pub use messaging_db::LastSyncedEventBlock;

 const DB_HASH_LEN: usize = 32;

 /// Hash type that this backend uses for the database.
@@ -52,53 +50,159 @@ pub type DbHash = [u8; DB_HASH_LEN];

 struct DatabaseSettings {
     pub source: DatabaseSource,
 }

-pub(crate) mod columns {
-    /// Total number of columns.
-    // ===== /!\ ===================================================================================
-    // MUST BE INCREMENTED WHEN A NEW COLUMN IN ADDED
-    // ===== /!\ ===================================================================================
-    pub const NUM_COLUMNS: u32 = 16;
+pub type DB = OptimisticTransactionDB<MultiThreaded>;
+
+pub(crate) fn open_database(config: &DatabaseSettings) -> Result<DB> {
+    Ok(match &config.source {
+        DatabaseSource::RocksDb { path, .. } => open_rocksdb(path, true)?,
+        DatabaseSource::Auto { paritydb_path: _, rocksdb_path, .. } => open_rocksdb(rocksdb_path, false)?,
+        _ => bail!("only the rocksdb database source is supported at the moment"),
+    })
+}
+
+pub(crate) fn open_rocksdb(path: &Path, create: bool) -> Result<OptimisticTransactionDB<MultiThreaded>> {
+    let mut opts = Options::default();
+    opts.set_report_bg_io_stats(true);
+    opts.set_use_fsync(false);
+    opts.create_if_missing(create);
+    opts.create_missing_column_families(true);
+    opts.set_bytes_per_sync(1024 * 1024);
+    opts.set_keep_log_file_num(1);
+    let cores = std::thread::available_parallelism().map(|e| e.get() as i32).unwrap_or(1);
+    opts.increase_parallelism(i32::max(cores / 2, 1));
+
+    let db = OptimisticTransactionDB::<MultiThreaded>::open_cf_descriptors(
+        &opts,
+        path,
+        Column::ALL.iter().map(|col| ColumnFamilyDescriptor::new(col.rocksdb_name(), col.rocksdb_options())),
+    )?;
+
+    Ok(db)
+}

-    pub const META: u32 = 0;
-    pub const BLOCK_MAPPING: u32 = 1;
-    pub const TRANSACTION_MAPPING: u32 = 2;
-    pub const SYNCED_MAPPING: u32 = 3;
-    pub const DA: u32 = 4;
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum Column {
+    Meta,
+    BlockMapping,
+    TransactionMapping,
+    SyncedMapping,
+    Da,

     /// This column is used to map starknet block hashes to a list of transaction hashes that are
     /// contained in the block.
     ///
     /// This column should only be accessed if the `--cache` flag is enabled.
-    pub const STARKNET_TRANSACTION_HASHES_CACHE: u32 = 5;
+    StarknetTransactionHashesCache,

     /// This column is used to map starknet block numbers to their block hashes.
     ///
     /// This column should only be accessed if the `--cache` flag is enabled.
-    pub const STARKNET_BLOCK_HASHES_CACHE: u32 = 6;
+    StarknetBlockHashesCache,

     /// This column contains last synchronized L1 block.
-    pub const MESSAGING: u32 = 7;
+    Messaging,

     /// This column contains the Sierra contract classes
-    pub const SIERRA_CONTRACT_CLASSES: u32 = 8;
+    SierraContractClasses,

     /// This column stores the fee paid on l1 for L1Handler transactions
-    pub const L1_HANDLER_PAID_FEE: u32 = 9;
+    L1HandlerPaidFee,

-    /// The bonsai columns are triplicated since we need to set a column for
-    ///
-    /// const TRIE_LOG_CF: &str = "trie_log";
-    /// const TRIE_CF: &str = "trie";
-    /// const FLAT_CF: &str = "flat";
-    /// as defined in https://github.com/keep-starknet-strange/bonsai-trie/blob/oss/src/databases/rocks_db.rs
-    ///
-    /// For each tries CONTRACTS, CLASSES and STORAGE
-    pub const TRIE_BONSAI_CONTRACTS: u32 = 10;
-    pub const FLAT_BONSAI_CONTRACTS: u32 = 11;
-    pub const LOG_BONSAI_CONTRACTS: u32 = 12;
-    pub const TRIE_BONSAI_CLASSES: u32 = 13;
-    pub const FLAT_BONSAI_CLASSES: u32 = 14;
-    pub const LOG_BONSAI_CLASSES: u32 = 15;
+    // Each bonsai storage has 3 columns
+    BonsaiContractsTrie,
+    BonsaiContractsFlat,
+    BonsaiContractsLog,
+
+    BonsaiContractsStorageTrie,
+    BonsaiContractsStorageFlat,
+    BonsaiContractsStorageLog,
+
+    BonsaiClassesTrie,
+    BonsaiClassesFlat,
+    BonsaiClassesLog,
+}
+
+impl fmt::Debug for Column {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.rocksdb_name())
+    }
+}
+
+impl fmt::Display for Column {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.rocksdb_name())
+    }
+}
+
+impl Column {
+    pub const ALL: &'static [Self] = {
+        use Column::*;
+        &[
+            Meta,
+            BlockMapping,
+            TransactionMapping,
+            SyncedMapping,
+            Da,
+            StarknetTransactionHashesCache,
+            StarknetBlockHashesCache,
+            Messaging,
+            SierraContractClasses,
+            L1HandlerPaidFee,
+            BonsaiContractsTrie,
+            BonsaiContractsFlat,
+            BonsaiContractsLog,
+            BonsaiContractsStorageTrie,
+            BonsaiContractsStorageFlat,
+            BonsaiContractsStorageLog,
+            BonsaiClassesTrie,
+            BonsaiClassesFlat,
+            BonsaiClassesLog,
+        ]
+    };
+    pub const NUM_COLUMNS: usize = Self::ALL.len();
+
+    pub(crate) fn rocksdb_name(&self) -> &'static str {
+        match self {
+            Column::Meta => "meta",
+            Column::BlockMapping => "block_mapping",
+            Column::TransactionMapping => "transaction_mapping",
+            Column::SyncedMapping => "synced_mapping",
+            Column::Da => "da",
+            Column::StarknetTransactionHashesCache => "starknet_transaction_hashes_cache",
+            Column::StarknetBlockHashesCache => "starknet_block_hashes_cache",
+            Column::Messaging => "messaging",
+            Column::SierraContractClasses => "sierra_contract_classes",
+            Column::L1HandlerPaidFee => "l1_handler_paid_fee",
+            Column::BonsaiContractsTrie => "bonsai_contracts_trie",
+            Column::BonsaiContractsFlat => "bonsai_contracts_flat",
+            Column::BonsaiContractsLog => "bonsai_contracts_log",
+            Column::BonsaiContractsStorageTrie => "bonsai_contracts_storage_trie",
+            Column::BonsaiContractsStorageFlat => "bonsai_contracts_storage_flat",
+            Column::BonsaiContractsStorageLog => "bonsai_contracts_storage_log",
+            Column::BonsaiClassesTrie => "bonsai_classes_trie",
+            Column::BonsaiClassesFlat => "bonsai_classes_flat",
+            Column::BonsaiClassesLog => "bonsai_classes_log",
+        }
+    }
+
+    /// Per column rocksdb options, like memory budget, compaction profiles, block sizes for hdd/ssd
+    /// etc. TODO: add basic sensible defaults
+    pub(crate) fn rocksdb_options(&self) -> Options {
+        // match self {
+        //     _ => Options::default(),
+        // }
+        Options::default()
+    }
+}
+
+pub(crate) trait DatabaseExt {
+    fn get_column(&self, col: Column) -> Arc<BoundColumnFamily<'_>>;
+}
+
+impl DatabaseExt for DB {
+    fn get_column(&self, col: Column) -> Arc<BoundColumnFamily<'_>> {
+        self.cf_handle(col.rocksdb_name()).expect("column not initialized")
+    }
 }

 pub mod static_keys {
@@ -134,9 +238,9 @@ pub struct DeoxysBackend {
     messaging: Arc<MessagingDb>,
     sierra_classes: Arc<SierraClassesDb>,
     l1_handler_paid_fee: Arc<L1HandlerTxFeeDb>,
-    bonsai_contract: Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
-    bonsai_storage: Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
-    bonsai_class: Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Poseidon>>>,
+    bonsai_contract: BonsaiStorageAccess<Pedersen>,
+    bonsai_storage: BonsaiStorageAccess<Pedersen>,
+    bonsai_class: BonsaiStorageAccess<Poseidon>,
 }

 // Singleton backing instance for `DeoxysBackend`
@@ -152,15 +256,16 @@ impl DeoxysBackend {
         database: &DatabaseSource,
         db_config_dir: &Path,
         cache_more_things: bool,
-    ) -> Result<&'static Arc<DeoxysBackend>, String> {
+    ) -> Result<&'static Arc<DeoxysBackend>> {
         BACKEND_SINGLETON
             .set(Arc::new(Self::init(database, db_config_dir, cache_more_things).unwrap()))
-            .map_err(|_| "Backend already initialized")?;
+            .ok()
+            .context("Backend already initialized")?;

         Ok(BACKEND_SINGLETON.get().unwrap())
     }

-    fn init(database: &DatabaseSource, db_config_dir: &Path, cache_more_things: bool) -> Result<Self, String> {
+    fn init(database: &DatabaseSource, db_config_dir: &Path, cache_more_things: bool) -> Result<Self> {
         Self::new(
             &DatabaseSettings {
                 source: match database {
@@ -175,33 +280,47 @@ impl DeoxysBackend {
                         paritydb_path: starknet_database_dir(db_config_dir, "paritydb"),
                         cache_size: 0,
                     },
-                    _ => return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string()),
+                    _ => bail!("Supported db sources: `rocksdb` | `paritydb` | `auto`"),
                 },
             },
             cache_more_things,
         )
     }

-    fn new(config: &DatabaseSettings, cache_more_things: bool) -> Result<Self, String> {
-        let db = db_opening_utils::open_database(config)?;
-        let kvdb: Arc<dyn KeyValueDB> = db.0;
-        let spdb: Arc<dyn Database<DbHash>> = db.1;
-
-        let contract = BonsaiDb { db: kvdb.clone(), current_column: TrieColumn::Contract };
-        let contract_storage = BonsaiDb { db: kvdb.clone(), current_column: TrieColumn::ContractStorage };
-        let class = BonsaiDb { db: kvdb.clone(), current_column: TrieColumn::Class };
-        let config = BonsaiConfigs::new(contract, contract_storage, class);
+    fn new(config: &DatabaseSettings, cache_more_things: bool) -> Result<Self> {
+        let db = Arc::new(open_database(config)?);

         Ok(Self {
-            mapping: Arc::new(MappingDb::new(spdb.clone(), cache_more_things)),
-            meta: Arc::new(MetaDb { db: spdb.clone() }),
-            da: Arc::new(DaDb { db: spdb.clone() }),
-            messaging: Arc::new(MessagingDb { db: spdb.clone() }),
-            sierra_classes: Arc::new(SierraClassesDb { db: spdb.clone() }),
-            l1_handler_paid_fee: Arc::new(L1HandlerTxFeeDb { db: spdb.clone() }),
-            bonsai_contract: Arc::new(Mutex::new(config.contract)),
-            bonsai_storage: Arc::new(Mutex::new(config.contract_storage)),
-            bonsai_class: Arc::new(Mutex::new(config.class)),
+            mapping: Arc::new(MappingDb::new(Arc::clone(&db), cache_more_things)),
+            meta: Arc::new(MetaDb::new(Arc::clone(&db))),
+            da: Arc::new(DaDb::new(Arc::clone(&db))),
+            messaging: Arc::new(MessagingDb::new(Arc::clone(&db))),
+            sierra_classes: Arc::new(SierraClassesDb::new(Arc::clone(&db))),
+            l1_handler_paid_fee: Arc::new(L1HandlerTxFeeDb::new(Arc::clone(&db))),
+            bonsai_contract: BonsaiStorageAccess::new(
+                Arc::clone(&db),
+                DatabaseKeyMapping {
+                    flat: Column::BonsaiContractsFlat,
+                    trie: Column::BonsaiContractsTrie,
+                    trie_log: Column::BonsaiContractsLog,
+                },
+            ),
+            bonsai_storage: BonsaiStorageAccess::new(
+                Arc::clone(&db),
+                DatabaseKeyMapping {
+                    flat: Column::BonsaiContractsStorageFlat,
+                    trie: Column::BonsaiContractsStorageTrie,
+                    trie_log: Column::BonsaiContractsStorageLog,
+                },
+            ),
+            bonsai_class: BonsaiStorageAccess::new(
+                Arc::clone(&db),
+                DatabaseKeyMapping {
+                    flat: Column::BonsaiClassesFlat,
+                    trie: Column::BonsaiClassesTrie,
+                    trie_log: Column::BonsaiClassesLog,
+                },
+            ),
         })
     }

@@ -230,15 +349,15 @@ impl DeoxysBackend {
         BACKEND_SINGLETON.get().map(|backend| &backend.sierra_classes).expect("Backend not initialized")
     }

-    pub fn bonsai_contract() -> &'static Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>> {
+    pub fn bonsai_contract() -> &'static BonsaiStorageAccess<Pedersen> {
         BACKEND_SINGLETON.get().map(|backend| &backend.bonsai_contract).expect("Backend not initialized")
     }

-    pub fn bonsai_storage() -> &'static Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>> {
+    pub fn bonsai_storage() -> &'static BonsaiStorageAccess<Pedersen> {
         BACKEND_SINGLETON.get().map(|backend| &backend.bonsai_storage).expect("Backend not initialized")
     }

-    pub fn bonsai_class() -> &'static Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Poseidon>>> {
+    pub fn bonsai_class() -> &'static BonsaiStorageAccess<Poseidon> {
         BACKEND_SINGLETON.get().map(|backend| &backend.bonsai_class).expect("Backend not initialized")
     }
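With `lib.rs` reworked, the backend is opened once and every store is reached through the static getters. A rough sketch of the intended call sequence (paths are placeholders, and the name of the opening function, which sits just above the first `impl DeoxysBackend` hunk, is assumed to be `open`):

```rust
use std::path::Path;

use mc_db::DeoxysBackend;
use sc_client_db::DatabaseSource;

fn sketch() -> anyhow::Result<()> {
    // Assumed entry point; the diff only shows its parameter list.
    let source = DatabaseSource::RocksDb { path: "/tmp/deoxys-db".into(), cache_size: 0 };
    DeoxysBackend::open(&source, Path::new("/tmp/deoxys-config"), false)?;

    // Afterwards, stores hang off the singleton, e.g. a writable contract trie:
    let _bonsai_contract = DeoxysBackend::bonsai_contract().writable();
    Ok(())
}
```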
diff --git a/crates/client/db/src/mapping_db.rs b/crates/client/db/src/mapping_db.rs
index 0f304baee..9979dbee0 100644
--- a/crates/client/db/src/mapping_db.rs
+++ b/crates/client/db/src/mapping_db.rs
@@ -1,13 +1,13 @@
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;

 use deoxys_runtime::opaque::{DBlockT, DHashT};
 // Substrate
 use parity_scale_codec::{Decode, Encode};
-use sp_database::Database;
+use rocksdb::WriteBatchWithTransaction;
 use sp_runtime::traits::Block as BlockT;
 use starknet_api::hash::StarkHash;

-use crate::{DbError, DbHash};
+use crate::{Column, DatabaseExt, DbError, DB};

 /// The mapping to write in db
 #[derive(Debug)]
@@ -20,21 +20,22 @@ pub struct MappingCommitment {

 /// Allow interaction with the mapping db
 pub struct MappingDb {
-    db: Arc<dyn Database<DbHash>>,
-    write_lock: Arc<Mutex<()>>,
+    db: Arc<DB>,
     /// Whether more information should be cached in the database.
     cache_more_things: bool,
 }

 impl MappingDb {
     /// Creates a new instance of the mapping database.
-    pub fn new(db: Arc<dyn Database<DbHash>>, cache_more_things: bool) -> Self {
-        Self { db, write_lock: Arc::new(Mutex::new(())), cache_more_things }
+    pub(crate) fn new(db: Arc<DB>, cache_more_things: bool) -> Self {
+        Self { db, cache_more_things }
     }

     /// Check if the given block hash has already been processed
     pub fn is_synced(&self, block_hash: &DHashT) -> Result<bool, DbError> {
-        match self.db.get(crate::columns::SYNCED_MAPPING, &block_hash.encode()) {
+        let synced_mapping_col = self.db.get_column(Column::SyncedMapping);
+
+        match self.db.get_cf(&synced_mapping_col, block_hash.encode())? {
             Some(raw) => Ok(bool::decode(&mut &raw[..])?),
             None => Ok(false),
         }
@@ -45,7 +46,9 @@ impl MappingDb {
     /// Under some circumstances it can return multiples blocks hashes, meaning that the result has
     /// to be checked against the actual blockchain state in order to find the good one.
     pub fn block_hash(&self, starknet_block_hash: StarkHash) -> Result<Option<Vec<DHashT>>, DbError> {
-        match self.db.get(crate::columns::BLOCK_MAPPING, &starknet_block_hash.encode()) {
+        let block_mapping_col = self.db.get_column(Column::BlockMapping);
+
+        match self.db.get_cf(&block_mapping_col, starknet_block_hash.encode())? {
             Some(raw) => Ok(Some(Vec::<DHashT>::decode(&mut &raw[..])?)),
             None => Ok(None),
         }
@@ -53,22 +56,21 @@ impl MappingDb {

     /// Register that a Substrate block has been seen, without it containing a Starknet one
     pub fn write_none(&self, block_hash: DHashT) -> Result<(), DbError> {
-        let _lock = self.write_lock.lock();
-
-        let mut transaction = sp_database::Transaction::new();
-
-        transaction.set(crate::columns::SYNCED_MAPPING, &block_hash.encode(), &true.encode());
-
-        self.db.commit(transaction)?;
+        let synced_mapping_col = self.db.get_column(Column::SyncedMapping);
+        self.db.put_cf(&synced_mapping_col, block_hash.encode(), true.encode())?;

         Ok(())
     }

-    /// Register that a Substate block has been seen and map it to the Statknet block it contains
+    /// Register that a Substrate block has been seen and map it to the Starknet block it contains
     pub fn write_hashes(&self, commitment: MappingCommitment) -> Result<(), DbError> {
-        let _lock = self.write_lock.lock();
+        let synced_mapping_col = self.db.get_column(Column::SyncedMapping);
+        let block_mapping_col = self.db.get_column(Column::BlockMapping);
+        let transaction_mapping_col = self.db.get_column(Column::TransactionMapping);
+        let starknet_tx_hashes_col = self.db.get_column(Column::StarknetTransactionHashesCache);
+        let starknet_block_hashes_col = self.db.get_column(Column::StarknetBlockHashesCache);

-        let mut transaction = sp_database::Transaction::new();
+        let mut transaction: WriteBatchWithTransaction<true> = Default::default();

         let substrate_hashes = match self.block_hash(commitment.starknet_block_hash) {
             Ok(Some(mut data)) => {
@@ -84,37 +86,29 @@
             _ => vec![commitment.block_hash],
         };

-        transaction.set(
-            crate::columns::BLOCK_MAPPING,
-            &commitment.starknet_block_hash.encode(),
-            &substrate_hashes.encode(),
-        );
+        transaction.put_cf(&block_mapping_col, &commitment.starknet_block_hash.encode(), &substrate_hashes.encode());

-        transaction.set(crate::columns::SYNCED_MAPPING, &commitment.block_hash.encode(), &true.encode());
+        transaction.put_cf(&synced_mapping_col, &commitment.block_hash.encode(), &true.encode());

         for transaction_hash in commitment.starknet_transaction_hashes.iter() {
-            transaction.set(
-                crate::columns::TRANSACTION_MAPPING,
-                &transaction_hash.encode(),
-                &commitment.block_hash.encode(),
-            );
+            transaction.put_cf(&transaction_mapping_col, &transaction_hash.encode(), &commitment.block_hash.encode());
         }

         if self.cache_more_things {
-            transaction.set(
-                crate::columns::STARKNET_TRANSACTION_HASHES_CACHE,
+            transaction.put_cf(
+                &starknet_tx_hashes_col,
                 &commitment.starknet_block_hash.encode(),
                 &commitment.starknet_transaction_hashes.encode(),
             );

-            transaction.set(
-                crate::columns::STARKNET_BLOCK_HASHES_CACHE,
+            transaction.put_cf(
+                &starknet_block_hashes_col,
                 &commitment.block_number.encode(),
                 &commitment.starknet_block_hash.encode(),
             );
         }

-        self.db.commit(transaction)?;
+        self.db.write(transaction)?;

         Ok(())
     }
@@ -128,7 +122,9 @@ impl MappingDb {
     /// native type of substrate, and we are sure it's SCALE encoding is optimized and will not
     /// change.
     pub fn block_hash_from_transaction_hash(&self, transaction_hash: StarkHash) -> Result<Option<DHashT>, DbError> {
-        match self.db.get(crate::columns::TRANSACTION_MAPPING, &transaction_hash.encode()) {
+        let transaction_mapping_col = self.db.get_column(Column::TransactionMapping);
+
+        match self.db.get_cf(&transaction_mapping_col, transaction_hash.encode())? {
             Some(raw) => Ok(Some(<DHashT>::decode(&mut &raw[..])?)),
             None => Ok(None),
         }
@@ -152,12 +148,14 @@ impl MappingDb {
         &self,
         starknet_block_hash: StarkHash,
     ) -> Result<Option<Vec<StarkHash>>, DbError> {
+        let starknet_tx_hashes_col = self.db.get_column(Column::StarknetTransactionHashesCache);
+
         if !self.cache_more_things {
             // The cache is not enabled, no need to even touch the database.
             return Ok(None);
         }

-        match self.db.get(crate::columns::STARKNET_TRANSACTION_HASHES_CACHE, &starknet_block_hash.encode()) {
+        match self.db.get_cf(&starknet_tx_hashes_col, starknet_block_hash.encode())? {
             Some(raw) => Ok(Some(Vec::<StarkHash>::decode(&mut &raw[..])?)),
             None => Ok(None),
         }
@@ -181,12 +179,14 @@ impl MappingDb {
         &self,
         starknet_block_number: u64,
     ) -> Result<Option<StarkHash>, DbError> {
+        let starknet_block_hashes_col = self.db.get_column(Column::StarknetBlockHashesCache);
+
         if !self.cache_more_things {
             // The cache is not enabled, no need to even touch the database.
             return Ok(None);
         }

-        match self.db.get(crate::columns::STARKNET_BLOCK_HASHES_CACHE, &starknet_block_number.encode()) {
+        match self.db.get_cf(&starknet_block_hashes_col, starknet_block_number.encode())? {
             Some(raw) => Ok(Some(<StarkHash>::decode(&mut &raw[..])?)),
             None => Ok(None),
         }
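`write_hashes` above replaces the old `sp_database::Transaction` plus external `write_lock` with a single `WriteBatchWithTransaction<true>`: the batch is applied in one `db.write` call, which keeps the multi-column update atomic without a mutex. A condensed sketch of that shape (keys and values are placeholders):

```rust
use rocksdb::WriteBatchWithTransaction;

use crate::{Column, DatabaseExt, DbError, DB};

fn write_two_columns(db: &DB) -> Result<(), DbError> {
    let col_a = db.get_column(Column::BlockMapping);
    let col_b = db.get_column(Column::SyncedMapping);

    let mut batch: WriteBatchWithTransaction<true> = Default::default();
    batch.put_cf(&col_a, b"key-a", b"value-a");
    batch.put_cf(&col_b, b"key-b", b"value-b");

    // One write call applies both puts atomically.
    db.write(batch)?;
    Ok(())
}
```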
diff --git a/crates/client/db/src/messaging_db.rs b/crates/client/db/src/messaging_db.rs
index 61bf7fa8a..a0fc1485b 100644
--- a/crates/client/db/src/messaging_db.rs
+++ b/crates/client/db/src/messaging_db.rs
@@ -2,13 +2,12 @@ use std::sync::Arc;

 // Substrate
 use parity_scale_codec::{Decode, Encode};
-use sp_database::Database;

 use crate::error::DbError;
-use crate::DbHash;
+use crate::{Column, DatabaseExt, DB};

 pub struct MessagingDb {
-    pub(crate) db: Arc<dyn Database<DbHash>>,
+    pub(crate) db: Arc<DB>,
 }

 #[derive(Encode, Decode)]
@@ -24,8 +23,14 @@ impl LastSyncedEventBlock {
 }

 impl MessagingDb {
+    pub(crate) fn new(db: Arc<DB>) -> Self {
+        Self { db }
+    }
+
     pub fn last_synced_l1_block_with_event(&self) -> Result<LastSyncedEventBlock, DbError> {
-        match self.db.get(crate::columns::MESSAGING, crate::static_keys::LAST_SYNCED_L1_EVENT_BLOCK) {
+        let column = self.db.get_column(Column::Messaging);
+
+        match self.db.get_cf(&column, crate::static_keys::LAST_SYNCED_L1_EVENT_BLOCK)? {
             Some(raw) => Ok(LastSyncedEventBlock::decode(&mut &raw[..])?),
             None => Ok(LastSyncedEventBlock::new(0, 0)),
         }
@@ -35,16 +40,9 @@ impl MessagingDb {
         &self,
         last_synced_event_block: &LastSyncedEventBlock,
     ) -> Result<(), DbError> {
-        let mut transaction = sp_database::Transaction::new();
-
-        transaction.set(
-            crate::columns::MESSAGING,
-            crate::static_keys::LAST_SYNCED_L1_EVENT_BLOCK,
-            &last_synced_event_block.encode(),
-        );
-
-        self.db.commit(transaction)?;
+        let column = self.db.get_column(Column::Messaging);
+        self.db.put_cf(&column, crate::static_keys::LAST_SYNCED_L1_EVENT_BLOCK, last_synced_event_block.encode())?;

         Ok(())
     }
 }
diff --git a/crates/client/db/src/meta_db.rs b/crates/client/db/src/meta_db.rs
index ece48a011..c2d3f6bf3 100644
--- a/crates/client/db/src/meta_db.rs
+++ b/crates/client/db/src/meta_db.rs
@@ -3,22 +3,27 @@ use std::sync::Arc;
 use deoxys_runtime::opaque::DHashT;
 // Substrate
 use parity_scale_codec::{Decode, Encode};
-use sp_database::Database;

-use crate::{DbError, DbHash};
+use crate::{Column, DatabaseExt, DbError, DB};

 /// Allow interaction with the meta db
 ///
 /// The meta db stores the tips of the synced chain.
 /// In case of forks, there can be multiple tips.
 pub struct MetaDb {
-    pub(crate) db: Arc<dyn Database<DbHash>>,
+    pub(crate) db: Arc<DB>,
 }

 impl MetaDb {
+    pub(crate) fn new(db: Arc<DB>) -> Self {
+        Self { db }
+    }
+
     /// Retrieve the current tips of the synced chain
     pub fn current_syncing_tips(&self) -> Result<Vec<DHashT>, DbError> {
-        match self.db.get(crate::columns::META, crate::static_keys::CURRENT_SYNCING_TIPS) {
+        let column = self.db.get_column(Column::Meta);
+
+        match self.db.get_cf(&column, crate::static_keys::CURRENT_SYNCING_TIPS)? {
             Some(raw) => Ok(Vec::<DHashT>::decode(&mut &raw[..])?),
             None => Ok(Vec::new()),
         }
@@ -26,12 +31,9 @@ impl MetaDb {

     /// Store the current tips of the synced chain
     pub fn write_current_syncing_tips(&self, tips: Vec<DHashT>) -> Result<(), DbError> {
-        let mut transaction = sp_database::Transaction::new();
-
-        transaction.set(crate::columns::META, crate::static_keys::CURRENT_SYNCING_TIPS, &tips.encode());
-
-        self.db.commit(transaction)?;
+        let column = self.db.get_column(Column::Meta);
+        self.db.put_cf(&column, crate::static_keys::CURRENT_SYNCING_TIPS, tips.encode())?;

         Ok(())
     }
 }
diff --git a/crates/client/db/src/sierra_classes_db.rs b/crates/client/db/src/sierra_classes_db.rs
index b88138085..8cf09967c 100644
--- a/crates/client/db/src/sierra_classes_db.rs
+++ b/crates/client/db/src/sierra_classes_db.rs
@@ -1,32 +1,34 @@
 use std::sync::Arc;

 use parity_scale_codec::{Decode, Encode};
-use sp_database::Database;
 use starknet_api::api_core::ClassHash;
 use starknet_api::state::ContractClass;

-use crate::{DbError, DbHash};
+use crate::{Column, DatabaseExt, DbError, DB};

 /// Allow interaction with the sierra classes db
 pub struct SierraClassesDb {
-    pub(crate) db: Arc<dyn Database<DbHash>>,
+    db: Arc<DB>,
 }

 impl SierraClassesDb {
-    pub fn store_sierra_class(&self, class_hash: ClassHash, class: ContractClass) -> Result<(), DbError> {
-        let mut transaction = sp_database::Transaction::new();
-
-        transaction.set(crate::columns::SIERRA_CONTRACT_CLASSES, &class_hash.encode(), &class.encode());
+    pub(crate) fn new(db: Arc<DB>) -> Self {
+        Self { db }
+    }

-        self.db.commit(transaction)?;
+    pub fn store_sierra_class(&self, class_hash: ClassHash, class: ContractClass) -> Result<(), DbError> {
+        let column = self.db.get_column(Column::SierraContractClasses);
+        self.db.put_cf(&column, class_hash.encode(), class.encode())?;

         Ok(())
     }

     pub fn get_sierra_class(&self, class_hash: ClassHash) -> Result<Option<ContractClass>, DbError> {
+        let column = self.db.get_column(Column::SierraContractClasses);
+
         let opt_contract_class = self
             .db
-            .get(crate::columns::SIERRA_CONTRACT_CLASSES, &class_hash.encode())
+            .get_cf(&column, class_hash.encode())?
             .map(|raw| ContractClass::decode(&mut &raw[..]))
             .transpose()?;
diff --git a/crates/client/db/src/sierra_classes_db.rs b/crates/client/db/src/sierra_classes_db.rs
index b88138085..8cf09967c 100644
--- a/crates/client/db/src/sierra_classes_db.rs
+++ b/crates/client/db/src/sierra_classes_db.rs
@@ -1,32 +1,34 @@
 use std::sync::Arc;

 use parity_scale_codec::{Decode, Encode};
-use sp_database::Database;
 use starknet_api::api_core::ClassHash;
 use starknet_api::state::ContractClass;

-use crate::{DbError, DbHash};
+use crate::{Column, DatabaseExt, DbError, DB};

 /// Allow interaction with the sierra classes db
 pub struct SierraClassesDb {
-    pub(crate) db: Arc<dyn Database<DbHash>>,
+    db: Arc<DB>,
 }

 impl SierraClassesDb {
-    pub fn store_sierra_class(&self, class_hash: ClassHash, class: ContractClass) -> Result<(), DbError> {
-        let mut transaction = sp_database::Transaction::new();
-
-        transaction.set(crate::columns::SIERRA_CONTRACT_CLASSES, &class_hash.encode(), &class.encode());
+    pub(crate) fn new(db: Arc<DB>) -> Self {
+        Self { db }
+    }

-        self.db.commit(transaction)?;
+    pub fn store_sierra_class(&self, class_hash: ClassHash, class: ContractClass) -> Result<(), DbError> {
+        let column = self.db.get_column(Column::SierraContractClasses);
+        self.db.put_cf(&column, class_hash.encode(), class.encode())?;
         Ok(())
     }

     pub fn get_sierra_class(&self, class_hash: ClassHash) -> Result<Option<ContractClass>, DbError> {
+        let column = self.db.get_column(Column::SierraContractClasses);
+
         let opt_contract_class = self
             .db
-            .get(crate::columns::SIERRA_CONTRACT_CLASSES, &class_hash.encode())
+            .get_cf(&column, class_hash.encode())?
             .map(|raw| ContractClass::decode(&mut &raw[..]))
             .transpose()?;
diff --git a/crates/client/sync/src/commitments/lib.rs b/crates/client/sync/src/commitments/lib.rs
index 007a06b28..782d5183c 100644
--- a/crates/client/sync/src/commitments/lib.rs
+++ b/crates/client/sync/src/commitments/lib.rs
@@ -1,4 +1,4 @@
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;

 use bitvec::order::Msb0;
 use bitvec::vec::BitVec;
@@ -167,32 +167,26 @@ where
 pub fn update_state_root(
     csd: CommitmentStateDiff,
     overrides: Arc<OverrideHandle<Block<Header<u32, BlakeTwo256>, OpaqueExtrinsic>>>,
-    bonsai_contract: Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
-    bonsai_contract_storage: Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
-    bonsai_class: Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Poseidon>>>,
+    bonsai_contract: &mut BonsaiStorage<BasicId, BonsaiDb, Pedersen>,
+    bonsai_contract_storage: &mut BonsaiStorage<BasicId, BonsaiDb, Pedersen>,
+    bonsai_class: &mut BonsaiStorage<BasicId, BonsaiDb, Poseidon>,
     block_number: u64,
     substrate_block_hash: Option<DHashT>,
 ) -> Felt252Wrapper {
     // Update contract and its storage tries
     let (contract_trie_root, class_trie_root) = rayon::join(
         || {
-            let mut bonsai_contract = bonsai_contract.lock().unwrap();
-            let mut bonsai_contract_storage = bonsai_contract_storage.lock().unwrap();
-
             contract_trie_root(
                 &csd,
                 overrides,
-                &mut bonsai_contract,
-                &mut bonsai_contract_storage,
+                bonsai_contract,
+                bonsai_contract_storage,
                 block_number,
                 substrate_block_hash,
             )
             .expect("Failed to compute contract root")
         },
-        || {
-            let mut bonsai_class = bonsai_class.lock().unwrap();
-            class_trie_root(&csd, &mut bonsai_class, block_number).expect("Failed to compute class root")
-        },
+        || class_trie_root(&csd, bonsai_class, block_number).expect("Failed to compute class root"),
     );

     calculate_state_root::<Poseidon>(contract_trie_root, class_trie_root)
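Dropping the `Arc<Mutex<...>>` wrappers in `update_state_root` works because the two closures handed to `rayon::join` borrow *disjoint* tries, so the borrow checker can prove the parallel branches never alias and no locking is needed. A toy sketch of that borrow structure, with `Vec<u64>` standing in for the bonsai tries:

```rust
// Each &mut moves into exactly one closure, so rayon can run both branches in
// parallel without a Mutex -- the same shape as the contract/class trie split above.
fn compute_roots(contract_trie: &mut Vec<u64>, class_trie: &mut Vec<u64>) -> (u64, u64) {
    rayon::join(
        || {
            contract_trie.push(1); // mutate the "contract" side
            contract_trie.iter().sum()
        },
        || {
            class_trie.push(2); // mutate the "class" side
            class_trie.iter().sum()
        },
    )
}

fn main() {
    let (mut contract, mut class) = (vec![1, 2, 3], vec![4, 5]);
    let (contract_root, class_root) = compute_roots(&mut contract, &mut class);
    println!("contract: {contract_root}, class: {class_root}");
}
```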
diff --git a/crates/client/sync/src/l2.rs b/crates/client/sync/src/l2.rs
index 9e0401b66..5fc0c1ada 100644
--- a/crates/client/sync/src/l2.rs
+++ b/crates/client/sync/src/l2.rs
@@ -1,14 +1,12 @@
 //! Contains the code required to sync data from the feeder efficiently.
 use std::pin::pin;
 use std::str::FromStr;
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Arc, RwLock};

-use bonsai_trie::id::BasicId;
-use bonsai_trie::BonsaiStorage;
 use deoxys_runtime::opaque::{DBlockT, DHashT};
 use futures::prelude::*;
 use lazy_static::lazy_static;
-use mc_db::bonsai_db::BonsaiDb;
+use mc_db::DeoxysBackend;
 use mc_storage::OverrideHandle;
 use mp_block::state_update::StateUpdateWrapper;
 use mp_block::DeoxysBlock;
@@ -25,7 +23,6 @@ use starknet_core::types::{PendingStateUpdate, StarknetError};
 use starknet_ff::FieldElement;
 use starknet_providers::sequencer::models::{BlockId, StateUpdate};
 use starknet_providers::{ProviderError, SequencerGatewayProvider};
-use starknet_types_core::hash::{Pedersen, Poseidon};
 use thiserror::Error;
 use tokio::sync::mpsc;
 use tokio::sync::mpsc::Sender;
@@ -141,15 +138,8 @@ pub struct SenderConfig {
 }

 /// Spawns workers to fetch blocks and state updates from the feeder.
-pub async fn sync<C>(
-    mut sender_config: SenderConfig,
-    fetch_config: FetchConfig,
-    first_block: u64,
-    bonsai_contract: &Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
-    bonsai_contract_storage: &Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
-    bonsai_class: &Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Poseidon>>>,
-    client: Arc<C>,
-) where
+pub async fn sync<C>(mut sender_config: SenderConfig, fetch_config: FetchConfig, first_block: u64, client: Arc<C>)
+where
     C: HeaderBackend<DBlockT> + 'static,
 {
     let SenderConfig { block_sender, state_update_sender, class_sender, command_sink, overrides } = &mut sender_config;
@@ -165,8 +155,7 @@ pub async fn sync(
     if first_block == 1 {
         let state_update =
             provider.get_state_update(BlockId::Number(0)).await.expect("getting state update for genesis block");
-        verify_l2(0, &state_update, overrides, bonsai_contract, bonsai_contract_storage, bonsai_class, None)
-            .expect("verifying genesis block");
+        verify_l2(0, &state_update, overrides, None).expect("verifying genesis block");
     }

     let fetch_stream = (first_block..).map(|block_n| {
@@ -212,9 +201,7 @@ pub async fn sync(
     let (state_update, block_conv) = {
         let verify = fetch_config.verify;
         let overrides = Arc::clone(overrides);
-        let bonsai_contract = Arc::clone(bonsai_contract);
-        let bonsai_contract_storage = Arc::clone(bonsai_contract_storage);
-        let bonsai_class = Arc::clone(bonsai_class);
+
         let state_update = Arc::new(state_update);
         let state_update_1 = Arc::clone(&state_update);
@@ -227,7 +214,7 @@ pub async fn sync(
         };
         let ver_l2 = || {
             let start = std::time::Instant::now();
-            verify_l2(block_n, &state_update, &overrides, &bonsai_contract, &bonsai_contract_storage, &bonsai_class, block_hash)
+            verify_l2(block_n, &state_update, &overrides, block_hash)
                 .expect("verifying block");
             log::debug!("verify_l2: {:?}", std::time::Instant::now() - start);
         };
@@ -317,26 +304,27 @@ pub fn verify_l2(
     block_number: u64,
     state_update: &StateUpdate,
     overrides: &Arc<OverrideHandle<Block<Header<u32, BlakeTwo256>, OpaqueExtrinsic>>>,
-    bonsai_contract: &Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
-    bonsai_contract_storage: &Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Pedersen>>>,
-    bonsai_class: &Arc<Mutex<BonsaiStorage<BasicId, BonsaiDb, Poseidon>>>,
     substrate_block_hash: Option<DHashT>,
 ) -> Result<(), L2SyncError> {
     let state_update_wrapper = StateUpdateWrapper::from(state_update);

+    let mut bonsai_contract = DeoxysBackend::bonsai_contract().writable();
+    let mut bonsai_contract_storage = DeoxysBackend::bonsai_storage().writable();
+    let mut bonsai_class = DeoxysBackend::bonsai_class().writable();
+
     let csd = build_commitment_state_diff(state_update_wrapper.clone());
     let state_root = update_state_root(
         csd,
         Arc::clone(overrides),
-        Arc::clone(bonsai_contract),
-        Arc::clone(bonsai_contract_storage),
-        Arc::clone(bonsai_class),
+        &mut bonsai_contract,
+        &mut bonsai_contract_storage,
+        &mut bonsai_class,
         block_number,
         substrate_block_hash,
     );
     log::debug!("state_root: {state_root:?}");
     let block_hash = state_update.block_hash.expect("Block hash not found in state update");
-    log::info!("update_state_root {} -- block_hash: {block_hash:?}, state_root: {state_root:?}", block_number);
+    log::debug!("update_state_root {} -- block_hash: {block_hash:?}, state_root: {state_root:?}", block_number);

     update_l2(L2StateUpdate {
         block_number,
diff --git a/crates/client/sync/src/lib.rs b/crates/client/sync/src/lib.rs
index 614780608..170421008 100644
--- a/crates/client/sync/src/lib.rs
+++ b/crates/client/sync/src/lib.rs
@@ -22,7 +22,6 @@ type CommandSink = futures::channel::mpsc::Sender