diff --git a/full-node/db/sov-db/src/ledger_db/mod.rs b/full-node/db/sov-db/src/ledger_db/mod.rs
index 0b81b83a1..012c97a95 100644
--- a/full-node/db/sov-db/src/ledger_db/mod.rs
+++ b/full-node/db/sov-db/src/ledger_db/mod.rs
@@ -8,7 +8,7 @@ use sov_rollup_interface::{
     services::da::SlotData,
     stf::{BatchReceipt, Event},
 };
-use sov_schema_db::{interface::SeekKeyEncoder, Schema, SchemaBatch, DB};
+use sov_schema_db::{Schema, SchemaBatch, SeekKeyEncoder, DB};
 
 use crate::{
     rocks_db_config::gen_rocksdb_options,
diff --git a/full-node/db/sov-db/src/lib.rs b/full-node/db/sov-db/src/lib.rs
index 4b161d687..feab76f05 100644
--- a/full-node/db/sov-db/src/lib.rs
+++ b/full-node/db/sov-db/src/lib.rs
@@ -1,3 +1,5 @@
+#![forbid(unsafe_code)]
+
 use state_db::StateDB;
 
 pub mod ledger_db;
diff --git a/full-node/db/sov-db/src/schema/tables.rs b/full-node/db/sov-db/src/schema/tables.rs
index 46bb40799..98d1e3865 100644
--- a/full-node/db/sov-db/src/schema/tables.rs
+++ b/full-node/db/sov-db/src/schema/tables.rs
@@ -28,8 +28,8 @@ use jmt::{
     Version,
 };
 use sov_rollup_interface::stf::{Event, EventKey};
-use sov_schema_db::interface::{KeyDecoder, KeyEncoder, SeekKeyEncoder, ValueCodec};
-use sov_schema_db::CodecError;
+use sov_schema_db::schema::{KeyDecoder, KeyEncoder, ValueCodec};
+use sov_schema_db::{CodecError, SeekKeyEncoder};
 
 pub const STATE_TABLES: &[&str] = &[
     KeyHashToKey::table_name(),
@@ -48,7 +48,7 @@ pub const LEDGER_TABLES: &[&str] = &[
     EventByNumber::table_name(),
 ];
 
-/// Macro to define a table that implements [`sov_schema_db::interface::Schema`].
+/// Macro to define a table that implements [`sov_schema_db::Schema`].
 /// KeyCodec and ValueCodec must be implemented separately.
 ///
 /// ```ignore
@@ -75,7 +75,7 @@ macro_rules! define_table_without_codec {
         #[derive(Clone, Copy, Debug, Default)]
         pub(crate) struct $table_name;
 
-        impl ::sov_schema_db::interface::Schema for $table_name {
+        impl ::sov_schema_db::schema::Schema for $table_name {
             const COLUMN_FAMILY_NAME: &'static str = $table_name::table_name();
             type Key = $key;
             type Value = $value;
@@ -98,7 +98,7 @@ macro_rules! define_table_without_codec {
 
 macro_rules! impl_borsh_value_codec {
     ($table_name:ident, $value:ty) => {
-        impl ::sov_schema_db::interface::ValueCodec<$table_name> for $value {
+        impl ::sov_schema_db::schema::ValueCodec<$table_name> for $value {
            fn encode_value(
                 &self,
             ) -> ::std::result::Result<
@@ -117,7 +117,7 @@ macro_rules! impl_borsh_value_codec {
     };
 }
 
-/// Macro to define a table that implements [`sov_schema_db::interface::Schema`].
+/// Macro to define a table that implements [`sov_schema_db::schema::Schema`].
 /// Automatically generates KeyCodec<...> and ValueCodec<...> implementations
 /// using the Encode and Decode traits from sov_rollup_interface
 ///
@@ -131,13 +131,13 @@ macro_rules! define_table_with_default_codec {
     ($(#[$docs:meta])+ ($table_name:ident) $key:ty => $value:ty) => {
         define_table_without_codec!($(#[$docs])+ ( $table_name ) $key => $value);
 
-        impl ::sov_schema_db::interface::KeyEncoder<$table_name> for $key {
+        impl ::sov_schema_db::schema::KeyEncoder<$table_name> for $key {
             fn encode_key(&self) -> ::std::result::Result<::sov_rollup_interface::maybestd::vec::Vec<u8>, ::sov_schema_db::CodecError> {
                 ::borsh::BorshSerialize::try_to_vec(self).map_err(Into::into)
             }
         }
 
-        impl ::sov_schema_db::interface::KeyDecoder<$table_name> for $key {
+        impl ::sov_schema_db::schema::KeyDecoder<$table_name> for $key {
             fn decode_key(data: &[u8]) -> ::std::result::Result<Self, ::sov_schema_db::CodecError> {
                 ::borsh::BorshDeserialize::deserialize_reader(&mut &data[..]).map_err(Into::into)
             }
@@ -156,7 +156,7 @@ macro_rules! define_table_with_seek_key_codec {
     ($(#[$docs:meta])+ ($table_name:ident) $key:ty => $value:ty) => {
         define_table_without_codec!($(#[$docs])+ ( $table_name ) $key => $value);
 
-        impl ::sov_schema_db::interface::KeyEncoder<$table_name> for $key {
+        impl ::sov_schema_db::schema::KeyEncoder<$table_name> for $key {
             fn encode_key(&self) -> ::std::result::Result<::sov_rollup_interface::maybestd::vec::Vec<u8>, ::sov_schema_db::CodecError> {
                 use ::anyhow::Context;
                 use ::bincode::Options;
@@ -169,7 +169,7 @@ macro_rules! define_table_with_seek_key_codec {
             }
         }
 
-        impl ::sov_schema_db::interface::KeyDecoder<$table_name> for $key {
+        impl ::sov_schema_db::schema::KeyDecoder<$table_name> for $key {
             fn decode_key(data: &[u8]) -> ::std::result::Result<Self, ::sov_schema_db::CodecError> {
                 use ::anyhow::Context;
                 use ::bincode::Options;
@@ -182,9 +182,9 @@ macro_rules! define_table_with_seek_key_codec {
             }
         }
 
-        impl ::sov_schema_db::interface::SeekKeyEncoder<$table_name> for $key {
+        impl ::sov_schema_db::SeekKeyEncoder<$table_name> for $key {
             fn encode_seek_key(&self) -> ::std::result::Result<::sov_rollup_interface::maybestd::vec::Vec<u8>, ::sov_schema_db::CodecError> {
-                <Self as ::sov_schema_db::interface::KeyEncoder<$table_name>>::encode_key(self)
+                <Self as ::sov_schema_db::schema::KeyEncoder<$table_name>>::encode_key(self)
             }
         }
 
@@ -239,22 +239,22 @@ define_table_without_codec!(
 );
 
 impl KeyEncoder<JmtNodes> for NodeKey {
-    fn encode_key(&self) -> sov_schema_db::interface::Result<Vec<u8>> {
+    fn encode_key(&self) -> sov_schema_db::schema::Result<Vec<u8>> {
         self.try_to_vec().map_err(CodecError::from)
     }
 }
 impl KeyDecoder<JmtNodes> for NodeKey {
-    fn decode_key(data: &[u8]) -> sov_schema_db::interface::Result<Self> {
+    fn decode_key(data: &[u8]) -> sov_schema_db::schema::Result<Self> {
         Ok(Self::deserialize_reader(&mut &data[..])?)
     }
 }
 
 impl ValueCodec<JmtNodes> for Node {
-    fn encode_value(&self) -> sov_schema_db::interface::Result<Vec<u8>> {
+    fn encode_value(&self) -> sov_schema_db::schema::Result<Vec<u8>> {
         self.try_to_vec().map_err(CodecError::from)
     }
 
-    fn decode_value(data: &[u8]) -> sov_schema_db::interface::Result<Self> {
+    fn decode_value(data: &[u8]) -> sov_schema_db::schema::Result<Self> {
         Ok(Self::deserialize_reader(&mut &data[..])?)
     }
 }
@@ -265,7 +265,7 @@ define_table_without_codec!(
 );
 
 impl<T: AsRef<[u8]> + PartialEq + core::fmt::Debug> KeyEncoder<JmtValues> for (T, Version) {
-    fn encode_key(&self) -> sov_schema_db::interface::Result<Vec<u8>> {
+    fn encode_key(&self) -> sov_schema_db::schema::Result<Vec<u8>> {
         let mut out =
             Vec::with_capacity(self.0.as_ref().len() + std::mem::size_of::<Version>() + 8);
         self.0
@@ -280,13 +280,13 @@ impl<T: AsRef<[u8]> + PartialEq + core::fmt::Debug> KeyEncoder<JmtValues> for (T
 }
 
 impl<T: AsRef<[u8]> + PartialEq + core::fmt::Debug> SeekKeyEncoder<JmtValues> for (T, Version) {
-    fn encode_seek_key(&self) -> sov_schema_db::interface::Result<Vec<u8>> {
+    fn encode_seek_key(&self) -> sov_schema_db::schema::Result<Vec<u8>> {
         self.encode_key()
     }
 }
 
 impl KeyDecoder<JmtValues> for (StateKey, Version) {
-    fn decode_key(data: &[u8]) -> sov_schema_db::interface::Result<Self> {
+    fn decode_key(data: &[u8]) -> sov_schema_db::schema::Result<Self> {
         let mut cursor = maybestd::io::Cursor::new(data);
         let key = Vec::<u8>::deserialize_reader(&mut cursor)?;
         let version = cursor.read_u64::<BigEndian>()?;
@@ -295,11 +295,11 @@ impl KeyDecoder<JmtValues> for (StateKey, Version) {
 }
 
 impl ValueCodec<JmtValues> for JmtValue {
-    fn encode_value(&self) -> sov_schema_db::interface::Result<Vec<u8>> {
+    fn encode_value(&self) -> sov_schema_db::schema::Result<Vec<u8>> {
         self.try_to_vec().map_err(CodecError::from)
     }
 
-    fn decode_value(data: &[u8]) -> sov_schema_db::interface::Result<Self> {
+    fn decode_value(data: &[u8]) -> sov_schema_db::schema::Result<Self> {
         Ok(Self::deserialize_reader(&mut &data[..])?)
     }
 }
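For context, the codec-generating macros above are what the rest of `tables.rs` uses to declare its tables. A minimal, hypothetical invocation looks like the sketch below; the table name and types are illustrative only (not part of this change), and any key/value types implementing borsh's `BorshSerialize`/`BorshDeserialize` would work:

```rust
// Hypothetical table declaration inside sov-db's tables.rs (illustrative only).
// Per the macro bodies above, this expands to a `::sov_schema_db::schema::Schema`
// impl plus borsh-backed `KeyEncoder`, `KeyDecoder`, and `ValueCodec` impls.
define_table_with_default_codec!(
    /// Maps an example batch number to its 32-byte hash.
    (ExampleBatchHashByNumber) u64 => [u8; 32]
);
```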
diff --git a/full-node/db/sov-schema-db/Cargo.toml b/full-node/db/sov-schema-db/Cargo.toml
index f27e51562..bd9a5745c 100644
--- a/full-node/db/sov-schema-db/Cargo.toml
+++ b/full-node/db/sov-schema-db/Cargo.toml
@@ -1,10 +1,10 @@
 [package]
 name = "sov-schema-db"
 description = "A low level interface transforming RocksDB into a type-oriented data store"
-version = { workspace = true }
 license = "Apache-2.0" # This license is inherited from Aptos
 
 # Workspace inherited keys
+version = { workspace = true }
 authors = { workspace = true }
 edition = { workspace = true }
 homepage = { workspace = true }
diff --git a/full-node/db/sov-schema-db/src/iterator.rs b/full-node/db/sov-schema-db/src/iterator.rs
index 1481d39de..3bb8abc8c 100644
--- a/full-node/db/sov-schema-db/src/iterator.rs
+++ b/full-node/db/sov-schema-db/src/iterator.rs
@@ -1,16 +1,39 @@
 use anyhow::Result;
+use std::iter::FusedIterator;
 use std::marker::PhantomData;
 
-use crate::interface::{KeyDecoder, Schema, SeekKeyEncoder, ValueCodec};
 use crate::metrics::{SCHEMADB_ITER_BYTES, SCHEMADB_ITER_LATENCY_SECONDS};
+use crate::schema::{KeyDecoder, Schema, ValueCodec};
+
+/// This defines a type that can be used to seek a [`SchemaIterator`], via
+/// interfaces like [`SchemaIterator::seek`]. Note that not all
+/// [`KeyEncoder`](crate::schema::KeyEncoder)s are [`SeekKeyEncoder`]s, and
+/// vice versa. E.g.:
+///
+/// - Some key types don't use an encoding that results in sensible
+///   seeking behavior under lexicographic ordering (what RocksDB uses by
+///   default), which means you shouldn't implement [`SeekKeyEncoder`] at all.
+/// - Other key types might maintain full lexicographic order, which means the
+///   original key type can also be [`SeekKeyEncoder`].
+/// - Other key types may be composite, and the first field alone may be
+///   a good candidate for [`SeekKeyEncoder`].
+pub trait SeekKeyEncoder<S: Schema>: Sized {
+    /// Converts `self` to bytes which are used to seek the underlying raw
+    /// iterator.
+    ///
+    /// If `self` is also a [`KeyEncoder`](crate::schema::KeyEncoder), then
+    /// [`SeekKeyEncoder::encode_seek_key`] MUST return the same bytes as
+    /// [`KeyEncoder::encode_key`](crate::schema::KeyEncoder::encode_key).
+    fn encode_seek_key(&self) -> crate::schema::Result<Vec<u8>>;
+}
 
-pub enum ScanDirection {
+pub(crate) enum ScanDirection {
     Forward,
     Backward,
 }
 
 /// DB Iterator parameterized on [`Schema`] that seeks with [`Schema::Key`] and yields
-/// [`Schema::Key`] and [`Schema::Value`]
+/// [`Schema::Key`] and [`Schema::Value`] pairs.
 pub struct SchemaIterator<'a, S> {
     db_iter: rocksdb::DBRawIterator<'a>,
     direction: ScanDirection,
@@ -95,3 +118,5 @@ where
         self.next_impl().transpose()
     }
 }
+
+impl<'a, S> FusedIterator for SchemaIterator<'a, S> where S: Schema {}
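To make the third bullet of the new `SeekKeyEncoder` docs concrete, here is a self-contained sketch. The `VersionedValues` schema, its codecs, and the `KeyPrefix` helper are hypothetical names invented for illustration, not types added by this diff:

```rust
use sov_schema_db::define_schema;
use sov_schema_db::schema::{KeyDecoder, KeyEncoder, Result, ValueCodec};
use sov_schema_db::{CodecError, SeekKeyEncoder};

// Hypothetical column family: composite key `(user key bytes, version)`.
define_schema!(VersionedValues, (Vec<u8>, u64), Vec<u8>, "versioned_values");

impl KeyEncoder<VersionedValues> for (Vec<u8>, u64) {
    fn encode_key(&self) -> Result<Vec<u8>> {
        // User key first, big-endian version last, so all versions of one
        // user key sort together under RocksDB's lexicographic ordering.
        let mut out = self.0.clone();
        out.extend_from_slice(&self.1.to_be_bytes());
        Ok(out)
    }
}

impl KeyDecoder<VersionedValues> for (Vec<u8>, u64) {
    fn decode_key(data: &[u8]) -> Result<Self> {
        if data.len() < 8 {
            return Err(CodecError::InvalidKeyLength {
                expected: 8, // at least the trailing version bytes
                got: data.len(),
            });
        }
        let (key, version) = data.split_at(data.len() - 8);
        let mut buf = [0u8; 8];
        buf.copy_from_slice(version);
        Ok((key.to_vec(), u64::from_be_bytes(buf)))
    }
}

impl ValueCodec<VersionedValues> for Vec<u8> {
    fn encode_value(&self) -> Result<Vec<u8>> {
        Ok(self.clone())
    }

    fn decode_value(data: &[u8]) -> Result<Self> {
        Ok(data.to_vec())
    }
}

/// Seek-only helper: encodes just the user-key half of the composite key,
/// so a seek can land on the first stored version of that user key.
#[derive(Debug, PartialEq)]
pub struct KeyPrefix(pub Vec<u8>);

impl SeekKeyEncoder<VersionedValues> for KeyPrefix {
    fn encode_seek_key(&self) -> Result<Vec<u8>> {
        // A strict prefix of what `KeyEncoder::encode_key` produces above.
        Ok(self.0.clone())
    }
}
```

The per-table macro in `sov-db` (`define_table_with_seek_key_codec!`, above) takes the simpler second-bullet route instead: the key type itself is the seek key, and `encode_seek_key` just delegates to `encode_key`.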
diff --git a/full-node/db/sov-schema-db/src/lib.rs b/full-node/db/sov-schema-db/src/lib.rs
index 62d265480..dbbb4575b 100644
--- a/full-node/db/sov-schema-db/src/lib.rs
+++ b/full-node/db/sov-schema-db/src/lib.rs
@@ -2,6 +2,7 @@
 // Adapted from aptos-core/schemadb
 
 #![forbid(unsafe_code)]
+#![deny(missing_docs)]
 
 //! This library implements a schematized DB on top of [RocksDB](https://rocksdb.org/). It makes
 //! sure all data passed in and out are structured according to predefined schemas and prevents
@@ -13,12 +14,12 @@
 //! [`define_schema!`] macro to define the schema name, the types of key and value, and name of the
 //! column family.
 
-pub mod interface;
-pub mod iterator;
+mod iterator;
 mod metrics;
+pub mod schema;
 
 use anyhow::{format_err, Result};
-use iterator::{ScanDirection, SchemaIterator};
+use iterator::ScanDirection;
 use metrics::{
     SCHEMADB_BATCH_COMMIT_BYTES, SCHEMADB_BATCH_COMMIT_LATENCY_SECONDS,
     SCHEMADB_BATCH_PUT_LATENCY_SECONDS, SCHEMADB_DELETES, SCHEMADB_GET_BYTES,
@@ -29,67 +30,11 @@ use std::{collections::HashMap, path::Path, sync::Mutex};
 use thiserror::Error;
 use tracing::info;
 
-pub use crate::interface::Schema;
-use crate::interface::{ColumnFamilyName, KeyCodec, ValueCodec};
-pub use rocksdb::DEFAULT_COLUMN_FAMILY_NAME;
-
-#[derive(Debug)]
-enum WriteOp {
-    Value { key: Vec<u8>, value: Vec<u8> },
-    Deletion { key: Vec<u8> },
-}
-
-/// `SchemaBatch` holds a collection of updates that can be applied to a DB atomically. The updates
-/// will be applied in the order in which they are added to the `SchemaBatch`.
-#[derive(Debug)]
-pub struct SchemaBatch {
-    rows: Mutex<HashMap<ColumnFamilyName, Vec<WriteOp>>>,
-}
-
-impl Default for SchemaBatch {
-    fn default() -> Self {
-        Self {
-            rows: Mutex::new(HashMap::new()),
-        }
-    }
-}
+use crate::schema::{ColumnFamilyName, KeyCodec, ValueCodec};
 
-impl SchemaBatch {
-    /// Creates an empty batch.
-    pub fn new() -> Self {
-        Self::default()
-    }
-
-    /// Adds an insert/update operation to the batch.
-    pub fn put<S: Schema>(&self, key: &impl KeyCodec<S>, value: &impl ValueCodec<S>) -> Result<()> {
-        let _timer = SCHEMADB_BATCH_PUT_LATENCY_SECONDS
-            .with_label_values(&["unknown"])
-            .start_timer();
-        let key = key.encode_key()?;
-        let value = value.encode_value()?;
-        self.rows
-            .lock()
-            .expect("Lock must not be poisoned")
-            .entry(S::COLUMN_FAMILY_NAME)
-            .or_insert_with(Vec::new)
-            .push(WriteOp::Value { key, value });
-
-        Ok(())
-    }
-
-    /// Adds a delete operation to the batch.
-    pub fn delete<S: Schema>(&self, key: &impl KeyCodec<S>) -> Result<()> {
-        let key = key.encode_key()?;
-        self.rows
-            .lock()
-            .expect("Lock must not be poisoned")
-            .entry(S::COLUMN_FAMILY_NAME)
-            .or_insert_with(Vec::new)
-            .push(WriteOp::Deletion { key });
-
-        Ok(())
-    }
-}
+pub use crate::schema::Schema;
+pub use iterator::{SchemaIterator, SeekKeyEncoder};
+pub use rocksdb::DEFAULT_COLUMN_FAMILY_NAME;
 
 /// This DB is a schematized RocksDB wrapper where all data passed in and out are typed according to
 /// [`Schema`]s.
@@ -100,7 +45,7 @@ pub struct DB {
 }
 
 impl DB {
-    /// Opens a database backed by rocksdb, using the provided column family names and default
+    /// Opens a database backed by RocksDB, using the provided column family names and default
     /// column family options.
     pub fn open(
         path: impl AsRef<Path>,
@@ -121,7 +66,7 @@ impl DB {
         Ok(db)
     }
 
-    /// Open RocksDB with the provided column family descriptors
+    /// Open RocksDB with the provided column family descriptors.
     pub fn open_cf(
         db_opts: &rocksdb::Options,
         path: impl AsRef<Path>,
@@ -148,7 +93,7 @@ impl DB {
 
     /// Open db in secondary mode. A secondary db is does not support writes, but can be dynamically caught up
     /// to the primary instance by a manual call. See https://github.com/facebook/rocksdb/wiki/Read-only-and-Secondary-instances
-    /// for more details
+    /// for more details.
     pub fn open_cf_as_secondary<P: AsRef<Path>>(
         opts: &rocksdb::Options,
         primary_path: P,
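Since the constructors above only show their first parameter in these hunks, here is a hedged sketch of opening the schematized DB. The parameters after `path` (a database name, the column family names, and the `rocksdb::Options`) are assumptions based on the crate's existing API, which this diff does not change, and may differ in detail:

```rust
use anyhow::Result;

// Assumed usage sketch; only `path: impl AsRef<Path>` is visible in the hunks above.
fn open_example_db(path: &std::path::Path) -> Result<sov_schema_db::DB> {
    let mut options = rocksdb::Options::default();
    options.create_if_missing(true);
    options.create_missing_column_families(true);

    sov_schema_db::DB::open(
        path,
        "example-db", // assumed: a name used for logging/metrics
        vec![
            sov_schema_db::DEFAULT_COLUMN_FAMILY_NAME, // re-exported by lib.rs (see above)
            "person_age_by_name",                      // one column family per Schema
        ],
        &options,
    )
}
```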
@@ -215,6 +160,7 @@ impl DB {
     pub fn iter_with_opts<S: Schema>(&self, opts: ReadOptions) -> Result<SchemaIterator<S>> {
         self.iter_with_direction::<S>(opts, ScanDirection::Forward)
     }
+
     /// Returns a backward [`SchemaIterator`] on a certain schema with the default read options.
     pub fn rev_iter<S: Schema>(&self) -> Result<SchemaIterator<S>> {
         self.iter_with_direction::<S>(Default::default(), ScanDirection::Backward)
@@ -283,8 +229,8 @@ impl DB {
         Ok(self.inner.flush_cf(self.get_cf_handle(cf_name)?)?)
     }
 
-    /// Returns the current rocksdb property value for the provided column family name
-    /// and property name
+    /// Returns the current RocksDB property value for the provided column family name
+    /// and property name.
     pub fn get_property(&self, cf_name: &str, property_name: &str) -> Result<u64> {
         self.inner
             .property_int_value_cf(self.get_cf_handle(cf_name)?, property_name)?
@@ -304,12 +250,71 @@
     }
 }
 
+#[derive(Debug)]
+enum WriteOp {
+    Value { key: Vec<u8>, value: Vec<u8> },
+    Deletion { key: Vec<u8> },
+}
+
+/// [`SchemaBatch`] holds a collection of updates that can be applied to a DB
+/// ([`Schema`]) atomically. The updates will be applied in the order in which
+/// they are added to the [`SchemaBatch`].
+#[derive(Debug, Default)]
+pub struct SchemaBatch {
+    rows: Mutex<HashMap<ColumnFamilyName, Vec<WriteOp>>>,
+}
+
+impl SchemaBatch {
+    /// Creates an empty batch.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Adds an insert/update operation to the batch.
+    pub fn put<S: Schema>(&self, key: &impl KeyCodec<S>, value: &impl ValueCodec<S>) -> Result<()> {
+        let _timer = SCHEMADB_BATCH_PUT_LATENCY_SECONDS
+            .with_label_values(&["unknown"])
+            .start_timer();
+        let key = key.encode_key()?;
+        let value = value.encode_value()?;
+        self.rows
+            .lock()
+            .expect("Lock must not be poisoned")
+            .entry(S::COLUMN_FAMILY_NAME)
+            .or_insert_with(Vec::new)
+            .push(WriteOp::Value { key, value });
+
+        Ok(())
+    }
+
+    /// Adds a delete operation to the batch.
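As a usage sketch for the relocated `SchemaBatch`, reusing the `PersonAgeByName` example schema that the `schema.rs` doc examples further down this diff define; the `DB::write_schemas` entry point is assumed from the crate's existing API and is not part of this diff:

```rust
use anyhow::Result;
use sov_schema_db::{SchemaBatch, DB};

// Assumes the `PersonAgeByName` schema (String => u32) from the doc examples
// in schema.rs below is in scope.
fn apply_batch(db: &DB) -> Result<()> {
    let batch = SchemaBatch::new();

    // Updates are buffered in memory, grouped by column family...
    batch.put::<PersonAgeByName>(&"alice".to_string(), &32u32)?;
    batch.put::<PersonAgeByName>(&"bob".to_string(), &29u32)?;
    batch.delete::<PersonAgeByName>(&"carol".to_string())?;

    // ...and applied atomically, in insertion order, by the DB. The
    // `write_schemas` method is assumed here (unchanged by this diff).
    db.write_schemas(batch)
}
```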
+    pub fn delete<S: Schema>(&self, key: &impl KeyCodec<S>) -> Result<()> {
+        let key = key.encode_key()?;
+        self.rows
+            .lock()
+            .expect("Lock must not be poisoned")
+            .entry(S::COLUMN_FAMILY_NAME)
+            .or_insert_with(Vec::new)
+            .push(WriteOp::Deletion { key });
+
+        Ok(())
+    }
+}
+
+/// An error that occurred during (de)serialization of a [`Schema`]'s keys or
+/// values.
 #[derive(Error, Debug)]
 pub enum CodecError {
+    /// Unable to deserialize a key because it has a different length than
+    /// expected.
     #[error("Invalid key length. Expected {expected:}, got {got:}")]
+    #[allow(missing_docs)] // The fields' names are self-explanatory.
     InvalidKeyLength { expected: usize, got: usize },
+    /// Some other error occurred when (de)serializing a key or value. Inspect
+    /// the inner [`anyhow::Error`] for more details.
     #[error(transparent)]
     Wrapped(#[from] anyhow::Error),
+    /// I/O error.
     #[error(transparent)]
     Io(#[from] std::io::Error),
 }
diff --git a/full-node/db/sov-schema-db/src/interface/mod.rs b/full-node/db/sov-schema-db/src/schema.rs
similarity index 50%
rename from full-node/db/sov-schema-db/src/interface/mod.rs
rename to full-node/db/sov-schema-db/src/schema.rs
index b12bbfc82..4906d82dd 100644
--- a/full-node/db/sov-schema-db/src/interface/mod.rs
+++ b/full-node/db/sov-schema-db/src/schema.rs
@@ -2,17 +2,20 @@
 // While most of the Sovereign SDK will be available under both
 // MIT and APACHE 2.0 licenses, this file is
 // licensed under APACHE 2.0 only.
+
+//! A type-safe interface over [`DB`](crate::DB) column families.
+
 use std::fmt::Debug;
 
 use crate::CodecError;
 
 /// Crate users are expected to know [column
 /// family](https://github.com/EighteenZi/rocksdb_wiki/blob/master/Column-Families.md)
-/// names beforehand, so they can be `static`.
+/// names beforehand, so they can have `static` lifetimes.
 pub type ColumnFamilyName = &'static str;
 
-/// This trait defines a schema: an association of a column family name, the key type and the value
-/// type.
+/// A [`Schema`] is a type-safe interface over a specific column family in a
+/// [`DB`](crate::DB). It always has a key type ([`KeyCodec`]) and a value type ([`ValueCodec`]).
 pub trait Schema: Debug + Send + Sync + 'static + Sized {
     /// The column family name associated with this struct.
     /// Note: all schemas within the same SchemaDB must have distinct column family names.
@@ -25,20 +28,72 @@ pub trait Schema: Debug + Send + Sync + 'static + Sized {
     type Value: ValueCodec<Self>;
 }
 
+/// A [`core::result::Result`] alias with [`CodecError`] as the error type.
 pub type Result<T, E = CodecError> = core::result::Result<T, E>;
 
 /// This trait defines a type that can serve as a [`Schema::Key`].
+///
+/// [`KeyCodec`] is a marker trait with a blanket implementation for all types
+/// that are both [`KeyEncoder`] and [`KeyDecoder`]. Having [`KeyEncoder`] and
+/// [`KeyDecoder`] as two standalone traits on top of [`KeyCodec`] may seem
+/// superfluous, but it allows for zero-copy key encoding under specific
+/// circumstances. E.g.:
+///
+/// ```rust
+/// use anyhow::Context;
+///
+/// use sov_schema_db::define_schema;
+/// use sov_schema_db::schema::{
+///     Schema, KeyEncoder, KeyDecoder, ValueCodec, Result,
+/// };
+///
+/// define_schema!(PersonAgeByName, String, u32, "person_age_by_name");
+///
+/// impl KeyEncoder<PersonAgeByName> for String {
+///     fn encode_key(&self) -> Result<Vec<u8>> {
+///         Ok(self.as_bytes().to_vec())
+///     }
+/// }
+///
+/// /// What about encoding a `&str`, though? We'd have to copy it into a
+/// /// `String` first, which is not ideal. But we can do better:
+/// impl<'a> KeyEncoder<PersonAgeByName> for &'a str {
+///     fn encode_key(&self) -> Result<Vec<u8>> {
+///         Ok(self.as_bytes().to_vec())
+///     }
+/// }
+///
+/// impl KeyDecoder<PersonAgeByName> for String {
+///     fn decode_key(data: &[u8]) -> Result<Self> {
+///         Ok(String::from_utf8(data.to_vec()).context("Can't read key")?)
+///     }
+/// }
+///
+/// impl ValueCodec<PersonAgeByName> for u32 {
+///     fn encode_value(&self) -> Result<Vec<u8>> {
+///         Ok(self.to_le_bytes().to_vec())
+///     }
+///
+///     fn decode_value(data: &[u8]) -> Result<Self> {
+///         let mut buf = [0u8; 4];
+///         buf.copy_from_slice(data);
+///         Ok(u32::from_le_bytes(buf))
+///     }
+/// }
+/// ```
 pub trait KeyCodec<S: Schema>: KeyEncoder<S> + KeyDecoder<S> {}
 
 impl<T, S: Schema> KeyCodec<S> for T where T: KeyEncoder<S> + KeyDecoder<S> {}
 
+/// Implementors of this trait can be used to encode keys in the given [`Schema`].
 pub trait KeyEncoder<S: Schema>: Sized + PartialEq + Debug {
-    /// Converts `self` to bytes to be stored in DB.
+    /// Converts `self` to bytes to be stored in RocksDB.
     fn encode_key(&self) -> Result<Vec<u8>>;
 }
 
+/// Implementors of this trait can be used to decode keys in the given [`Schema`].
 pub trait KeyDecoder<S: Schema>: Sized + PartialEq + Debug {
-    /// Converts bytes fetched from DB to `Self`.
+    /// Converts bytes fetched from RocksDB to `Self`.
     fn decode_key(data: &[u8]) -> Result<Self>;
 }
 
@@ -50,24 +105,57 @@ pub trait ValueCodec<S: Schema>: Sized + PartialEq + Debug {
     fn decode_value(data: &[u8]) -> Result<Self>;
 }
 
-/// This defines a type that can be used to seek a [`SchemaIterator`](crate::SchemaIterator), via
-/// interfaces like [`seek`](crate::SchemaIterator::seek).
-pub trait SeekKeyEncoder<S: Schema>: Sized {
-    /// Converts `self` to bytes which is used to seek the underlying raw iterator.
-    fn encode_seek_key(&self) -> Result<Vec<u8>>;
-}
-
+/// A utility macro to define [`Schema`] implementors. You must specify the
+/// [`Schema`] implementor's name, the key type, the value type, and the column
+/// family name.
+///
+/// # Example
+///
+/// ```rust
+/// use anyhow::Context;
+///
+/// use sov_schema_db::define_schema;
+/// use sov_schema_db::schema::{
+///     Schema, KeyEncoder, KeyDecoder, ValueCodec, Result,
+/// };
+///
+/// define_schema!(PersonAgeByName, String, u32, "person_age_by_name");
+///
+/// impl KeyEncoder<PersonAgeByName> for String {
+///     fn encode_key(&self) -> Result<Vec<u8>> {
+///         Ok(self.as_bytes().to_vec())
+///     }
+/// }
+///
+/// impl KeyDecoder<PersonAgeByName> for String {
+///     fn decode_key(data: &[u8]) -> Result<Self> {
+///         Ok(String::from_utf8(data.to_vec()).context("Can't read key")?)
+///     }
+/// }
+///
+/// impl ValueCodec<PersonAgeByName> for u32 {
+///     fn encode_value(&self) -> Result<Vec<u8>> {
+///         Ok(self.to_le_bytes().to_vec())
+///     }
+///
+///     fn decode_value(data: &[u8]) -> Result<Self> {
+///         let mut buf = [0u8; 4];
+///         buf.copy_from_slice(data);
+///         Ok(u32::from_le_bytes(buf))
+///     }
+/// }
+/// ```
 #[macro_export]
 macro_rules! define_schema {
     ($schema_type:ident, $key_type:ty, $value_type:ty, $cf_name:expr) => {
         #[derive(Debug)]
         pub(crate) struct $schema_type;
 
-        impl $crate::interface::Schema for $schema_type {
+        impl $crate::schema::Schema for $schema_type {
             type Key = $key_type;
             type Value = $value_type;
 
-            const COLUMN_FAMILY_NAME: $crate::interface::ColumnFamilyName = $cf_name;
+            const COLUMN_FAMILY_NAME: $crate::schema::ColumnFamilyName = $cf_name;
         }
     };
 }
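For reference, the `define_schema!` invocation used in the doc examples above expands, per the macro body in this diff, to roughly the following; the expansion is written out here only for illustration:

```rust
// Roughly what `define_schema!(PersonAgeByName, String, u32, "person_age_by_name")`
// expands to outside the crate, where `$crate` resolves to `sov_schema_db`.
#[derive(Debug)]
pub(crate) struct PersonAgeByName;

impl sov_schema_db::schema::Schema for PersonAgeByName {
    type Key = String;
    type Value = u32;

    const COLUMN_FAMILY_NAME: sov_schema_db::schema::ColumnFamilyName = "person_age_by_name";
}
```

The key and value types still need the `KeyEncoder`/`KeyDecoder`/`ValueCodec` impls shown in the example for the `Schema` trait's associated-type bounds to hold.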
diff --git a/full-node/db/sov-schema-db/tests/db_test.rs b/full-node/db/sov-schema-db/tests/db_test.rs
index 51426a052..dd7eabdf9 100644
--- a/full-node/db/sov-schema-db/tests/db_test.rs
+++ b/full-node/db/sov-schema-db/tests/db_test.rs
@@ -8,7 +8,7 @@ use tempfile::TempDir;
 
 use sov_schema_db::{
     define_schema,
-    interface::{ColumnFamilyName, KeyDecoder, KeyEncoder, Result, ValueCodec},
+    schema::{ColumnFamilyName, KeyDecoder, KeyEncoder, Result, ValueCodec},
     CodecError,
 };
 use sov_schema_db::{Schema, SchemaBatch, DB};
diff --git a/full-node/db/sov-schema-db/tests/iterator_test.rs b/full-node/db/sov-schema-db/tests/iterator_test.rs
index ab8040c4c..7f92701e2 100644
--- a/full-node/db/sov-schema-db/tests/iterator_test.rs
+++ b/full-node/db/sov-schema-db/tests/iterator_test.rs
@@ -7,10 +7,9 @@ use rocksdb::DEFAULT_COLUMN_FAMILY_NAME;
 
 use sov_schema_db::{
     define_schema,
-    interface::{KeyDecoder, KeyEncoder, SeekKeyEncoder, ValueCodec},
-    CodecError,
+    schema::{KeyDecoder, KeyEncoder, Schema, ValueCodec},
+    CodecError, SchemaIterator, SeekKeyEncoder, DB,
 };
-use sov_schema_db::{iterator::SchemaIterator, Schema, DB};
 
 use tempfile::TempDir;
 
@@ -56,7 +55,7 @@ impl KeyDecoder<TestSchema> for TestKey {
 }
 
 impl SeekKeyEncoder<TestSchema> for TestKey {
-    fn encode_seek_key(&self) -> sov_schema_db::interface::Result<Vec<u8>> {
+    fn encode_seek_key(&self) -> sov_schema_db::schema::Result<Vec<u8>> {
         self.encode_key()
     }
 }