Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Commit

Permalink
Add parity-db variant for trie benchmarks (#5827)
Browse files Browse the repository at this point in the history
* parity-db bench

* use arkady suggestion
  • Loading branch information
NikVolf authored Apr 30, 2020
1 parent c82a0f7 commit 268450a
Show file tree
Hide file tree
Showing 6 changed files with 124 additions and 26 deletions.
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions bin/node/bench/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -30,3 +30,5 @@ fs_extra = "1"
hex = "0.4.0"
rand = { version = "0.7.2", features = ["small_rng"] }
lazy_static = "1.4.0"
parity-util-mem = { version = "0.6.1", default-features = false, features = ["primitive-types"] }
parity-db = { version = "0.1.2" }
4 changes: 2 additions & 2 deletions bin/node/bench/src/core.rs
Original file line number Diff line number Diff line change
Expand Up @@ -139,10 +139,10 @@ pub fn run_benchmark(
}

macro_rules! matrix(
( $var:ident in $over:expr => $tt:expr, $( $rest:tt )* ) => {
( $var:tt in $over:expr => $tt:expr, $( $rest:tt )* ) => {
{
let mut res = Vec::<Box<dyn crate::core::BenchmarkDescription>>::new();
for $var in $over.iter() {
for $var in $over {
res.push(Box::new($tt));
}
res.extend(matrix!( $($rest)* ));
Expand Down
37 changes: 26 additions & 11 deletions bin/node/bench/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@ mod tempdb;
mod state_sizes;

use crate::core::{run_benchmark, Mode as BenchmarkMode};
use crate::tempdb::DatabaseType;
use import::{ImportBenchmarkDescription, SizeType};
use trie::{TrieReadBenchmarkDescription, TrieWriteBenchmarkDescription, DatabaseSize};
use node_testing::bench::{Profile, KeyTypes};
Expand Down Expand Up @@ -66,7 +67,7 @@ fn main() {
}

let benchmarks = matrix!(
profile in [Profile::Wasm, Profile::Native] =>
profile in [Profile::Wasm, Profile::Native].iter() =>
ImportBenchmarkDescription {
profile: *profile,
key_types: KeyTypes::Sr25519,
Expand All @@ -87,22 +88,36 @@ fn main() {
key_types: KeyTypes::Sr25519,
size: SizeType::Full,
},
size in [SizeType::Small, SizeType::Large] =>
size in [SizeType::Small, SizeType::Large].iter() =>
ImportBenchmarkDescription {
profile: Profile::Native,
key_types: KeyTypes::Sr25519,
size: *size,
},
size in [
DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small,
DatabaseSize::Medium, DatabaseSize::Large, DatabaseSize::Huge,
] => TrieReadBenchmarkDescription { database_size: *size },
size in [
DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small,
DatabaseSize::Medium, DatabaseSize::Large, DatabaseSize::Huge,
] => TrieWriteBenchmarkDescription { database_size: *size },
(size, db_type) in
[
DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small,
DatabaseSize::Medium, DatabaseSize::Large, DatabaseSize::Huge,
]
.iter().flat_map(|size|
[
DatabaseType::RocksDb, DatabaseType::ParityDb
]
.iter().map(move |db_type| (size, db_type)))
=> TrieReadBenchmarkDescription { database_size: *size, database_type: *db_type },
(size, db_type) in
[
DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small,
DatabaseSize::Medium, DatabaseSize::Large, DatabaseSize::Huge,
]
.iter().flat_map(|size|
[
DatabaseType::RocksDb, DatabaseType::ParityDb
]
.iter().map(move |db_type| (size, db_type)))
=> TrieWriteBenchmarkDescription { database_size: *size, database_type: *db_type },
);

if opt.list {
for benchmark in benchmarks.iter() {
log::info!("{}: {}", benchmark.name(), benchmark.path().full())
Expand Down
82 changes: 76 additions & 6 deletions bin/node/bench/src/tempdb.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,67 @@
// You should have received a copy of the GNU General Public License
// along with Substrate. If not, see <http://www.gnu.org/licenses/>.

use std::sync::Arc;
use kvdb::KeyValueDB;
use std::{io, sync::Arc};
use kvdb::{KeyValueDB, DBTransaction};
use kvdb_rocksdb::{DatabaseConfig, Database};

/// Which key-value backend a benchmark database is opened with.
///
/// `derive_more::Display` renders the variant name, which is interpolated
/// into the benchmark's human-readable name (see `trie.rs` `name()`).
#[derive(Debug, Clone, Copy, derive_more::Display)]
pub enum DatabaseType {
	/// RocksDB backend, via the `kvdb-rocksdb` crate.
	RocksDb,
	/// parity-db backend, accessed through `ParityDbWrapper`.
	ParityDb,
}

/// A database rooted in a temporary directory; the on-disk data is removed
/// when the inner `TempDir` is dropped.
pub struct TempDatabase(tempfile::TempDir);

/// Newtype so a `KeyValueDB` impl can be provided for `parity_db::Db`
/// (both are foreign to this crate).
struct ParityDbWrapper(parity_db::Db);
// Report zero heap size to parity-util-mem: parity-db's internal allocations
// are not tracked for these benchmarks.
parity_util_mem::malloc_size_of_is_0!(ParityDbWrapper);

// Thin `KeyValueDB` adapter over `parity_db::Db`, letting the trie benchmarks
// drive parity-db through the same interface as RocksDB. Only the methods the
// benchmarks exercise are implemented; the rest are `unimplemented!()` and
// will panic if called.
impl KeyValueDB for ParityDbWrapper {
	/// Get a value by key.
	///
	/// Only the last 32 bytes of `key` are passed to parity-db (the column is
	/// opened with `uniform` fixed-size keys in `TempDatabase::open`).
	/// NOTE(review): the slice index panics if `key` is shorter than 32 bytes —
	/// assumes every key ends in a 32-byte trie node hash; confirm at call sites.
	fn get(&self, col: u32, key: &[u8]) -> io::Result<Option<Vec<u8>>> {
		// `col as u8`: parity-db addresses columns with `u8`; ids > 255 would
		// silently truncate, but the benchmark DB is created with one column.
		Ok(self.0.get(col as u8, &key[key.len() - 32..]).expect("db error"))
	}

	/// Get a value by partial key. Only works for flushed data.
	fn get_by_prefix(&self, _col: u32, _prefix: &[u8]) -> Option<Box<[u8]>> {
		unimplemented!()
	}

	/// Write a transaction of changes to the buffer.
	///
	/// Despite the trait's "buffered" contract, this commits to parity-db
	/// immediately; `flush` is consequently a no-op. Keys are truncated to
	/// their last 32 bytes, mirroring `get`.
	fn write_buffered(&self, transaction: DBTransaction) {
		self.0.commit(
			transaction.ops.iter().map(|op| match op {
				kvdb::DBOp::Insert { col, key, value } => (*col as u8, &key[key.len() - 32..], Some(value.to_vec())),
				kvdb::DBOp::Delete { col, key } => (*col as u8, &key[key.len() - 32..], None),
			})
		).expect("db error");
	}

	/// Flush all buffered data.
	///
	/// No-op: `write_buffered` already committed everything synchronously.
	fn flush(&self) -> io::Result<()> {
		Ok(())
	}

	/// Iterate over flushed data for a given column.
	fn iter<'a>(&'a self, _col: u32) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
		unimplemented!()
	}

	/// Iterate over flushed data for a given column, starting from a given prefix.
	fn iter_from_prefix<'a>(
		&'a self,
		_col: u32,
		_prefix: &'a [u8],
	) -> Box<dyn Iterator<Item = (Box<[u8]>, Box<[u8]>)> + 'a> {
		unimplemented!()
	}

	/// Attempt to replace this database with a new one located at the given path.
	fn restore(&self, _new_db: &str) -> io::Result<()> {
		unimplemented!()
	}
}

impl TempDatabase {
pub fn new() -> Self {
let dir = tempfile::tempdir().expect("temp dir creation failed");
Expand All @@ -32,10 +87,25 @@ impl TempDatabase {
TempDatabase(dir)
}

pub fn open(&mut self) -> Arc<dyn KeyValueDB> {
let db_cfg = DatabaseConfig::with_columns(1);
let db = Database::open(&db_cfg, &self.0.path().to_string_lossy()).expect("Database backend error");
Arc::new(db)
pub fn open(&mut self, db_type: DatabaseType) -> Arc<dyn KeyValueDB> {
match db_type {
DatabaseType::RocksDb => {
let db_cfg = DatabaseConfig::with_columns(1);
let db = Database::open(&db_cfg, &self.0.path().to_string_lossy()).expect("Database backend error");
Arc::new(db)
},
DatabaseType::ParityDb => {
Arc::new(ParityDbWrapper({
let mut options = parity_db::Options::with_columns(self.0.path(), 1);
let mut column_options = &mut options.columns[0];
column_options.ref_counted = true;
column_options.preimage = true;
column_options.uniform = true;
parity_db::Db::open(&options).expect("db open error")
}))
}
}

}
}

Expand Down
23 changes: 16 additions & 7 deletions bin/node/bench/src/trie.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ use crate::{
core::{self, Mode, Path},
generator::generate_trie,
simple_trie::SimpleTrie,
tempdb::TempDatabase,
tempdb::{TempDatabase, DatabaseType},
};

pub const SAMPLE_SIZE: usize = 100;
Expand Down Expand Up @@ -91,13 +91,15 @@ fn pretty_print(v: usize) -> String {

/// Parameters for a trie-read benchmark: how large the generated database
/// is and which backend (RocksDB or parity-db) it runs against.
pub struct TrieReadBenchmarkDescription {
	/// Number-of-keys bucket for the generated trie database.
	pub database_size: DatabaseSize,
	/// Backend the temporary database is opened with.
	pub database_type: DatabaseType,
}

/// Instantiated state for one trie-read benchmark run, produced by
/// `TrieReadBenchmarkDescription::setup`.
pub struct TrieReadBenchmark {
	// Temporary database pre-populated with the generated trie.
	database: TempDatabase,
	// Root hash of the generated trie.
	root: Hash,
	// Keys queried before timing starts — presumably to warm caches;
	// verify against the `run` implementation.
	warmup_keys: KeyValues,
	// Keys whose reads are actually measured (SAMPLE_SIZE of them).
	query_keys: KeyValues,
	// Backend to reopen the database with inside `run`.
	database_type: DatabaseType,
}

impl core::BenchmarkDescription for TrieReadBenchmarkDescription {
Expand Down Expand Up @@ -139,7 +141,7 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription {
assert_eq!(query_keys.len(), SAMPLE_SIZE);

let root = generate_trie(
database.open(),
database.open(self.database_type),
key_values,
);

Expand All @@ -148,14 +150,16 @@ impl core::BenchmarkDescription for TrieReadBenchmarkDescription {
root,
warmup_keys,
query_keys,
database_type: self.database_type,
})
}

fn name(&self) -> Cow<'static, str> {
format!(
"Trie read benchmark({} database ({} keys))",
"Trie read benchmark({} database ({} keys), db_type: {})",
self.database_size,
pretty_print(self.database_size.keys()),
self.database_type,
).into()
}
}
Expand All @@ -173,7 +177,7 @@ impl core::Benchmark for TrieReadBenchmark {
fn run(&mut self, mode: Mode) -> std::time::Duration {
let mut db = self.database.clone();
let storage: Arc<dyn sp_state_machine::Storage<sp_core::Blake2Hasher>> =
Arc::new(Storage(db.open()));
Arc::new(Storage(db.open(self.database_type)));

let trie_backend = sp_state_machine::TrieBackend::new(
storage,
Expand Down Expand Up @@ -208,8 +212,10 @@ impl core::Benchmark for TrieReadBenchmark {

/// Parameters for a trie-write benchmark: how large the generated database
/// is and which backend (RocksDB or parity-db) it runs against.
pub struct TrieWriteBenchmarkDescription {
	/// Number-of-keys bucket for the generated trie database.
	pub database_size: DatabaseSize,
	/// Backend the temporary database is opened with.
	pub database_type: DatabaseType,
}


impl core::BenchmarkDescription for TrieWriteBenchmarkDescription {
fn path(&self) -> Path {
let mut path = Path::new(&["trie", "write"]);
Expand Down Expand Up @@ -245,22 +251,24 @@ impl core::BenchmarkDescription for TrieWriteBenchmarkDescription {
assert_eq!(warmup_keys.len(), SAMPLE_SIZE);

let root = generate_trie(
database.open(),
database.open(self.database_type),
key_values,
);

Box::new(TrieWriteBenchmark {
database,
root,
warmup_keys,
database_type: self.database_type,
})
}

fn name(&self) -> Cow<'static, str> {
format!(
"Trie write benchmark({} database ({} keys))",
"Trie write benchmark({} database ({} keys), db_type = {})",
self.database_size,
pretty_print(self.database_size.keys()),
self.database_type,
).into()
}
}
Expand All @@ -269,13 +277,14 @@ struct TrieWriteBenchmark {
database: TempDatabase,
root: Hash,
warmup_keys: KeyValues,
database_type: DatabaseType,
}

impl core::Benchmark for TrieWriteBenchmark {
fn run(&mut self, mode: Mode) -> std::time::Duration {
let mut rng = rand::thread_rng();
let mut db = self.database.clone();
let kvdb = db.open();
let kvdb = db.open(self.database_type);

let mut new_root = self.root.clone();

Expand Down

0 comments on commit 268450a

Please sign in to comment.