Merge pull request #1348 from ethcore/db-cache-size
Configurable rocksdb cache size
arkpar authored Jun 20, 2016
2 parents 75a3850 + c661643 commit 8b845e5
Showing 16 changed files with 139 additions and 70 deletions.
14 changes: 12 additions & 2 deletions ethcore/src/blockchain/blockchain.rs
@@ -253,12 +253,22 @@ impl BlockChain {
 	// open extras db
 	let mut extras_path = path.to_path_buf();
 	extras_path.push("extras");
-	let extras_db = Database::open_default(extras_path.to_str().unwrap()).unwrap();
+	let extras_db = match config.db_cache_size {
+		None => Database::open_default(extras_path.to_str().unwrap()).unwrap(),
+		Some(cache_size) => Database::open(
+			&DatabaseConfig::with_cache(cache_size/2),
+			extras_path.to_str().unwrap()).unwrap(),
+	};

 	// open blocks db
 	let mut blocks_path = path.to_path_buf();
 	blocks_path.push("blocks");
-	let blocks_db = Database::open_default(blocks_path.to_str().unwrap()).unwrap();
+	let blocks_db = match config.db_cache_size {
+		None => Database::open_default(blocks_path.to_str().unwrap()).unwrap(),
+		Some(cache_size) => Database::open(
+			&DatabaseConfig::with_cache(cache_size/2),
+			blocks_path.to_str().unwrap()).unwrap(),
+	};

 	let mut cache_man = CacheManager{cache_usage: VecDeque::new(), in_use: HashSet::new()};
 	(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
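
Both databases are opened with the same optional-cache pattern, each receiving half of the blockchain's configured budget (cache_size/2). As a minimal sketch, assuming only the util Database and DatabaseConfig types used above (the open_db helper itself is hypothetical, not part of this commit), the pattern factors out as:

	// Open a rocksdb-backed Database, honouring an optional cache budget.
	// Mirrors the two match expressions in the hunk above.
	fn open_db(path: &str, cache_size: Option<usize>) -> Database {
		match cache_size {
			// No configured budget: fall back to rocksdb defaults.
			None => Database::open_default(path).unwrap(),
			// Configured budget: apply it via DatabaseConfig::with_cache.
			Some(size) => Database::open(&DatabaseConfig::with_cache(size), path).unwrap(),
		}
	}
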
3 changes: 3 additions & 0 deletions ethcore/src/blockchain/config.rs
@@ -23,13 +23,16 @@ pub struct Config {
 	pub pref_cache_size: usize,
 	/// Maximum cache size in bytes.
 	pub max_cache_size: usize,
+	/// Backing db cache_size
+	pub db_cache_size: Option<usize>,
 }

 impl Default for Config {
 	fn default() -> Self {
 		Config {
 			pref_cache_size: 1 << 14,
 			max_cache_size: 1 << 20,
+			db_cache_size: None,
 		}
 	}
 }
5 changes: 4 additions & 1 deletion ethcore/src/client/client.rs
@@ -141,7 +141,10 @@ impl<V> Client<V> where V: Verifier {
 	let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path));
 	let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone())));

-	let mut state_db = journaldb::new(&append_path(&path, "state"), config.pruning);
+	let mut state_db = journaldb::new(
+		&append_path(&path, "state"),
+		config.pruning,
+		config.db_cache_size);

 	if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) {
 		state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
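
The matching change to journaldb::new itself lives in util/src/journaldb/mod.rs, one of the files not shown in this excerpt. Inferred from the call sites in this diff, the factory now takes the optional cache size as a third argument and hands it to the chosen backend, roughly:

	// Sketch inferred from call sites; the real definition is not shown in this excerpt.
	pub fn new(path: &str, algorithm: Algorithm, cache_size: Option<usize>) -> Box<JournalDB> {
		match algorithm {
			Algorithm::Archive => Box::new(ArchiveDB::new(path, cache_size)),
			// The remaining backends take cache_size the same way; elided in this sketch.
			_ => unimplemented!(),
		}
	}
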
2 changes: 2 additions & 0 deletions ethcore/src/client/config.rs
@@ -35,4 +35,6 @@ pub struct ClientConfig {
 	pub pruning: journaldb::Algorithm,
 	/// The name of the client instance.
 	pub name: String,
+	/// State db cache-size if not default
+	pub db_cache_size: Option<usize>,
 }
4 changes: 2 additions & 2 deletions ethcore/src/tests/helpers.rs
@@ -302,7 +302,7 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {

 pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
 	let temp = RandomTempPath::new();
-	let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge);
+	let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, None);
 	GuardedTempResult {
 		_temp: temp,
 		result: Some(journal_db)
@@ -319,7 +319,7 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
 }

 pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> {
-	journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge)
+	journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge, None)
 }

 pub fn get_temp_state_in(path: &Path) -> State {
5 changes: 4 additions & 1 deletion ethcore/src/trace/config.rs
@@ -48,6 +48,8 @@ pub struct Config {
 	pub enabled: Switch,
 	/// Traces blooms configuration.
 	pub blooms: BloomConfig,
+	/// Database cache-size if not default
+	pub db_cache_size: Option<usize>,
 }

 impl Default for Config {
@@ -57,7 +59,8 @@ impl Default for Config {
 			blooms: BloomConfig {
 				levels: 3,
 				elements_per_index: 16,
-			}
+			},
+			db_cache_size: None,
 		}
 	}
 }
9 changes: 7 additions & 2 deletions ethcore/src/trace/db.rs
@@ -22,7 +22,7 @@ use std::sync::{RwLock, Arc};
 use std::path::Path;
 use bloomchain::{Number, Config as BloomConfig};
 use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
-use util::{H256, H264, Database, DBTransaction};
+use util::{H256, H264, Database, DatabaseConfig, DBTransaction};
 use header::BlockNumber;
 use trace::{BlockTraces, LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest,
 	DatabaseExtras, Error};
@@ -118,7 +118,12 @@ impl<T> TraceDB<T> where T: DatabaseExtras {
 	pub fn new(config: Config, path: &Path, extras: Arc<T>) -> Result<Self, Error> {
 		let mut tracedb_path = path.to_path_buf();
 		tracedb_path.push("tracedb");
-		let tracesdb = Database::open_default(tracedb_path.to_str().unwrap()).unwrap();
+		let tracesdb = match config.db_cache_size {
+			None => Database::open_default(tracedb_path.to_str().unwrap()).unwrap(),
+			Some(db_cache) => Database::open(
+				&DatabaseConfig::with_cache(db_cache),
+				tracedb_path.to_str().unwrap()).unwrap(),
+		};

 		// check if in previously tracing was enabled
 		let old_tracing = match tracesdb.get(b"enabled").unwrap() {
2 changes: 2 additions & 0 deletions parity/cli.rs
@@ -157,6 +157,7 @@ Footprint Options:
   --cache MEGABYTES      Set total amount of discretionary memory to use for
                          the entire system, overrides other cache and queue
                          options.
+  --db-cache-size MB     Database cache size.

 Import/Export Options:
   --from BLOCK           Export from block BLOCK, which may be an index or
@@ -295,6 +296,7 @@ pub struct Args {
 	pub flag_ipcdisable: bool,
 	pub flag_ipcpath: Option<String>,
 	pub flag_ipcapi: Option<String>,
+	pub flag_db_cache_size: Option<usize>,
 }

 pub fn print_version() {
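
The new flag's value is in megabytes, and docopt parses it into flag_db_cache_size. A hypothetical invocation (the value 128 is illustrative):

	parity --db-cache-size 128

parity/configuration.rs below then divides this single budget among the individual databases.
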
9 changes: 8 additions & 1 deletion parity/configuration.rs
@@ -187,7 +187,7 @@ impl Configuration {
 		let mut latest_era = None;
 		let jdb_types = [journaldb::Algorithm::Archive, journaldb::Algorithm::EarlyMerge, journaldb::Algorithm::OverlayRecent, journaldb::Algorithm::RefCounted];
 		for i in jdb_types.into_iter() {
-			let db = journaldb::new(&append_path(&get_db_path(Path::new(&self.path()), *i, spec.genesis_header().hash()), "state"), *i);
+			let db = journaldb::new(&append_path(&get_db_path(Path::new(&self.path()), *i, spec.genesis_header().hash()), "state"), *i, None);
 			trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era());
 			match (latest_era, db.latest_era()) {
 				(Some(best), Some(this)) if best >= this => {}
@@ -214,13 +214,17 @@ impl Configuration {
 				client_config.blockchain.max_cache_size = self.args.flag_cache_max_size;
 			}
 		}
+		// forced blockchain (blocks + extras) db cache size if provided
+		client_config.blockchain.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 2));

 		client_config.tracing.enabled = match self.args.flag_tracing.as_str() {
 			"auto" => Switch::Auto,
 			"on" => Switch::On,
 			"off" => Switch::Off,
 			_ => { die!("Invalid tracing method given!") }
 		};
+		// forced trace db cache size if provided
+		client_config.tracing.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 4));

 		client_config.pruning = match self.args.flag_pruning.as_str() {
 			"archive" => journaldb::Algorithm::Archive,
@@ -231,6 +235,9 @@
 			_ => { die!("Invalid pruning method given."); }
 		};

+		// forced state db cache size if provided
+		client_config.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 4));
+
 		if self.args.flag_jitvm {
 			client_config.vm_type = VMType::jit().unwrap_or_else(|| die!("Parity built without jit vm."))
 		}
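
Taken together, these assignments apportion the single --db-cache-size budget: half to the blockchain databases (blockchain.rs halves that share again, so blocks and extras each get a quarter), a quarter to the trace db, and a quarter to the state db. With --db-cache-size 128, each of the four databases therefore receives 32 MB. Since and_then(|cs| Some(...)) is equivalent to Option::map, a slightly more idiomatic spelling of the same three lines, behaviour unchanged, would be:

	// Sketch: behaviour-identical alternative to the and_then(.. Some(..)) form above.
	client_config.blockchain.db_cache_size = self.args.flag_db_cache_size.map(|cs| cs / 2);
	client_config.tracing.db_cache_size = self.args.flag_db_cache_size.map(|cs| cs / 4);
	client_config.db_cache_size = self.args.flag_db_cache_size.map(|cs| cs / 4);
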
1 change: 1 addition & 0 deletions parity/migration.rs
@@ -163,6 +163,7 @@ fn migrate_database(version: u32, path: PathBuf, migrations: MigrationManager) -
 	let db_config = DatabaseConfig {
 		prefix_size: None,
 		max_open_files: 64,
+		cache_size: None,
 	};

 	// open old database
23 changes: 12 additions & 11 deletions util/src/journaldb/archivedb.rs
@@ -43,11 +43,12 @@ const DB_VERSION : u32 = 0x103;

 impl ArchiveDB {
 	/// Create a new instance from file
-	pub fn new(path: &str) -> ArchiveDB {
+	pub fn new(path: &str, cache_size: Option<usize>) -> ArchiveDB {
 		let opts = DatabaseConfig {
 			// this must match account_db prefix
 			prefix_size: Some(DB_PREFIX_LEN),
 			max_open_files: 256,
+			cache_size: cache_size,
 		};
 		let backing = Database::open(&opts, path).unwrap_or_else(|e| {
 			panic!("Error opening state db: {}", e);
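
The client threads its configured value through journaldb::new, while the test call sites below simply pass None. A cache-configured construction, assuming the megabyte units implied by the CLI's --db-cache-size MB flag (path and size are illustrative), might look like:

	// Hypothetical call site: state database opened with a 32 MB cache budget.
	let jdb = ArchiveDB::new("/tmp/parity-state", Some(32));
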
@@ -74,7 +75,7 @@ impl ArchiveDB {
 	fn new_temp() -> ArchiveDB {
 		let mut dir = env::temp_dir();
 		dir.push(H32::random().hex());
-		Self::new(dir.to_str().unwrap())
+		Self::new(dir.to_str().unwrap(), None)
 	}

 	fn payload(&self, key: &H256) -> Option<Bytes> {
@@ -327,7 +328,7 @@ mod tests {
 		let bar = H256::random();

 		let foo = {
-			let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
+			let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			jdb.emplace(bar.clone(), b"bar".to_vec());
@@ -336,13 +337,13 @@
 		};

 		{
-			let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
+			let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None);
 			jdb.remove(&foo);
 			jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
 		}

 		{
-			let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
+			let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None);
 			assert!(jdb.exists(&foo));
 			assert!(jdb.exists(&bar));
 			jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
@@ -355,7 +356,7 @@
 		dir.push(H32::random().hex());

 		let foo = {
-			let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
+			let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			jdb.commit(0, &b"0".sha3(), None).unwrap();
@@ -369,7 +370,7 @@
 		};

 		{
-			let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
+			let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None);
 			jdb.remove(&foo);
 			jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
 			assert!(jdb.exists(&foo));
@@ -384,7 +385,7 @@
 		let mut dir = ::std::env::temp_dir();
 		dir.push(H32::random().hex());
 		let (foo, _, _) = {
-			let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
+			let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None);
 			// history is 1
 			let foo = jdb.insert(b"foo");
 			let bar = jdb.insert(b"bar");
@@ -399,7 +400,7 @@
 		};

 		{
-			let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
+			let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None);
 			jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
 			assert!(jdb.exists(&foo));
 		}
@@ -410,14 +411,14 @@
 		let temp = ::devtools::RandomTempPath::new();

 		let key = {
-			let mut jdb = ArchiveDB::new(temp.as_str());
+			let mut jdb = ArchiveDB::new(temp.as_str(), None);
 			let key = jdb.insert(b"foo");
 			jdb.commit(0, &b"0".sha3(), None).unwrap();
 			key
 		};

 		{
-			let jdb = ArchiveDB::new(temp.as_str());
+			let jdb = ArchiveDB::new(temp.as_str(), None);
 			let state = jdb.state(&key);
 			assert!(state.is_some());
 		}
(Diff truncated: the remaining five changed files are not shown.)
