diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs
index e365e3c872a..e00b37d3c4a 100644
--- a/util/journaldb/src/archivedb.rs
+++ b/util/journaldb/src/archivedb.rs
@@ -19,7 +19,7 @@ use std::collections::HashMap;
 use std::collections::hash_map::Entry;
 use std::sync::Arc;
 
-use rlp::*;
+use rlp::{encode, decode};
 use hashdb::*;
 use super::memorydb::*;
 use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
@@ -46,7 +46,8 @@ pub struct ArchiveDB {
 impl ArchiveDB {
 	/// Create a new instance from a key-value db.
 	pub fn new(backing: Arc<KeyValueDB>, col: Option<u32>) -> ArchiveDB {
-		let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::<u64>(&val));
+		let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.")
+			.map(|val| decode::<u64>(&val));
 		ArchiveDB {
 			overlay: MemoryDB::new(),
 			backing: backing,
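Note on the hunk above: the glob `use rlp::*` is narrowed to the two free functions `ArchiveDB` actually uses, and the long `latest_era` line is wrapped. For reference, the latest era is stored under `LATEST_ERA_KEY` as a bare RLP-encoded `u64`; a minimal round-trip sketch, assuming the rlp 0.2-era `encode`/`decode` free functions (where `decode` panics on malformed input):

```rust
extern crate rlp;

use rlp::{encode, decode};

fn main() {
	// The journal stores the latest era as a plain RLP-encoded u64 ...
	let era: u64 = 1_234_567;
	let bytes = encode(&era);

	// ... and ArchiveDB::new reads it back with `.map(|val| decode::<u64>(&val))`.
	let read_back: u64 = decode(&bytes);
	assert_eq!(era, read_back);
}
```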
diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs
index 4ecf993f8c9..d02d27f7ba7 100644
--- a/util/journaldb/src/earlymergedb.rs
+++ b/util/journaldb/src/earlymergedb.rs
@@ -21,7 +21,7 @@ use std::collections::hash_map::Entry;
 use std::sync::Arc;
 use parking_lot::RwLock;
 use heapsize::HeapSizeOf;
-use rlp::*;
+use rlp::{encode, decode};
 use hashdb::*;
 use memorydb::*;
 use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
@@ -30,6 +30,7 @@ use kvdb::{KeyValueDB, DBTransaction};
 use ethereum_types::H256;
 use error::{BaseDataError, UtilError};
 use bytes::Bytes;
+use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef};
 
 #[derive(Debug, Clone, PartialEq, Eq)]
 struct RefInfo {
@@ -111,8 +112,6 @@ pub struct EarlyMergeDB {
 	column: Option<u32>,
 }
 
-const PADDING : [u8; 10] = [ 0u8; 10 ];
-
 impl EarlyMergeDB {
 	/// Create a new instance from file
	pub fn new(backing: Arc<KeyValueDB>, col: Option<u32>) -> EarlyMergeDB {
@@ -267,20 +266,16 @@ impl EarlyMergeDB {
 			let mut era = decode::<u64>(&val);
 			latest_era = Some(era);
 			loop {
-				let mut index = 0usize;
-				while let Some(rlp_data) = db.get(col, {
-					let mut r = RlpStream::new_list(3);
-					r.append(&era);
-					r.append(&index);
-					r.append(&&PADDING[..]);
-					&r.drain()
-				}).expect("Low-level database error.") {
-					let rlp = Rlp::new(&rlp_data);
-					let inserts: Vec<H256> = rlp.list_at(1);
+				let mut db_key = DatabaseKey {
+					era,
+					index: 0usize,
+				};
+				while let Some(rlp_data) = db.get(col, &encode(&db_key)).expect("Low-level database error.") {
+					let inserts = DatabaseValueView::new(&rlp_data).inserts().expect("rlp read from db; qed");
 					Self::replay_keys(&inserts, db, col, &mut refs);
-					index += 1;
+					db_key.index += 1;
 				};
-				if index == 0 || era == 0 {
+				if db_key.index == 0 || era == 0 {
 					break;
 				}
 				era -= 1;
@@ -373,18 +369,17 @@ impl JournalDB for EarlyMergeDB {
 		};
 
 		{
-			let mut index = 0usize;
+			let mut db_key = DatabaseKey {
+				era: now,
+				index: 0usize,
+			};
 			let mut last;
 
 			while self.backing.get(self.column, {
-				let mut r = RlpStream::new_list(3);
-				r.append(&now);
-				r.append(&index);
-				r.append(&&PADDING[..]);
-				last = r.drain();
+				last = encode(&db_key);
 				&last
 			})?.is_some() {
-				index += 1;
+				db_key.index += 1;
 			}
 
 			let drained = self.overlay.drain();
@@ -403,28 +398,25 @@ impl JournalDB for EarlyMergeDB {
 
 			// TODO: check all removes are in the db.
 
-			let mut r = RlpStream::new_list(3);
-			r.append(id);
-
 			// Process the new inserts.
 			// We use the inserts for three things. For each:
 			// - we place into the backing DB or increment the counter if already in;
 			// - we note in the backing db that it was already in;
 			// - we write the key into our journal for this block;
 
-			r.begin_list(inserts.len());
-			for &(k, _) in &inserts {
-				r.append(&k);
-			}
-			r.append_list(&removes);
-
 			Self::insert_keys(&inserts, &*self.backing, self.column, &mut refs, batch);
 
 			let ins = inserts.iter().map(|&(k, _)| k).collect::<Vec<_>>();
+			let value_ref = DatabaseValueRef {
+				id,
+				inserts: &ins,
+				deletes: &removes,
+			};
 
 			trace!(target: "jdb.ops", "  Deletes: {:?}", removes);
 			trace!(target: "jdb.ops", "  Inserts: {:?}", ins);
 
-			batch.put(self.column, &last, r.as_raw());
+			batch.put(self.column, &last, &encode(&value_ref));
 			if self.latest_era.map_or(true, |e| now > e) {
 				batch.put(self.column, &LATEST_ERA_KEY, &encode(&now));
 				self.latest_era = Some(now);
@@ -438,23 +430,22 @@ impl JournalDB for EarlyMergeDB {
 		let mut refs = self.refs.as_ref().unwrap().write();
 
 		// apply old commits' details
-		let mut index = 0usize;
+		let mut db_key = DatabaseKey {
+			era: end_era,
+			index: 0usize,
+		};
 		let mut last;
-		while let Some(rlp_data) = self.backing.get(self.column, {
-			let mut r = RlpStream::new_list(3);
-			r.append(&end_era);
-			r.append(&index);
-			r.append(&&PADDING[..]);
-			last = r.drain();
-			&last
-		})? {
-			let rlp = Rlp::new(&rlp_data);
-			let inserts: Vec<H256> = rlp.list_at(1);
-
-			if canon_id == &rlp.val_at::<H256>(0) {
+		while let Some(rlp_data) = {
+			last = encode(&db_key);
+			self.backing.get(self.column, &last)
+		}? {
+			let view = DatabaseValueView::new(&rlp_data);
+			let inserts = view.inserts().expect("rlp read from db; qed");
+
+			if canon_id == &view.id().expect("rlp read from db; qed") {
 				// Collect keys to be removed. Canon block - remove the (enacted) deletes.
-				let deletes: Vec<H256> = rlp.list_at(2);
+				let deletes = view.deletes().expect("rlp read from db; qed");
 				trace!(target: "jdb.ops", "  Expunging: {:?}", deletes);
 				Self::remove_keys(&deletes, &mut refs, batch, self.column, RemoveFrom::Archive);
@@ -488,10 +479,10 @@
 			}
 
 			batch.delete(self.column, &last);
-			index += 1;
+			db_key.index += 1;
 		}
 
-		trace!(target: "jdb", "EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id);
+		trace!(target: "jdb", "EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, db_key.index, canon_id);
 		trace!(target: "jdb", "OK: {:?}", &*refs);
 
 		Ok(0)
diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs
index 23b56fa03f9..c1fb23b6cd1 100644
--- a/util/journaldb/src/lib.rs
+++ b/util/journaldb/src/lib.rs
@@ -46,6 +46,7 @@ mod archivedb;
 mod earlymergedb;
 mod overlayrecentdb;
 mod refcounteddb;
+mod util;
 
 pub mod overlaydb;
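The recurring pattern this diff removes is the hand-rolled three-item `RlpStream` (era, index, ten bytes of zero padding) that every journal DB rebuilt at each call site. The new `DatabaseKey` type (defined in `util.rs`, shown at the end of this diff) encodes the same structure, so the on-disk key format is unchanged. A self-contained sketch of that equivalence, assuming the rlp 0.2-era API where both `encode` and `RlpStream::drain` yield byte buffers:

```rust
extern crate rlp;

use rlp::{encode, Encodable, RlpStream};

const PADDING: [u8; 10] = [0u8; 10];

// Mirror of the new journal key type from util.rs.
struct DatabaseKey {
	era: u64,
	index: usize,
}

impl Encodable for DatabaseKey {
	fn rlp_append(&self, s: &mut RlpStream) {
		s.begin_list(3);
		s.append(&self.era);
		s.append(&self.index);
		s.append(&&PADDING[..]);
	}
}

fn main() {
	let (era, index) = (42u64, 3usize);

	// Old style: build the key by hand at every call site.
	let mut r = RlpStream::new_list(3);
	r.append(&era);
	r.append(&index);
	r.append(&&PADDING[..]);
	let old_key = r.drain();

	// New style: one Encodable type shared by all journal DBs.
	let new_key = encode(&DatabaseKey { era, index });

	// Byte-for-byte identical, so existing databases keep working.
	assert_eq!(&*old_key, &*new_key);
}
```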
diff --git a/util/journaldb/src/overlaydb.rs b/util/journaldb/src/overlaydb.rs
index 0739bea4807..6ff1b97e8be 100644
--- a/util/journaldb/src/overlaydb.rs
+++ b/util/journaldb/src/overlaydb.rs
@@ -21,7 +21,7 @@ use std::collections::HashMap;
 use std::collections::hash_map::Entry;
 use error::{Result, BaseDataError};
 use ethereum_types::H256;
-use rlp::*;
+use rlp::{UntrustedRlp, RlpStream, Encodable, DecoderError, Decodable, encode, decode};
 use hashdb::*;
 use memorydb::*;
 use kvdb::{KeyValueDB, DBTransaction};
@@ -41,6 +41,39 @@ pub struct OverlayDB {
 	column: Option<u32>,
 }
 
+struct Payload {
+	count: u32,
+	value: DBValue,
+}
+
+impl Payload {
+	fn new(count: u32, value: DBValue) -> Self {
+		Payload {
+			count,
+			value,
+		}
+	}
+}
+
+impl Encodable for Payload {
+	fn rlp_append(&self, s: &mut RlpStream) {
+		s.begin_list(2);
+		s.append(&self.count);
+		s.append(&&*self.value);
+	}
+}
+
+impl Decodable for Payload {
+	fn decode(rlp: &UntrustedRlp) -> ::std::result::Result<Self, DecoderError> {
+		let payload = Payload {
+			count: rlp.val_at(0)?,
+			value: DBValue::from_slice(rlp.at(1)?.data()?),
+		};
+
+		Ok(payload)
+	}
+}
+
 impl OverlayDB {
 	/// Create a new instance of OverlayDB given a `backing` database.
 	pub fn new(backing: Arc<KeyValueDB>, col: Option<u32>) -> OverlayDB {
@@ -71,18 +104,19 @@
 				if rc != 0 {
 					match self.payload(&key) {
 						Some(x) => {
-							let (back_value, back_rc) = x;
-							let total_rc: i32 = back_rc as i32 + rc;
+							let total_rc: i32 = x.count as i32 + rc;
 							if total_rc < 0 {
 								return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
 							}
-							deletes += if self.put_payload_in_batch(batch, &key, (back_value, total_rc as u32)) {1} else {0};
+							let payload = Payload::new(total_rc as u32, x.value);
+							deletes += if self.put_payload_in_batch(batch, &key, &payload) {1} else {0};
 						}
 						None => {
 							if rc < 0 {
 								return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
 							}
-							self.put_payload_in_batch(batch, &key, (value, rc as u32));
+							let payload = Payload::new(rc as u32, value);
+							self.put_payload_in_batch(batch, &key, &payload);
 						}
 					};
 					ret += 1;
@@ -100,22 +134,16 @@
 	pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(key).map_or(0, |(_, refs)| refs) }
 
 	/// Get the refs and value of the given key.
-	fn payload(&self, key: &H256) -> Option<(DBValue, u32)> {
+	fn payload(&self, key: &H256) -> Option<Payload> {
 		self.backing.get(self.column, key)
 			.expect("Low-level database error. Some issue with your hard disk?")
-			.map(|d| {
-				let r = Rlp::new(&d);
-				(DBValue::from_slice(r.at(1).data()), r.at(0).as_val())
-			})
+			.map(|d| decode(&d))
 	}
 
 	/// Put the refs and value of the given key, possibly deleting it from the db.
-	fn put_payload_in_batch(&self, batch: &mut DBTransaction, key: &H256, payload: (DBValue, u32)) -> bool {
-		if payload.1 > 0 {
-			let mut s = RlpStream::new_list(2);
-			s.append(&payload.1);
-			s.append(&&*payload.0);
-			batch.put(self.column, key, s.as_raw());
+	fn put_payload_in_batch(&self, batch: &mut DBTransaction, key: &H256, payload: &Payload) -> bool {
+		if payload.count > 0 {
+			batch.put(self.column, key, &encode(payload));
 			false
 		} else {
 			batch.delete(self.column, key);
@@ -129,7 +157,7 @@ impl HashDB for OverlayDB {
 		let mut ret: HashMap<H256, i32> = self.backing.iter(self.column)
 			.map(|(key, _)| {
 				let h = H256::from_slice(&*key);
-				let r = self.payload(&h).unwrap().1;
+				let r = self.payload(&h).unwrap().count;
 				(h, r as i32)
 			})
 			.collect();
@@ -161,9 +189,8 @@ impl HashDB for OverlayDB {
 		};
 		match self.payload(key) {
 			Some(x) => {
-				let (d, rc) = x;
-				if rc as i32 + memrc > 0 {
-					Some(d)
+				if x.count as i32 + memrc > 0 {
+					Some(x.value)
 				}
 				else {
 					None
@@ -185,8 +212,7 @@ impl HashDB for OverlayDB {
 		let memrc = k.map_or(0, |(_, rc)| rc);
 		match self.payload(key) {
 			Some(x) => {
-				let (_, rc) = x;
-				rc as i32 + memrc > 0
+				x.count as i32 + memrc > 0
 			}
 			// Replace above match arm with this once https://github.com/rust-lang/rust/issues/15287 is done.
 			//Some((d, rc)) if rc + memrc > 0 => true,
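`Payload` replaces the anonymous `(DBValue, u32)` tuple, and moving the RLP layout into `Encodable`/`Decodable` impls keeps `payload` and `put_payload_in_batch` symmetric by construction. A round-trip sketch of the same two-item layout; `DBValue` is modelled as `Vec<u8>` here to keep the sketch self-contained (the real type is hashdb's `ElasticArray128<u8>`):

```rust
extern crate rlp;

use rlp::{encode, decode, Encodable, Decodable, RlpStream, UntrustedRlp, DecoderError};

// Stand-in for hashdb's DBValue.
type DBValue = Vec<u8>;

// Mirror of OverlayDB's new Payload type: a reference count plus the value.
struct Payload {
	count: u32,
	value: DBValue,
}

impl Encodable for Payload {
	fn rlp_append(&self, s: &mut RlpStream) {
		s.begin_list(2);
		s.append(&self.count);
		s.append(&&*self.value);
	}
}

impl Decodable for Payload {
	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
		Ok(Payload {
			count: rlp.val_at(0)?,
			value: rlp.at(1)?.data()?.to_vec(),
		})
	}
}

fn main() {
	let payload = Payload { count: 2, value: b"node body".to_vec() };

	// put_payload_in_batch writes `encode(payload)`; payload() decodes it back.
	let bytes = encode(&payload);
	let read_back: Payload = decode(&bytes);
	assert_eq!(read_back.count, 2);
	assert_eq!(&read_back.value[..], &b"node body"[..]);
}
```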
diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs
index bb6663991b0..9ac8a4e294f 100644
--- a/util/journaldb/src/overlayrecentdb.rs
+++ b/util/journaldb/src/overlayrecentdb.rs
@@ -21,7 +21,7 @@ use std::collections::hash_map::Entry;
 use std::sync::Arc;
 use parking_lot::RwLock;
 use heapsize::HeapSizeOf;
-use rlp::*;
+use rlp::{UntrustedRlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable};
 use hashdb::*;
 use memorydb::*;
 use super::{DB_PREFIX_LEN, LATEST_ERA_KEY};
@@ -31,6 +31,7 @@ use ethereum_types::H256;
 use plain_hasher::H256FastMap;
 use error::{BaseDataError, UtilError};
 use bytes::Bytes;
+use util::DatabaseKey;
 
 /// Implementation of the `JournalDB` trait for a disk-backed database with a memory overlay
 /// and, possibly, latent-removal semantics.
@@ -70,6 +71,52 @@ pub struct OverlayRecentDB {
 	column: Option<u32>,
 }
 
+struct DatabaseValue {
+	id: H256,
+	inserts: Vec<(H256, DBValue)>,
+	deletes: Vec<H256>,
+}
+
+impl Decodable for DatabaseValue {
+	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
+		let id = rlp.val_at(0)?;
+		let inserts = rlp.at(1)?.iter().map(|r| {
+			let k = r.val_at(0)?;
+			let v = DBValue::from_slice(r.at(1)?.data()?);
+			Ok((k, v))
+		}).collect::<Result<Vec<_>, _>>()?;
+		let deletes = rlp.list_at(2)?;
+
+		let value = DatabaseValue {
+			id,
+			inserts,
+			deletes,
+		};
+
+		Ok(value)
+	}
+}
+
+struct DatabaseValueRef<'a> {
+	id: &'a H256,
+	inserts: &'a [(H256, DBValue)],
+	deletes: &'a [H256],
+}
+
+impl<'a> Encodable for DatabaseValueRef<'a> {
+	fn rlp_append(&self, s: &mut RlpStream) {
+		s.begin_list(3);
+		s.append(self.id);
+		s.begin_list(self.inserts.len());
+		for kv in self.inserts {
+			s.begin_list(2);
+			s.append(&kv.0);
+			s.append(&&*kv.1);
+		}
+		s.append_list(self.deletes);
+	}
+}
+
 #[derive(PartialEq)]
 struct JournalOverlay {
 	backing_overlay: MemoryDB, // Nodes added in the history period
@@ -104,8 +151,6 @@ impl Clone for OverlayRecentDB {
 	}
 }
 
-const PADDING : [u8; 10] = [ 0u8; 10 ];
-
 impl OverlayRecentDB {
 	/// Create a new instance.
	pub fn new(backing: Arc<KeyValueDB>, col: Option<u32>) -> OverlayRecentDB {
@@ -144,43 +189,34 @@ impl OverlayRecentDB {
 			let mut era = decode::<u64>(&val);
 			latest_era = Some(era);
 			loop {
-				let mut index = 0usize;
-				while let Some(rlp_data) = db.get(col, {
-					let mut r = RlpStream::new_list(3);
-					r.append(&era);
-					r.append(&index);
-					r.append(&&PADDING[..]);
-					&r.drain()
-				}).expect("Low-level database error.") {
-					trace!("read_overlay: era={}, index={}", era, index);
-					let rlp = Rlp::new(&rlp_data);
-					let id: H256 = rlp.val_at(0);
-					let insertions = rlp.at(1);
-					let deletions: Vec<H256> = rlp.list_at(2);
+				let mut db_key = DatabaseKey {
+					era,
+					index: 0usize,
+				};
+				while let Some(rlp_data) = db.get(col, &encode(&db_key)).expect("Low-level database error.") {
+					trace!("read_overlay: era={}, index={}", era, db_key.index);
+					let value = decode::<DatabaseValue>(&rlp_data);
+					count += value.inserts.len();
 					let mut inserted_keys = Vec::new();
-					for r in insertions.iter() {
-						let k: H256 = r.val_at(0);
-						let v = r.at(1).data();
-
+					for (k, v) in value.inserts {
 						let short_key = to_short_key(&k);
 
 						if !overlay.contains(&short_key) {
 							cumulative_size += v.len();
 						}
 
-						overlay.emplace(short_key, DBValue::from_slice(v));
+						overlay.emplace(short_key, v);
 						inserted_keys.push(k);
-						count += 1;
 					}
 					journal.entry(era).or_insert_with(Vec::new).push(JournalEntry {
-						id: id,
+						id: value.id,
 						insertions: inserted_keys,
-						deletions: deletions,
+						deletions: value.deletes,
 					});
-					index += 1;
+					db_key.index += 1;
 					earliest_era = Some(era);
 				};
-				if index == 0 || era == 0 {
+				if db_key.index == 0 || era == 0 {
 					break;
 				}
 				era -= 1;
@@ -196,8 +232,6 @@ impl OverlayRecentDB {
 			cumulative_size: cumulative_size,
 		}
 	}
-
-
 }
 
 #[inline]
@@ -256,22 +290,24 @@ impl JournalDB for OverlayRecentDB {
 		// flush previous changes
 		journal_overlay.pending_overlay.clear();
 
-		let mut r = RlpStream::new_list(3);
 		let mut tx = self.transaction_overlay.drain();
 		let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect();
 		let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect();
 		let ops = inserted_keys.len() + removed_keys.len();
 
 		// Increase counter for each inserted key no matter if the block is canonical or not.
-		let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None });
+		let insertions: Vec<_> = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }).collect();
+
+		let encoded_value = {
+			let value_ref = DatabaseValueRef {
+				id,
+				inserts: &insertions,
+				deletes: &removed_keys,
+			};
+			encode(&value_ref)
+		};
 
-		r.append(id);
-		r.begin_list(inserted_keys.len());
 		for (k, v) in insertions {
-			r.begin_list(2);
-			r.append(&k);
-			r.append(&&*v);
-
 			let short_key = to_short_key(&k);
 			if !journal_overlay.backing_overlay.contains(&short_key) {
 				journal_overlay.cumulative_size += v.len();
@@ -279,14 +315,14 @@ impl JournalDB for OverlayRecentDB {
 			journal_overlay.backing_overlay.emplace(short_key, v);
 		}
 
-		r.append_list(&removed_keys);
-
-		let mut k = RlpStream::new_list(3);
 		let index = journal_overlay.journal.get(&now).map_or(0, |j| j.len());
-		k.append(&now);
-		k.append(&index);
-		k.append(&&PADDING[..]);
-		batch.put_vec(self.column, &k.drain(), r.out());
+		let db_key = DatabaseKey {
+			era: now,
+			index,
+		};
+
+		batch.put_vec(self.column, &encode(&db_key), encoded_value.into_vec());
 		if journal_overlay.latest_era.map_or(true, |e| now > e) {
 			trace!(target: "journaldb", "Set latest era to {}", now);
 			batch.put_vec(self.column, &LATEST_ERA_KEY, encode(&now).into_vec());
@@ -317,11 +353,11 @@ impl JournalDB for OverlayRecentDB {
 			let mut index = 0usize;
 			for mut journal in records.drain(..) {
 				//delete the record from the db
-				let mut r = RlpStream::new_list(3);
-				r.append(&end_era);
-				r.append(&index);
-				r.append(&&PADDING[..]);
-				batch.delete(self.column, &r.drain());
+				let db_key = DatabaseKey {
+					era: end_era,
+					index,
+				};
+				batch.delete(self.column, &encode(&db_key));
 				trace!(target: "journaldb", "Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len());
 				{
 					if *canon_id == journal.id {
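`OverlayRecentDB` keeps its own `DatabaseValue`/`DatabaseValueRef` pair rather than reusing the `util.rs` types because its journal records carry full `(H256, DBValue)` insert pairs, not just keys. The split is deliberately asymmetric: the borrowed form implements only `Encodable` (the write path already owns the data), the owned form only `Decodable` (the read path must allocate anyway). A round-trip sketch of that pairing, again with `DBValue` modelled as `Vec<u8>` for self-containment:

```rust
extern crate rlp;
extern crate ethereum_types;

use ethereum_types::H256;
use rlp::{encode, decode, Encodable, Decodable, RlpStream, UntrustedRlp, DecoderError};

type DBValue = Vec<u8>; // stand-in for hashdb's DBValue

// Owned form: used when decoding a journal record back out of the db.
struct DatabaseValue {
	id: H256,
	inserts: Vec<(H256, DBValue)>,
	deletes: Vec<H256>,
}

impl Decodable for DatabaseValue {
	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
		let id = rlp.val_at(0)?;
		let inserts = rlp.at(1)?.iter().map(|r| {
			Ok((r.val_at(0)?, r.at(1)?.data()?.to_vec()))
		}).collect::<Result<Vec<_>, _>>()?;
		let deletes = rlp.list_at(2)?;
		Ok(DatabaseValue { id, inserts, deletes })
	}
}

// Borrowed form: used when encoding, so nothing is cloned just to write.
struct DatabaseValueRef<'a> {
	id: &'a H256,
	inserts: &'a [(H256, DBValue)],
	deletes: &'a [H256],
}

impl<'a> Encodable for DatabaseValueRef<'a> {
	fn rlp_append(&self, s: &mut RlpStream) {
		s.begin_list(3);
		s.append(self.id);
		s.begin_list(self.inserts.len());
		for kv in self.inserts {
			s.begin_list(2);
			s.append(&kv.0);
			s.append(&&*kv.1);
		}
		s.append_list(self.deletes);
	}
}

fn main() {
	let id = H256::from([1u8; 32]);
	let inserts = vec![(H256::from([2u8; 32]), b"value".to_vec())];
	let deletes = vec![H256::from([3u8; 32])];

	let bytes = encode(&DatabaseValueRef { id: &id, inserts: &inserts, deletes: &deletes });
	let back: DatabaseValue = decode(&bytes);
	assert_eq!(back.id, id);
	assert_eq!(back.inserts.len(), 1);
	assert_eq!(back.deletes, deletes);
}
```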
diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs
index 4772c29b3ff..9cd94dbec7d 100644
--- a/util/journaldb/src/refcounteddb.rs
+++ b/util/journaldb/src/refcounteddb.rs
@@ -19,7 +19,7 @@ use std::collections::HashMap;
 use std::sync::Arc;
 use heapsize::HeapSizeOf;
 
-use rlp::*;
+use rlp::{encode, decode};
 use hashdb::*;
 use overlaydb::OverlayDB;
 use memorydb::MemoryDB;
@@ -29,6 +29,7 @@ use kvdb::{KeyValueDB, DBTransaction};
 use ethereum_types::H256;
 use error::UtilError;
 use bytes::Bytes;
+use util::{DatabaseKey, DatabaseValueView, DatabaseValueRef};
 
 /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
 /// and latent-removal semantics.
@@ -59,12 +60,11 @@ pub struct RefCountedDB {
 	column: Option<u32>,
 }
 
-const PADDING : [u8; 10] = [ 0u8; 10 ];
-
 impl RefCountedDB {
 	/// Create a new instance given a `backing` database.
 	pub fn new(backing: Arc<KeyValueDB>, col: Option<u32>) -> RefCountedDB {
-		let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::<u64>(&val));
+		let latest_era = backing.get(col, &LATEST_ERA_KEY).expect("Low-level database error.")
+			.map(|val| decode::<u64>(&val));
 
 		RefCountedDB {
 			forward: OverlayDB::new(backing.clone(), col),
@@ -118,29 +118,32 @@ impl JournalDB for RefCountedDB {
 	fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> Result<u32, UtilError> {
 		// record new commit's details.
-		let mut index = 0usize;
+		let mut db_key = DatabaseKey {
+			era: now,
+			index: 0usize,
+		};
 		let mut last;
 		while self.backing.get(self.column, {
-			let mut r = RlpStream::new_list(3);
-			r.append(&now);
-			r.append(&index);
-			r.append(&&PADDING[..]);
-			last = r.drain();
+			last = encode(&db_key);
 			&last
 		})?.is_some() {
-			index += 1;
+			db_key.index += 1;
 		}
 
-		let mut r = RlpStream::new_list(3);
-		r.append(id);
-		r.append_list(&self.inserts);
-		r.append_list(&self.removes);
-		batch.put(self.column, &last, r.as_raw());
+		{
+			let value_ref = DatabaseValueRef {
+				id,
+				inserts: &self.inserts,
+				deletes: &self.removes,
+			};
+
+			batch.put(self.column, &last, &encode(&value_ref));
+		}
 
 		let ops = self.inserts.len() + self.removes.len();
 
-		trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, index, id, self.inserts, self.removes);
+		trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes);
 
 		self.inserts.clear();
 		self.removes.clear();
@@ -155,27 +158,30 @@ impl JournalDB for RefCountedDB {
 	fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
 		// apply old commits' details
-		let mut index = 0usize;
+		let mut db_key = DatabaseKey {
+			era: end_era,
+			index: 0usize,
+		};
 		let mut last;
 		while let Some(rlp_data) = {
 			self.backing.get(self.column, {
-				let mut r = RlpStream::new_list(3);
-				r.append(&end_era);
-				r.append(&index);
-				r.append(&&PADDING[..]);
-				last = r.drain();
+				last = encode(&db_key);
 				&last
 			})?
 		} {
-			let rlp = Rlp::new(&rlp_data);
-			let our_id: H256 = rlp.val_at(0);
-			let to_remove: Vec<H256> = rlp.list_at(if *canon_id == our_id {2} else {1});
-			trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, index, our_id, canon_id, to_remove);
+			let view = DatabaseValueView::new(&rlp_data);
+			let our_id = view.id().expect("rlp read from db; qed");
+			let to_remove = if canon_id == &our_id {
+				view.deletes()
+			} else {
+				view.inserts()
+			}.expect("rlp read from db; qed");
+			trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove);
 			for i in &to_remove {
 				self.forward.remove(i);
 			}
 			batch.delete(self.column, &last);
-			index += 1;
+			db_key.index += 1;
 		}
 
 		let r = self.forward.commit_to_batch(batch)?;
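All three journal DBs locate the next free journal slot the same way: probe `(era, 0)`, `(era, 1)`, … until a key is absent. The refactor changes who builds the probe key (`encode(&db_key)` instead of an inline `RlpStream`), not the scan itself. A toy model of the scan, with a `HashMap` standing in for the backing store and a plain tuple standing in for the RLP-encoded `DatabaseKey`:

```rust
use std::collections::HashMap;

// Key layout shared by the journal DBs, modelled as (era, index);
// the real code RLP-encodes a DatabaseKey with trailing padding.
type Key = (u64, usize);

// Find the next free journal slot for `era` by probing successive indices,
// the same linear scan journal_under performs against the backing store.
fn next_free_index(db: &HashMap<Key, Vec<u8>>, era: u64) -> usize {
	let mut index = 0usize;
	while db.contains_key(&(era, index)) {
		index += 1;
	}
	index
}

fn main() {
	let mut db = HashMap::new();
	db.insert((7, 0), b"first journal record".to_vec());
	db.insert((7, 1), b"fork record at the same era".to_vec());

	// Two records already journalled for era 7, so the next slot is 2.
	assert_eq!(next_free_index(&db, 7), 2);
	assert_eq!(next_free_index(&db, 8), 0);
}
```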
diff --git a/util/journaldb/src/util.rs b/util/journaldb/src/util.rs
new file mode 100644
index 00000000000..45f020eed32
--- /dev/null
+++ b/util/journaldb/src/util.rs
@@ -0,0 +1,76 @@
+// Copyright 2015-2018 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use ethereum_types::H256;
+use rlp::{RlpStream, Encodable, UntrustedRlp, DecoderError};
+
+const PADDING : [u8; 10] = [ 0u8; 10 ];
+
+pub struct DatabaseKey {
+	pub era: u64,
+	pub index: usize,
+}
+
+impl Encodable for DatabaseKey {
+	fn rlp_append(&self, s: &mut RlpStream) {
+		s.begin_list(3);
+		s.append(&self.era);
+		s.append(&self.index);
+		s.append(&&PADDING[..]);
+	}
+}
+
+pub struct DatabaseValueView<'a> {
+	rlp: UntrustedRlp<'a>,
+}
+
+impl<'a> DatabaseValueView<'a> {
+	pub fn new(data: &'a [u8]) -> Self {
+		DatabaseValueView {
+			rlp: UntrustedRlp::new(data),
+		}
+	}
+
+	#[inline]
+	pub fn id(&self) -> Result<H256, DecoderError> {
+		self.rlp.val_at(0)
+	}
+
+	#[inline]
+	pub fn inserts(&self) -> Result<Vec<H256>, DecoderError> {
+		self.rlp.list_at(1)
+	}
+
+	#[inline]
+	pub fn deletes(&self) -> Result<Vec<H256>, DecoderError> {
+		self.rlp.list_at(2)
+	}
+}
+
+pub struct DatabaseValueRef<'a> {
+	pub id: &'a H256,
+	pub inserts: &'a [H256],
+	pub deletes: &'a [H256],
+}
+
+impl<'a> Encodable for DatabaseValueRef<'a> {
+	fn rlp_append(&self, s: &mut RlpStream) {
+		s.begin_list(3);
+		s.append(self.id);
+		s.append_list(self.inserts);
+		s.append_list(self.deletes);
+	}
+}
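Putting the three helpers together: keys are written with `DatabaseKey`, records with `DatabaseValueRef`, and read back through `DatabaseValueView`, whose accessors are fallible — which is why the call sites above attach `expect("rlp read from db; qed")` proofs. A usage sketch, written as if it were a `#[cfg(test)]` module appended to `util.rs` (hypothetical; no such tests are part of this diff):

```rust
#[cfg(test)]
mod tests {
	use super::{DatabaseKey, DatabaseValueRef, DatabaseValueView};
	use ethereum_types::H256;
	use rlp::encode;

	#[test]
	fn journal_record_round_trip() {
		// Key for the third journal entry of era 5.
		let key = encode(&DatabaseKey { era: 5, index: 2 });
		assert!(!key.is_empty());

		// Write side: borrow the id and key lists, encode once.
		let id = H256::from([1u8; 32]);
		let inserts = vec![H256::from([2u8; 32])];
		let deletes = vec![H256::from([3u8; 32])];
		let record = encode(&DatabaseValueRef {
			id: &id,
			inserts: &inserts,
			deletes: &deletes,
		});

		// Read side: the view decodes each field lazily and fallibly.
		let view = DatabaseValueView::new(&record);
		assert_eq!(view.id().expect("valid rlp"), id);
		assert_eq!(view.inserts().expect("valid rlp"), inserts);
		assert_eq!(view.deletes().expect("valid rlp"), deletes);
	}
}
```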