diff --git a/Cargo.lock b/Cargo.lock
index 6f2c8a0380c..dcfd0f61da7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1666,6 +1666,7 @@ dependencies = [
 "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f"
 "checksum libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "97def9dc7ce1d8e153e693e3a33020bc69972181adb2f871e87e888876feae49"
 "checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054"
+"checksum lru-cache 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "42d50dcb5d9f145df83b1043207e1ac0c37c9c779c4e128ca4655abc3f3cbf8c"
 "checksum matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "15305656809ce5a4805b1ff2946892810992197ce1270ff79baded852187942e"
 "checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
 "checksum mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a74cc2587bf97c49f3f5bab62860d6abf3902ca73b66b51d9b049fbdcd727bd2"
diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs
index fa4805c9dad..0165ccd0f7b 100644
--- a/ethcore/src/blockchain/blockchain.rs
+++ b/ethcore/src/blockchain/blockchain.rs
@@ -165,7 +165,7 @@ pub struct BlockChain {
 	pending_best_block: RwLock<Option<BestBlock>>,
 	pending_block_hashes: RwLock<HashMap<BlockNumber, H256>>,
-	pending_transaction_addresses: RwLock<HashMap<H256, TransactionAddress>>,
+	pending_transaction_addresses: RwLock<HashMap<H256, Option<TransactionAddress>>>,
 }
 
 impl BlockProvider for BlockChain {
@@ -584,8 +584,8 @@ impl BlockChain {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: self.prepare_block_details_update(bytes, &info),
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
-			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			info: info,
 			block: bytes
 		}, is_best);
@@ -618,8 +618,8 @@ impl BlockChain {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: update,
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
-			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			info: info,
 			block: bytes,
 		}, is_best);
@@ -687,8 +687,8 @@ impl BlockChain {
 			block_hashes: self.prepare_block_hashes_update(bytes, &info),
 			block_details: self.prepare_block_details_update(bytes, &info),
 			block_receipts: self.prepare_block_receipts_update(receipts, &info),
-			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			blocks_blooms: self.prepare_block_blooms_update(bytes, &info),
+			transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info),
 			info: info.clone(),
 			block: bytes,
 		}, true);
@@ -744,8 +744,9 @@ impl BlockChain {
 		let mut write_details = self.block_details.write();
 		batch.extend_with_cache(DB_COL_EXTRA, &mut *write_details, update.block_details, CacheUpdatePolicy::Overwrite);
 
+		let mut cache_man = self.cache_man.write();
 		for hash in block_hashes.into_iter() {
-			self.note_used(CacheID::BlockDetails(hash));
+			cache_man.note_used(CacheID::BlockDetails(hash));
 		}
 	}
 
@@ -780,7 +781,7 @@ impl BlockChain {
 			let mut write_txs = self.pending_transaction_addresses.write();
 
 			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_hashes, update.block_hashes, CacheUpdatePolicy::Overwrite);
-			batch.extend_with_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
+			batch.extend_with_option_cache(DB_COL_EXTRA, &mut *write_txs, update.transactions_addresses, CacheUpdatePolicy::Overwrite);
 		}
 	}
@@ -798,18 +799,26 @@ impl BlockChain {
 			*best_block = block;
 		}
 
+		let pending_txs = mem::replace(&mut *pending_write_txs, HashMap::new());
+		let (retracted_txs, enacted_txs) = pending_txs.into_iter().partition::<HashMap<_, _>, _>(|&(_, ref value)| value.is_none());
+
 		let pending_hashes_keys: Vec<_> = pending_write_hashes.keys().cloned().collect();
-		let pending_txs_keys: Vec<_> = pending_write_txs.keys().cloned().collect();
+		let enacted_txs_keys: Vec<_> = enacted_txs.keys().cloned().collect();
 
 		write_hashes.extend(mem::replace(&mut *pending_write_hashes, HashMap::new()));
-		write_txs.extend(mem::replace(&mut *pending_write_txs, HashMap::new()));
+		write_txs.extend(enacted_txs.into_iter().map(|(k, v)| (k, v.expect("Transactions were partitioned; qed"))));
+		for hash in retracted_txs.keys() {
+			write_txs.remove(hash);
+		}
 
+		let mut cache_man = self.cache_man.write();
 		for n in pending_hashes_keys.into_iter() {
-			self.note_used(CacheID::BlockHashes(n));
+			cache_man.note_used(CacheID::BlockHashes(n));
 		}
 
-		for hash in pending_txs_keys.into_iter() {
-			self.note_used(CacheID::TransactionAddresses(hash));
+		for hash in enacted_txs_keys {
+			cache_man.note_used(CacheID::TransactionAddresses(hash));
 		}
 	}
 
@@ -910,7 +919,7 @@ impl BlockChain {
 	}
 
 	/// This function returns modified transaction addresses.
-	fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, TransactionAddress> {
+	fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap<H256, Option<TransactionAddress>> {
 		let block = BlockView::new(block_bytes);
 		let transaction_hashes = block.transaction_hashes();
 
@@ -919,10 +928,10 @@ impl BlockChain {
 				transaction_hashes.into_iter()
 					.enumerate()
 					.map(|(i ,tx_hash)| {
-						(tx_hash, TransactionAddress {
+						(tx_hash, Some(TransactionAddress {
 							block_hash: info.hash.clone(),
 							index: i
-						})
+						}))
 					})
 					.collect()
 			},
@@ -933,23 +942,30 @@ impl BlockChain {
 					let hashes = BodyView::new(&bytes).transaction_hashes();
 					hashes.into_iter()
 						.enumerate()
-						.map(|(i, tx_hash)| (tx_hash, TransactionAddress {
+						.map(|(i, tx_hash)| (tx_hash, Some(TransactionAddress {
 							block_hash: hash.clone(),
 							index: i,
-						}))
-						.collect::<HashMap<H256, TransactionAddress>>()
+						})))
+						.collect::<HashMap<H256, Option<TransactionAddress>>>()
 				});
 
 				let current_addresses = transaction_hashes.into_iter()
 					.enumerate()
 					.map(|(i ,tx_hash)| {
-						(tx_hash, TransactionAddress {
+						(tx_hash, Some(TransactionAddress {
 							block_hash: info.hash.clone(),
 							index: i
-						})
+						}))
 					});
 
-				addresses.chain(current_addresses).collect()
+				let retracted = data.retracted.iter().flat_map(|hash| {
+					let bytes = self.block_body(hash).expect("Retracted block must be in database.");
+					let hashes = BodyView::new(&bytes).transaction_hashes();
+					hashes.into_iter().map(|hash| (hash, None)).collect::<HashMap<H256, Option<TransactionAddress>>>()
+				});
+
+				// The order here is important! Don't remove transaction if it was part of enacted blocks as well.
+				retracted.chain(addresses).chain(current_addresses).collect()
 			},
 			BlockLocation::Branch => HashMap::new(),
 		}
 	}
@@ -1248,6 +1264,70 @@ mod tests {
 		// TODO: insert block that already includes one of them as an uncle to check it's not allowed.
 	}
 
+	#[test]
+	fn test_fork_transaction_addresses() {
+		let mut canon_chain = ChainGenerator::default();
+		let mut finalizer = BlockFinalizer::default();
+		let genesis = canon_chain.generate(&mut finalizer).unwrap();
+		let mut fork_chain = canon_chain.fork(1);
+		let mut fork_finalizer = finalizer.fork();
+
+		let t1 = Transaction {
+			nonce: 0.into(),
+			gas_price: 0.into(),
+			gas: 100_000.into(),
+			action: Action::Create,
+			value: 100.into(),
+			data: "601080600c6000396000f3006000355415600957005b60203560003555".from_hex().unwrap(),
+		}.sign(&"".sha3());
+
+		let b1a = canon_chain
+			.with_transaction(t1.clone())
+			.generate(&mut finalizer).unwrap();
+
+		// Empty block
+		let b1b = fork_chain
+			.generate(&mut fork_finalizer).unwrap();
+
+		let b2 = fork_chain
+			.generate(&mut fork_finalizer).unwrap();
+
+		let b1a_hash = BlockView::new(&b1a).header_view().sha3();
+		let b2_hash = BlockView::new(&b2).header_view().sha3();
+
+		let t1_hash = t1.hash();
+
+		let temp = RandomTempPath::new();
+		let db = new_db(temp.as_str());
+		let bc = BlockChain::new(Config::default(), &genesis, db.clone());
+
+		let mut batch = db.transaction();
+		let _ = bc.insert_block(&mut batch, &b1a, vec![]);
+		bc.commit();
+		let _ = bc.insert_block(&mut batch, &b1b, vec![]);
+		bc.commit();
+		db.write(batch).unwrap();
+
+		assert_eq!(bc.best_block_hash(), b1a_hash);
+		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
+			block_hash: b1a_hash.clone(),
+			index: 0,
+		}));
+
+		// now let's make forked chain the canon chain
+		let mut batch = db.transaction();
+		let _ = bc.insert_block(&mut batch, &b2, vec![]);
+		bc.commit();
+		db.write(batch).unwrap();
+
+		// Transaction should be retracted
+		assert_eq!(bc.best_block_hash(), b2_hash);
+		assert_eq!(bc.transaction_address(&t1_hash), None);
+	}
+
 	#[test]
 	fn test_overwriting_transaction_addresses() {
 		let mut canon_chain = ChainGenerator::default();
@@ -1318,14 +1398,14 @@ mod tests {
 		db.write(batch).unwrap();
 
 		assert_eq!(bc.best_block_hash(), b1a_hash);
-		assert_eq!(bc.transaction_address(&t1_hash).unwrap(), TransactionAddress {
+		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
 			block_hash: b1a_hash.clone(),
 			index: 0,
-		});
-		assert_eq!(bc.transaction_address(&t2_hash).unwrap(), TransactionAddress {
+		}));
+		assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress {
 			block_hash: b1a_hash.clone(),
 			index: 1,
-		});
+		}));
 
 		// now let's make forked chain the canon chain
 		let mut batch = db.transaction();
@@ -1334,18 +1414,18 @@ mod tests {
 		db.write(batch).unwrap();
 
 		assert_eq!(bc.best_block_hash(), b2_hash);
-		assert_eq!(bc.transaction_address(&t1_hash).unwrap(), TransactionAddress {
+		assert_eq!(bc.transaction_address(&t1_hash), Some(TransactionAddress {
 			block_hash: b1b_hash.clone(),
 			index: 1,
-		});
-		assert_eq!(bc.transaction_address(&t2_hash).unwrap(), TransactionAddress {
+		}));
+		assert_eq!(bc.transaction_address(&t2_hash), Some(TransactionAddress {
 			block_hash: b1b_hash.clone(),
 			index: 0,
-		});
-		assert_eq!(bc.transaction_address(&t3_hash).unwrap(), TransactionAddress {
+		}));
+		assert_eq!(bc.transaction_address(&t3_hash), Some(TransactionAddress {
 			block_hash: b2_hash.clone(),
 			index: 0,
-		});
+		}));
 	}
 
 	#[test]
diff --git a/ethcore/src/blockchain/update.rs b/ethcore/src/blockchain/update.rs
index 96236533849..323b96f6f36 100644
--- a/ethcore/src/blockchain/update.rs
+++ b/ethcore/src/blockchain/update.rs
@@ -17,8 +17,8 @@ pub struct ExtrasUpdate<'a> {
 	pub block_details: HashMap<H256, BlockDetails>,
 	/// Modified block receipts.
 	pub block_receipts: HashMap<H256, BlockReceipts>,
-	/// Modified transaction addresses.
-	pub transactions_addresses: HashMap<H256, TransactionAddress>,
 	/// Modified blocks blooms.
 	pub blocks_blooms: HashMap<LogGroupPosition, BloomGroup>,
+	/// Modified transaction addresses (None signifies removed transactions).
+	pub transactions_addresses: HashMap<H256, Option<TransactionAddress>>,
 }
diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs
index eab0e2eb5b4..17ca5147645 100644
--- a/ethcore/src/db.rs
+++ b/ethcore/src/db.rs
@@ -64,6 +64,9 @@ pub trait Writable {
 	/// Writes the value into the database.
 	fn write<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>, value: &T) where T: Encodable, R: Deref<Target = [u8]>;
 
+	/// Deletes key from the database.
+	fn delete<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) where T: Encodable, R: Deref<Target = [u8]>;
+
 	/// Writes the value into the database and updates the cache.
 	fn write_with_cache<K, T, R>(&self, col: Option<u32>, cache: &mut Cache<K, T>, key: K, value: T, policy: CacheUpdatePolicy) where
 		K: Key<T, Target = R> + Hash + Eq,
@@ -100,6 +103,34 @@ pub trait Writable {
 			},
 		}
 	}
+
+	/// Writes and removes the values into the database and updates the cache.
+	fn extend_with_option_cache<K, T, R>(&self, col: Option<u32>, cache: &mut Cache<K, Option<T>>, values: HashMap<K, Option<T>>, policy: CacheUpdatePolicy) where
+		K: Key<T, Target = R> + Hash + Eq,
+		T: Encodable,
+		R: Deref<Target = [u8]> {
+		match policy {
+			CacheUpdatePolicy::Overwrite => {
+				for (key, value) in values.into_iter() {
+					match value {
+						Some(ref v) => self.write(col, &key, v),
+						None => self.delete(col, &key),
+					}
+					cache.insert(key, value);
+				}
+			},
+			CacheUpdatePolicy::Remove => {
+				for (key, value) in values.into_iter() {
+					match value {
+						Some(v) => self.write(col, &key, &v),
+						None => self.delete(col, &key),
+					}
+					cache.remove(&key);
+				}
+			},
+		}
+	}
+
 }
 
 /// Should be used to read values from database.
@@ -154,6 +185,13 @@ impl Writable for DBTransaction {
 			panic!("db put failed, key: {:?}, err: {:?}", &key.key() as &[u8], err);
 		}
 	}
+
+	fn delete<T, R>(&self, col: Option<u32>, key: &Key<T, Target = R>) where T: Encodable, R: Deref<Target = [u8]> {
+		let result = DBTransaction::delete(self, col, &key.key());
+		if let Err(err) = result {
+			panic!("db delete failed, key: {:?}, err: {:?}", &key.key() as &[u8], err);
+		}
+	}
 }
 
 impl Readable for Database {
diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs
index 03118fafc49..5cde5007b3f 100644
--- a/ethcore/src/miner/miner.rs
+++ b/ethcore/src/miner/miner.rs
@@ -879,7 +879,7 @@ impl MinerService for Miner {
 			out_of_chain.for_each(|txs| {
 				let mut transaction_queue = self.transaction_queue.lock();
 				let _ = self.add_transactions_to_queue(
-					chain, txs, TransactionOrigin::External, &mut transaction_queue
+					chain, txs, TransactionOrigin::RetractedBlock, &mut transaction_queue
 				);
 			});
 		}
diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs
index cdff2f17993..2aa27e062ab 100644
--- a/ethcore/src/miner/transaction_queue.rs
+++ b/ethcore/src/miner/transaction_queue.rs
@@ -99,6 +99,8 @@ pub enum TransactionOrigin {
 	Local,
 	/// External transaction received from network
 	External,
+	/// Transactions from retracted blocks
+	RetractedBlock,
 }
 
 impl PartialOrd for TransactionOrigin {
@@ -113,10 +115,11 @@ impl Ord for TransactionOrigin {
 			return Ordering::Equal;
 		}
 
-		if *self == TransactionOrigin::Local {
-			Ordering::Less
-		} else {
-			Ordering::Greater
+		match (*self, *other) {
+			(TransactionOrigin::RetractedBlock, _) => Ordering::Less,
+			(_, TransactionOrigin::RetractedBlock) => Ordering::Greater,
+			(TransactionOrigin::Local, _) => Ordering::Less,
+			_ => Ordering::Greater,
 		}
 	}
 }
@@ -634,7 +637,10 @@ impl TransactionQueue {
 			// Goes to future or is removed
 			let order = self.current.drop(sender, &k).unwrap();
 			if k >= current_nonce {
-				self.future.insert(*sender, k, order.update_height(k, current_nonce));
+				let order = order.update_height(k, current_nonce);
+				if let Some(old) = self.future.insert(*sender, k, order.clone()) {
+					Self::replace_orders(*sender, k, old, order, &mut self.future, &mut self.by_hash);
+				}
 			} else {
 				trace!(target: "txqueue", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
 				self.by_hash.remove(&order.hash).expect("All transactions in `future` are also in `by_hash`");
@@ -698,7 +704,9 @@ impl TransactionQueue {
 			self.future.by_priority.remove(&order);
 			// Put to current
 			let order = order.update_height(current_nonce, first_nonce);
-			self.current.insert(address, current_nonce, order);
+			if let Some(old) = self.current.insert(address, current_nonce, order.clone()) {
+				Self::replace_orders(address, current_nonce, old, order, &mut self.current, &mut self.by_hash);
+			}
 			update_last_nonce_to = Some(current_nonce);
 			current_nonce = current_nonce + U256::one();
 		}
@@ -730,45 +738,51 @@ impl TransactionQueue {
 		let address = tx.sender();
 		let nonce = tx.nonce();
-
-		let next_nonce = self.last_nonces
-			.get(&address)
-			.cloned()
-			.map_or(state_nonce, |n| n + U256::one());
+		let hash = tx.hash();
 
 		// The transaction might be old, let's check that.
 		// This has to be the first test, otherwise calculating
 		// nonce height would result in overflow.
 		if nonce < state_nonce {
 			// Droping transaction
-			trace!(target: "txqueue", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, next_nonce);
+			trace!(target: "txqueue", "Dropping old transaction: {:?} (nonce: {} < {})", hash, nonce, state_nonce);
 			return Err(TransactionError::Old);
-		} else if nonce > next_nonce {
+		}
+
+		// Update nonces of transactions in future (remove old transactions)
+		self.update_future(&address, state_nonce);
+		// State nonce could be updated. Maybe there are some more items waiting in future?
+		self.move_matching_future_to_current(address, state_nonce, state_nonce);
+		// Check the next expected nonce (might be updated by move above)
+		let next_nonce = self.last_nonces
+			.get(&address)
+			.cloned()
+			.map_or(state_nonce, |n| n + U256::one());
+
+		// Future transaction
+		if nonce > next_nonce {
 			// We have a gap - put to future.
-			// Update nonces of transactions in future (remove old transactions)
-			self.update_future(&address, state_nonce);
 			// Insert transaction (or replace old one with lower gas price)
 			try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.future, &mut self.by_hash)));
-			// Return an error if this transaction is not imported because of limit.
-			try!(check_if_removed(&address, &nonce, self.future.enforce_limit(&mut self.by_hash)));
+			// Enforce limit in Future
+			let removed = self.future.enforce_limit(&mut self.by_hash);
+			// Return an error if this transaction was not imported because of limit.
+			try!(check_if_removed(&address, &nonce, removed));
+
+			debug!(target: "txqueue", "Importing transaction to future: {:?}", hash);
+			debug!(target: "txqueue", "status: {:?}", self.status());
 			return Ok(TransactionImportResult::Future);
 		}
+
+		// We might have filled a gap - move some more transactions from future
+		self.move_matching_future_to_current(address, nonce, state_nonce);
+		self.move_matching_future_to_current(address, nonce + U256::one(), state_nonce);
+
+		// Replace transaction if any
 		try!(check_too_cheap(Self::replace_transaction(tx, state_nonce, &mut self.current, &mut self.by_hash)));
 		// Keep track of highest nonce stored in current
 		let new_max = self.last_nonces.get(&address).map_or(nonce, |n| cmp::max(nonce, *n));
 		self.last_nonces.insert(address, new_max);
-		// Update nonces of transactions in future
-		self.update_future(&address, state_nonce);
-		// Maybe there are some more items waiting in future?
-		self.move_matching_future_to_current(address, nonce + U256::one(), state_nonce);
-		// There might be exactly the same transaction waiting in future
-		// same (sender, nonce), but above function would not move it.
-		if let Some(order) = self.future.drop(&address, &nonce) {
-			// Let's insert that transaction to current (if it has higher gas_price)
-			let future_tx = self.by_hash.remove(&order.hash).expect("All transactions in `future` are always in `by_hash`.");
-			// if transaction in `current` (then one we are importing) is replaced it means that it has to low gas_price
-			try!(check_too_cheap(!Self::replace_transaction(future_tx, state_nonce, &mut self.current, &mut self.by_hash)));
-		}
 
 		// Also enforce the limit
 		let removed = self.current.enforce_limit(&mut self.by_hash);
@@ -812,24 +826,28 @@ impl TransactionQueue {
 		if let Some(old) = set.insert(address, nonce, order.clone()) {
-			// There was already transaction in queue. Let's check which one should stay
-			let old_fee = old.gas_price;
-			let new_fee = order.gas_price;
-			if old_fee.cmp(&new_fee) == Ordering::Greater {
-				// Put back old transaction since it has greater priority (higher gas_price)
-				set.insert(address, nonce, old);
-				// and remove new one
-				by_hash.remove(&hash).expect("The hash has been just inserted and no other line is altering `by_hash`.");
-				false
-			} else {
-				// Make sure we remove old transaction entirely
-				by_hash.remove(&old.hash).expect("The hash is coming from `future` so it has to be in `by_hash`.");
-				true
-			}
+			Self::replace_orders(address, nonce, old, order, set, by_hash)
 		} else {
 			true
 		}
 	}
+
+	fn replace_orders(address: Address, nonce: U256, old: TransactionOrder, order: TransactionOrder, set: &mut TransactionSet, by_hash: &mut HashMap<H256, VerifiedTransaction>) -> bool {
+		// There was already transaction in queue. Let's check which one should stay
+		let old_fee = old.gas_price;
+		let new_fee = order.gas_price;
+		if old_fee.cmp(&new_fee) == Ordering::Greater {
+			// Put back old transaction since it has greater priority (higher gas_price)
+			set.insert(address, nonce, old);
+			// and remove new one
+			by_hash.remove(&order.hash).expect("The hash has been just inserted and no other line is altering `by_hash`.");
+			false
+		} else {
+			// Make sure we remove old transaction entirely
+			by_hash.remove(&old.hash).expect("The hash is coming from `future` so it has to be in `by_hash`.");
+			true
+		}
+	}
 }
 
 fn check_too_cheap(is_in: bool) -> Result<(), TransactionError> {
@@ -953,6 +971,17 @@ mod test {
 		(tx.sign(secret), tx2.sign(secret))
 	}
 
+	#[test]
+	fn test_ordering() {
+		assert_eq!(TransactionOrigin::Local.cmp(&TransactionOrigin::External), Ordering::Less);
+		assert_eq!(TransactionOrigin::RetractedBlock.cmp(&TransactionOrigin::Local), Ordering::Less);
+		assert_eq!(TransactionOrigin::RetractedBlock.cmp(&TransactionOrigin::External), Ordering::Less);
+
+		assert_eq!(TransactionOrigin::External.cmp(&TransactionOrigin::Local), Ordering::Greater);
+		assert_eq!(TransactionOrigin::Local.cmp(&TransactionOrigin::RetractedBlock), Ordering::Greater);
+		assert_eq!(TransactionOrigin::External.cmp(&TransactionOrigin::RetractedBlock), Ordering::Greater);
+	}
+
 	#[test]
 	fn should_create_transaction_set() {
 		// given
@@ -1056,6 +1085,32 @@ mod test {
 		assert_eq!(txq.top_transactions()[0], tx2);
 	}
 
+	#[test]
+	fn should_move_all_transactions_from_future() {
+		// given
+		let mut txq = TransactionQueue::new();
+		let (tx, tx2) = new_txs_with_gas_price_diff(1.into(), 1.into());
+		let prev_nonce = |a: &Address| AccountDetails{ nonce: default_nonce(a).nonce - U256::one(), balance:
+			!U256::zero() };
+
+		// First insert one transaction to future
+		let res = txq.add(tx.clone(), &prev_nonce, TransactionOrigin::External);
+		assert_eq!(res.unwrap(), TransactionImportResult::Future);
+		assert_eq!(txq.status().future, 1);
+
+		// now import second transaction to current
+		let res = txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External);
+
+		// then
+		assert_eq!(res.unwrap(), TransactionImportResult::Current);
+		assert_eq!(txq.status().pending, 2);
+		assert_eq!(txq.status().future, 0);
+		assert_eq!(txq.current.by_priority.len(), 2);
+		assert_eq!(txq.current.by_address.len(), 2);
+		assert_eq!(txq.top_transactions()[0], tx);
+		assert_eq!(txq.top_transactions()[1], tx2);
+	}
+
 	#[test]
 	fn should_import_tx() {
 		// given
@@ -1264,6 +1319,27 @@ mod test {
 		assert_eq!(top.len(), 2);
 	}
 
+	#[test]
+	fn should_prioritize_reimported_transactions_within_same_nonce_height() {
+		// given
+		let mut txq = TransactionQueue::new();
+		let tx = new_tx();
+		// the second one has same nonce but higher `gas_price`
+		let (_, tx2) = new_similar_txs();
+
+		// when
+		// first insert local one with higher gas price
+		txq.add(tx2.clone(), &default_nonce, TransactionOrigin::Local).unwrap();
+		// then the one with lower gas price, but from retracted block
+		txq.add(tx.clone(), &default_nonce, TransactionOrigin::RetractedBlock).unwrap();
+
+		// then
+		let top = txq.top_transactions();
+		assert_eq!(top[0], tx); // retracted should be first
+		assert_eq!(top[1], tx2);
+		assert_eq!(top.len(), 2);
+	}
+
 	#[test]
 	fn should_not_prioritize_local_transactions_with_different_nonce_height() {
 		// given
diff --git a/util/src/hash.rs b/util/src/hash.rs
index 7bc12f2e9f4..626ed543011 100644
--- a/util/src/hash.rs
+++ b/util/src/hash.rs
@@ -77,11 +77,11 @@ pub fn clean_0x(s: &str) -> &str {
 macro_rules! impl_hash {
 	($from: ident, $size: expr) => {
-		#[derive(Eq)]
 		#[repr(C)]
 		/// Unformatted binary data of fixed length.
 		pub struct $from (pub [u8; $size]);
 
+
 		impl From<[u8; $size]> for $from {
 			fn from(bytes: [u8; $size]) -> Self {
 				$from(bytes)
 			}
@@ -263,6 +263,8 @@ macro_rules! impl_hash {
 			}
 		}
 
+		impl Eq for $from {}
+
 		impl PartialEq for $from {
 			fn eq(&self, other: &Self) -> bool {
 				for i in 0..$size {