From 496a698ce2d43a084e5fc4fc1b4adfe2d239d8ab Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 7 Aug 2020 13:00:04 +0100 Subject: [PATCH 01/16] Introduce CommitOnly variant of Inputs. Introduce CommitWrapper so we can sort commit only inputs correctly. --- chain/src/chain.rs | 7 +- chain/src/txhashset/utxo_view.rs | 36 ++++++-- core/src/core/block.rs | 2 +- core/src/core/transaction.rs | 154 ++++++++++++++++++++++++------- core/tests/block.rs | 6 +- pool/src/pool.rs | 3 +- 6 files changed, 155 insertions(+), 53 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index e219e17c0c..a8fd29d74d 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -291,18 +291,19 @@ impl Chain { let mut header_pmmr = self.header_pmmr.write(); let mut txhashset = self.txhashset.write(); - let outputs = + let inputs: Vec<_> = txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| { let previous_header = batch.get_previous_header(&block.header)?; pipe::rewind_and_apply_fork(&previous_header, ext, batch)?; ext.extension .utxo_view(ext.header_extension) .validate_inputs(block.inputs(), batch) + .map(|outputs| outputs.into_iter().map(|(out, _)| out).collect()) })?; - let outputs: Vec<_> = outputs.into_iter().map(|(out, _)| out).collect(); + let inputs = inputs.as_slice().into(); Ok(Block { header: block.header, - body: block.body.replace_inputs(outputs.into()), + body: block.body.replace_inputs(inputs), }) } diff --git a/chain/src/txhashset/utxo_view.rs b/chain/src/txhashset/utxo_view.rs index 74d6b286f5..d4fec4f482 100644 --- a/chain/src/txhashset/utxo_view.rs +++ b/chain/src/txhashset/utxo_view.rs @@ -16,7 +16,10 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::pmmr::{self, ReadonlyPMMR}; -use crate::core::core::{Block, BlockHeader, Inputs, Output, OutputIdentifier, Transaction}; +use crate::core::core::{ + Block, BlockHeader, CommitWrapper, Inputs, Output, OutputFeatures, 
OutputIdentifier, + Transaction, +}; use crate::core::global; use crate::error::{Error, ErrorKind}; use crate::store::Batch; @@ -82,6 +85,16 @@ impl<'a> UTXOView<'a> { batch: &Batch<'_>, ) -> Result, Error> { match inputs { + Inputs::CommitOnly(inputs) => { + let outputs_spent: Result, Error> = inputs + .iter() + .map(|input| { + self.validate_input(input.commitment(), batch) + .and_then(|(out, pos)| Ok((out, pos))) + }) + .collect(); + outputs_spent + } Inputs::FeaturesAndCommit(inputs) => { let outputs_spent: Result, Error> = inputs .iter() @@ -153,15 +166,22 @@ impl<'a> UTXOView<'a> { ) -> Result<(), Error> { // Find the greatest output pos of any coinbase // outputs we are attempting to spend. - let inputs: Vec<_> = inputs.into(); - let pos = inputs + let inputs: Vec = inputs.into(); + + let outputs: Result, _> = inputs .iter() - .filter(|x| x.is_coinbase()) - .filter_map(|x| batch.get_output_pos(&x.commitment()).ok()) - .max() - .unwrap_or(0); + .map(|x| self.validate_input(x.commitment(), batch)) + .collect(); - if pos > 0 { + let pos = outputs? + .iter() + .filter_map(|(out, pos)| match out.features { + OutputFeatures::Coinbase => Some(pos.pos), + _ => None, + }) + .max(); + + if let Some(pos) = pos { // If we have not yet reached 1440 blocks then // we can fail immediately as coinbase cannot be mature. if height < global::coinbase_maturity() { diff --git a/core/src/core/block.rs b/core/src/core/block.rs index 8c11e9ec02..144126d36d 100644 --- a/core/src/core/block.rs +++ b/core/src/core/block.rs @@ -609,7 +609,7 @@ impl Block { kernels.extend_from_slice(cb.kern_full()); // Initialize a tx body and sort everything. - let body = TransactionBody::init(inputs, &outputs, &kernels, false)?; + let body = TransactionBody::init(inputs.into(), &outputs, &kernels, false)?; // Finally return the full block. 
// Note: we have not actually validated the block here, diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index 1b07e888ca..fe8fb91c02 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -691,12 +691,13 @@ impl Readable for TransactionBody { return Err(ser::Error::TooLargeReadErr); } - let inputs = read_multi(reader, num_inputs)?; + let inputs: Vec = read_multi(reader, num_inputs)?; + let inputs = Inputs::from(inputs.as_slice()); let outputs = read_multi(reader, num_outputs)?; let kernels = read_multi(reader, num_kernels)?; // Initialize tx body and verify everything is sorted. - let body = TransactionBody::init(&inputs, &outputs, &kernels, true) + let body = TransactionBody::init(inputs, &outputs, &kernels, true) .map_err(|_| ser::Error::CorruptedData)?; Ok(body) @@ -751,13 +752,13 @@ impl TransactionBody { /// the provided inputs, outputs and kernels. /// Guarantees inputs, outputs, kernels are sorted lexicographically. pub fn init( - inputs: &[Input], + inputs: Inputs, outputs: &[Output], kernels: &[TxKernel], verify_sorted: bool, ) -> Result { let mut body = TransactionBody { - inputs: inputs.into(), + inputs, outputs: outputs.to_vec(), kernels: kernels.to_vec(), }; @@ -792,11 +793,19 @@ impl TransactionBody { /// inputs, if any, are kept intact. /// Sort order is maintained. 
pub fn with_input(mut self, input: Input) -> TransactionBody { - let mut inputs: Vec<_> = self.inputs.into(); - if let Err(e) = inputs.binary_search(&input) { - inputs.insert(e, input) + match &mut self.inputs { + Inputs::CommitOnly(inputs) => { + let commit = input.into(); + if let Err(e) = inputs.binary_search(&commit) { + inputs.insert(e, commit) + }; + } + Inputs::FeaturesAndCommit(inputs) => { + if let Err(e) = inputs.binary_search(&input) { + inputs.insert(e, input) + }; + } }; - self.inputs = inputs.into(); self } @@ -1166,14 +1175,15 @@ impl Transaction { /// Creates a new transaction initialized with /// the provided inputs, outputs, kernels - pub fn new(inputs: &[Input], outputs: &[Output], kernels: &[TxKernel]) -> Transaction { - let offset = BlindingFactor::zero(); - + pub fn new(inputs: Inputs, outputs: &[Output], kernels: &[TxKernel]) -> Transaction { // Initialize a new tx body and sort everything. let body = TransactionBody::init(inputs, outputs, kernels, false).expect("sorting, not verifying"); - Transaction { offset, body } + Transaction { + offset: BlindingFactor::zero(), + body, + } } /// Creates a new transaction using this transaction as a template @@ -1398,7 +1408,7 @@ pub fn aggregate(txs: &[Transaction]) -> Result { kernels + tx.kernels().len(), ) }); - let mut inputs: Vec = Vec::with_capacity(n_inputs); + let mut inputs: Vec = Vec::with_capacity(n_inputs); let mut outputs: Vec = Vec::with_capacity(n_outputs); let mut kernels: Vec = Vec::with_capacity(n_kernels); @@ -1427,7 +1437,8 @@ pub fn aggregate(txs: &[Transaction]) -> Result { // * full set of tx kernels // * sum of all kernel offsets // Note: We sort input/outputs/kernels when building the transaction body internally. 
- let tx = Transaction::new(inputs, outputs, &kernels).with_offset(total_kernel_offset); + let tx = + Transaction::new(Inputs::from(inputs), outputs, &kernels).with_offset(total_kernel_offset); Ok(tx) } @@ -1435,7 +1446,7 @@ pub fn aggregate(txs: &[Transaction]) -> Result { /// Attempt to deaggregate a multi-kernel transaction based on multiple /// transactions pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result { - let mut inputs: Vec = vec![]; + let mut inputs: Vec = vec![]; let mut outputs: Vec = vec![]; let mut kernels: Vec = vec![]; @@ -1494,7 +1505,10 @@ pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result), - // Vec of commitments. - // CommitOnly(Vec), + +/// We need to wrap commitments so they can be sorted with hashable_ord. +#[derive(Serialize, Deserialize, Debug, Clone, Copy)] +pub struct CommitWrapper(Commitment); + +impl DefaultHashable for CommitWrapper {} +hashable_ord!(CommitWrapper); + +impl From for CommitWrapper { + fn from(commit: Commitment) -> Self { + CommitWrapper(commit) + } +} + +impl From for CommitWrapper { + fn from(input: Input) -> Self { + CommitWrapper(input.commitment()) + } +} + +impl From<&Input> for CommitWrapper { + fn from(input: &Input) -> Self { + CommitWrapper(input.commitment()) + } +} + +impl AsRef for CommitWrapper { + fn as_ref(&self) -> &Commitment { + &self.0 + } } -impl From for Vec { +impl Writeable for CommitWrapper { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + self.0.write(writer) + } +} + +impl From for Vec { fn from(inputs: Inputs) -> Self { match inputs { - Inputs::FeaturesAndCommit(inputs) => inputs, + Inputs::CommitOnly(inputs) => inputs, + Inputs::FeaturesAndCommit(inputs) => { + let mut commits: Vec<_> = inputs.iter().map(|input| input.into()).collect(); + commits.sort_unstable(); + commits + } } } } -impl From<&Inputs> for Vec { +impl From<&Inputs> for Vec { fn from(inputs: &Inputs) -> Self { match inputs { - Inputs::FeaturesAndCommit(inputs) => 
inputs.to_vec(), + Inputs::CommitOnly(inputs) => inputs.clone(), + Inputs::FeaturesAndCommit(inputs) => { + let mut commits: Vec<_> = inputs.iter().map(|input| input.into()).collect(); + commits.sort_unstable(); + commits + } } } } +impl CommitWrapper { + /// Wrapped commitment. + pub fn commitment(&self) -> Commitment { + self.0 + } +} +/// Wrapper around a vec of inputs. +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +#[serde(untagged)] +pub enum Inputs { + /// Vec of commitments. + CommitOnly(Vec), + /// Vec of inputs. + FeaturesAndCommit(Vec), +} + impl From<&[Input]> for Inputs { fn from(inputs: &[Input]) -> Self { Inputs::FeaturesAndCommit(inputs.to_vec()) } } -impl From> for Inputs { - fn from(inputs: Vec) -> Self { - Inputs::FeaturesAndCommit(inputs) +impl From<&[CommitWrapper]> for Inputs { + fn from(commits: &[CommitWrapper]) -> Self { + Inputs::CommitOnly(commits.to_vec()) } } -impl From> for Inputs { - fn from(outputs: Vec) -> Self { +/// Used when converting to v2 compatibility. +/// We want to preserve output features here. +impl From<&[OutputIdentifier]> for Inputs { + fn from(outputs: &[OutputIdentifier]) -> Self { let inputs = outputs - .into_iter() + .iter() .map(|out| Input { features: out.features, commit: out.commit, @@ -1641,13 +1711,23 @@ impl From> for Inputs { impl Default for Inputs { fn default() -> Self { - Inputs::FeaturesAndCommit(vec![]) + Inputs::CommitOnly(vec![]) } } impl Writeable for Inputs { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + if self.is_empty() { + return Ok(()); + } match self { + Inputs::CommitOnly(inputs) => { + if writer.serialization_mode() == ser::SerializationMode::Hash { + inputs.write(writer)?; + } else { + panic!("not yet implemented!"); + } + } Inputs::FeaturesAndCommit(inputs) => inputs.write(writer)?, } Ok(()) @@ -1658,6 +1738,7 @@ impl Inputs { /// Number of inputs. 
pub fn len(&self) -> usize { match self { + Inputs::CommitOnly(inputs) => inputs.len(), Inputs::FeaturesAndCommit(inputs) => inputs.len(), } } @@ -1670,12 +1751,15 @@ impl Inputs { /// Verify inputs are sorted and unique. fn verify_sorted_and_unique(&self) -> Result<(), ser::Error> { match self { + Inputs::CommitOnly(inputs) => inputs.verify_sorted_and_unique(), Inputs::FeaturesAndCommit(inputs) => inputs.verify_sorted_and_unique(), } } + /// Sort the inputs. fn sort_unstable(&mut self) { match self { + Inputs::CommitOnly(inputs) => inputs.sort_unstable(), Inputs::FeaturesAndCommit(inputs) => inputs.sort_unstable(), } } diff --git a/core/tests/block.rs b/core/tests/block.rs index ede316ab4f..012afda807 100644 --- a/core/tests/block.rs +++ b/core/tests/block.rs @@ -732,14 +732,13 @@ fn same_amount_outputs_copy_range_proof() { // now we reconstruct the transaction, swapping the rangeproofs so they // have the wrong privkey - let ins: Vec<_> = tx.inputs().into(); let mut outs = tx.outputs().to_vec(); outs[0].proof = outs[1].proof; let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0); let prev = BlockHeader::default(); let b = new_block( - &[Transaction::new(&ins, &outs, tx.kernels())], + &[Transaction::new(tx.inputs(), &outs, tx.kernels())], &keychain, &builder, &prev, @@ -784,7 +783,6 @@ fn wrong_amount_range_proof() { .unwrap(); // we take the range proofs from tx2 into tx1 and rebuild the transaction - let ins: Vec<_> = tx1.inputs().into(); let mut outs = tx1.outputs().to_vec(); outs[0].proof = tx2.outputs()[0].proof; outs[1].proof = tx2.outputs()[1].proof; @@ -792,7 +790,7 @@ fn wrong_amount_range_proof() { let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0); let prev = BlockHeader::default(); let b = new_block( - &[Transaction::new(&ins, &outs, tx1.kernels())], + &[Transaction::new(tx1.inputs(), &outs, tx1.kernels())], &keychain, &builder, &prev, diff --git a/pool/src/pool.rs b/pool/src/pool.rs index ce7795ea48..51976ae504 100644 --- 
a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -329,8 +329,7 @@ where // Remember to use the original kernels and kernel offset. let mut outputs = tx.outputs().to_vec(); let (inputs, outputs, _, _) = transaction::cut_through(&mut spent[..], &mut outputs[..])?; - let inputs: Vec<_> = inputs.iter().map(|out| out.into()).collect(); - let tx = Transaction::new(inputs.as_slice(), outputs, tx.kernels()).with_offset(tx.offset); + let tx = Transaction::new(inputs.into(), outputs, tx.kernels()).with_offset(tx.offset); // Validate the tx to ensure our converted inputs are correct. tx.validate(Weighting::AsTransaction, self.verifier_cache.clone()) From d6854f8c20f69ebc62af3319e813e17d99097fac Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 8 Aug 2020 15:31:54 +0100 Subject: [PATCH 02/16] remember to resort if converting --- core/src/core/transaction.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index fe8fb91c02..3946846e2b 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -1698,13 +1698,14 @@ impl From<&[CommitWrapper]> for Inputs { /// We want to preserve output features here. 
impl From<&[OutputIdentifier]> for Inputs { fn from(outputs: &[OutputIdentifier]) -> Self { - let inputs = outputs + let mut inputs: Vec<_> = outputs .iter() .map(|out| Input { features: out.features, commit: out.commit, }) .collect(); + inputs.sort_unstable(); Inputs::FeaturesAndCommit(inputs) } } From f4dcfb2d68f1e4c3fe2ef9cb7f1709088be36bbc Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 8 Aug 2020 19:40:25 +0100 Subject: [PATCH 03/16] write inputs based on variant and protocol version --- core/src/core/block.rs | 5 ++--- core/src/core/transaction.rs | 32 +++++++++++++++++++++++--------- core/src/ser.rs | 14 ++++++++++++++ 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/core/src/core/block.rs b/core/src/core/block.rs index 144126d36d..2ecab4f83e 100644 --- a/core/src/core/block.rs +++ b/core/src/core/block.rs @@ -272,7 +272,7 @@ impl PMMRable for BlockHeader { /// Serialization of a block header impl Writeable for BlockHeader { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - if writer.serialization_mode() != ser::SerializationMode::Hash { + if !writer.serialization_mode().is_hash_mode() { self.write_pre_pow(writer)?; } self.pow.write(writer)?; @@ -507,8 +507,7 @@ impl Hashed for Block { impl Writeable for Block { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { self.header.write(writer)?; - - if writer.serialization_mode() != ser::SerializationMode::Hash { + if !writer.serialization_mode().is_hash_mode() { self.body.write(writer)?; } Ok(()) diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index 3946846e2b..aed43a37df 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -331,7 +331,7 @@ impl Writeable for KernelFeatures { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { // Care must be exercised when writing for hashing purposes. // All kernels are hashed using original v1 serialization strategy. 
- if writer.serialization_mode() == ser::SerializationMode::Hash { + if writer.serialization_mode().is_hash_mode() { return self.write_v1(writer); } @@ -1718,18 +1718,32 @@ impl Default for Inputs { impl Writeable for Inputs { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + // Nothing to write so we are done. if self.is_empty() { return Ok(()); } - match self { - Inputs::CommitOnly(inputs) => { - if writer.serialization_mode() == ser::SerializationMode::Hash { - inputs.write(writer)?; - } else { - panic!("not yet implemented!"); - } + + // If writing for a hash then simply write all our inputs. + if writer.serialization_mode().is_hash_mode() { + match self { + Inputs::CommitOnly(inputs) => inputs.write(writer)?, + Inputs::FeaturesAndCommit(inputs) => inputs.write(writer)?, + } + } else { + // Otherwise we are writing full data and need to consider our inputs variant and protocol version. + match self { + Inputs::CommitOnly(inputs) => match writer.protocol_version().value() { + 0..=2 => return Err(ser::Error::UnsupportedProtocolVersion), + 3..=ProtocolVersion::MAX => inputs.write(writer)?, + }, + Inputs::FeaturesAndCommit(inputs) => match writer.protocol_version().value() { + 0..=2 => inputs.write(writer)?, + 3..=ProtocolVersion::MAX => { + let inputs: Vec = self.into(); + inputs.write(writer)?; + } + }, } - Inputs::FeaturesAndCommit(inputs) => inputs.write(writer)?, } Ok(()) } diff --git a/core/src/ser.rs b/core/src/ser.rs index 624a59e612..d04ff63e28 100644 --- a/core/src/ser.rs +++ b/core/src/ser.rs @@ -69,6 +69,8 @@ pub enum Error { DuplicateError, /// Block header version (hard-fork schedule). 
InvalidBlockVersion, + /// Unsupported protocol version + UnsupportedProtocolVersion, } impl From for Error { @@ -92,6 +94,7 @@ impl fmt::Display for Error { Error::TooLargeReadErr => f.write_str("too large read"), Error::HexError(ref e) => write!(f, "hex error {:?}", e), Error::InvalidBlockVersion => f.write_str("invalid block version"), + Error::UnsupportedProtocolVersion => f.write_str("unsupported protocol version"), } } } @@ -115,6 +118,7 @@ impl error::Error for Error { Error::TooLargeReadErr => "too large read", Error::HexError(_) => "hex error", Error::InvalidBlockVersion => "invalid block version", + Error::UnsupportedProtocolVersion => "unsupported protocol version", } } } @@ -128,6 +132,16 @@ pub enum SerializationMode { Hash, } +impl SerializationMode { + /// Hash mode? + pub fn is_hash_mode(&self) -> bool { + match self { + SerializationMode::Hash => true, + _ => false, + } + } +} + /// Implementations defined how different numbers and binary structures are /// written to an underlying stream or container (depending on implementation). pub trait Writer { From 0d152f35fa88137a83525cd3abccba5c22312c88 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sun, 9 Aug 2020 16:17:07 +0100 Subject: [PATCH 04/16] read and write protocol version specific inputs --- core/src/core/transaction.rs | 21 +++++++++++++++++++-- core/src/global.rs | 11 +++++++---- core/src/ser.rs | 2 +- 3 files changed, 27 insertions(+), 7 deletions(-) diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index aed43a37df..e17ba8c784 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -691,8 +691,18 @@ impl Readable for TransactionBody { return Err(ser::Error::TooLargeReadErr); } - let inputs: Vec = read_multi(reader, num_inputs)?; - let inputs = Inputs::from(inputs.as_slice()); + // Read protocol version specific inputs. 
+ let inputs = match reader.protocol_version().value() { + 0..=2 => { + let inputs: Vec = read_multi(reader, num_inputs)?; + Inputs::from(inputs.as_slice()) + } + 3..=ser::ProtocolVersion::MAX => { + let inputs: Vec = read_multi(reader, num_inputs)?; + Inputs::from(inputs.as_slice()) + } + }; + let outputs = read_multi(reader, num_outputs)?; let kernels = read_multi(reader, num_kernels)?; @@ -1634,6 +1644,13 @@ impl AsRef for CommitWrapper { } } +impl Readable for CommitWrapper { + fn read(reader: &mut R) -> Result { + let commit = Commitment::read(reader)?; + Ok(CommitWrapper(commit)) + } +} + impl Writeable for CommitWrapper { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { self.0.write(writer) diff --git a/core/src/global.rs b/core/src/global.rs index 5ceb7e85d9..750378fb67 100644 --- a/core/src/global.rs +++ b/core/src/global.rs @@ -23,9 +23,12 @@ use crate::consensus::{ PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD, }; use crate::core::block::HeaderVersion; -use crate::pow::{ - self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx, - new_cuckatoo_ctx, PoWContext, +use crate::{ + pow::{ + self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx, + new_cuckatoo_ctx, PoWContext, + }, + ser::ProtocolVersion, }; use std::cell::Cell; use util::OneTime; @@ -42,7 +45,7 @@ use util::OneTime; /// Note: We also use a specific (possible different) protocol version /// for both the backend database and MMR data files. /// This defines the p2p layer protocol version for this node. -pub const PROTOCOL_VERSION: u32 = 2; +pub const PROTOCOL_VERSION: ProtocolVersion = ProtocolVersion(3); /// Automated testing edge_bits pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10; diff --git a/core/src/ser.rs b/core/src/ser.rs index d04ff63e28..b6dcbdaad1 100644 --- a/core/src/ser.rs +++ b/core/src/ser.rs @@ -333,7 +333,7 @@ impl ProtocolVersion { /// negotiation in the p2p layer. 
Connected peers will negotiate a suitable /// protocol version for serialization/deserialization of p2p messages. pub fn local() -> ProtocolVersion { - ProtocolVersion(PROTOCOL_VERSION) + PROTOCOL_VERSION } /// We need to specify a protocol version for our local database. From 3a68648e6de79b9b7137a31975d532da912d2182 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sun, 9 Aug 2020 23:15:35 +0100 Subject: [PATCH 05/16] store full blocks in local db in v3 convert to v2 when relaying to v2 peers --- chain/src/chain.rs | 35 +++++++++++++++++----------------- p2p/src/peer.rs | 21 ++------------------ p2p/src/peers.rs | 4 ++-- p2p/src/protocol.rs | 5 ++--- p2p/src/serv.rs | 2 +- p2p/src/types.rs | 3 ++- servers/src/common/adapters.rs | 16 ++++++++++------ store/src/lmdb.rs | 2 +- 8 files changed, 38 insertions(+), 50 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index a8fd29d74d..eb46c107a0 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -19,8 +19,8 @@ use crate::core::core::hash::{Hash, Hashed, ZERO_HASH}; use crate::core::core::merkle_proof::MerkleProof; use crate::core::core::verifier_cache::VerifierCache; use crate::core::core::{ - Block, BlockHeader, BlockSums, Committed, Inputs, KernelFeatures, Output, OutputIdentifier, - Transaction, TxKernel, + Block, BlockHeader, BlockSums, Committed, HeaderVersion, Inputs, KernelFeatures, Output, + OutputIdentifier, Transaction, TxKernel, }; use crate::core::global; use crate::core::pow; @@ -219,10 +219,8 @@ impl Chain { }; // DB migrations to be run prior to the chain being used. - { - // Migrate full blocks to protocol version v2. - chain.migrate_db_v1_v2()?; - } + // Migrate full blocks to protocol version v3. + chain.migrate_db_v2_v3()?; chain.log_heads()?; @@ -275,7 +273,7 @@ impl Chain { /// We also need to support relaying blocks with FeaturesAndCommit inputs to peers. /// So we need a way to convert blocks from CommitOnly to FeaturesAndCommit. 
/// Validating the inputs against the utxo_view allows us to look the outputs up. - fn convert_block_v2(&self, block: Block) -> Result { + pub fn convert_block_v2(&self, block: Block) -> Result { debug!( "convert_block_v2: {} at {}", block.header.hash(), @@ -398,9 +396,13 @@ impl Chain { // Only do this once we know the header PoW is valid. self.check_orphan(&b, opts)?; - // Convert block to FeaturesAndCommit inputs. - // We know this block is not an orphan and header is valid at this point. - let b = self.convert_block_v2(b)?; + // Conversion is relatively expensive so defer this until after orphan check. + // Maintain "features and commit" backward compatibility until HF4. + let b = if b.header.version <= HeaderVersion(4) { + self.convert_block_v2(b)? + } else { + b + }; let (maybe_new_head, prev_head) = { let mut header_pmmr = self.header_pmmr.write(); @@ -1404,14 +1406,13 @@ impl Chain { self.header_pmmr.read().get_header_hash_by_height(height) } - /// Migrate our local db from v1 to v2. - /// This covers blocks which themselves contain transactions. - /// Transaction kernels changed in v2 due to "variable size kernels". - fn migrate_db_v1_v2(&self) -> Result<(), Error> { - let store_v1 = self.store.with_version(ProtocolVersion(1)); - let batch = store_v1.batch()?; + /// Migrate our local db from v2 to v3. + /// "commit only" inputs. + fn migrate_db_v2_v3(&self) -> Result<(), Error> { + let store_v2 = self.store.with_version(ProtocolVersion(2)); + let batch = store_v2.batch()?; for (_, block) in batch.blocks_iter()? { - batch.migrate_block(&block, ProtocolVersion(2))?; + batch.migrate_block(&block, ProtocolVersion(3))?; } batch.commit()?; Ok(()) diff --git a/p2p/src/peer.rs b/p2p/src/peer.rs index d77da384ad..6907a85274 100644 --- a/p2p/src/peer.rs +++ b/p2p/src/peer.rs @@ -252,23 +252,6 @@ impl Peer { self.send(ban_reason_msg, msg::Type::BanReason).map(|_| ()) } - /// Sends the provided block to the remote peer. 
The request may be dropped - /// if the remote peer is known to already have the block. - pub fn send_block(&self, b: &core::Block) -> Result { - if !self.tracking_adapter.has_recv(b.hash()) { - trace!("Send block {} to {}", b.hash(), self.info.addr); - self.send(b, msg::Type::Block)?; - Ok(true) - } else { - debug!( - "Suppress block send {} to {} (already seen)", - b.hash(), - self.info.addr, - ); - Ok(false) - } - } - pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result { if !self.tracking_adapter.has_recv(b.hash()) { trace!("Send compact block {} to {}", b.hash(), self.info.addr); @@ -540,8 +523,8 @@ impl ChainAdapter for TrackingAdapter { self.adapter.locate_headers(locator) } - fn get_block(&self, h: Hash) -> Option { - self.adapter.get_block(h) + fn get_block(&self, h: Hash, peer_info: &PeerInfo) -> Option { + self.adapter.get_block(h, peer_info) } fn txhashset_read(&self, h: Hash) -> Option { diff --git a/p2p/src/peers.rs b/p2p/src/peers.rs index afa0c9d08f..36d411be24 100644 --- a/p2p/src/peers.rs +++ b/p2p/src/peers.rs @@ -667,8 +667,8 @@ impl ChainAdapter for Peers { self.adapter.locate_headers(hs) } - fn get_block(&self, h: Hash) -> Option { - self.adapter.get_block(h) + fn get_block(&self, h: Hash, peer_info: &PeerInfo) -> Option { + self.adapter.get_block(h, peer_info) } fn txhashset_read(&self, h: Hash) -> Option { diff --git a/p2p/src/protocol.rs b/p2p/src/protocol.rs index bfdf4b56d2..1112b3078c 100644 --- a/p2p/src/protocol.rs +++ b/p2p/src/protocol.rs @@ -15,7 +15,6 @@ use crate::chain; use crate::conn::{Message, MessageHandler, Tracker}; use crate::core::core::{self, hash::Hash, hash::Hashed, CompactBlock}; - use crate::msg::{ BanReason, GetPeerAddrs, Headers, Locator, Msg, PeerAddrs, Ping, Pong, TxHashSetArchive, TxHashSetRequest, Type, @@ -153,7 +152,7 @@ impl MessageHandler for Protocol { msg.header.msg_len, ); - let bo = adapter.get_block(h); + let bo = adapter.get_block(h, &self.peer_info); if let Some(b) = bo { return 
Ok(Some(Msg::new(Type::Block, b, self.peer_info.version)?)); } @@ -177,7 +176,7 @@ impl MessageHandler for Protocol { Type::GetCompactBlock => { let h: Hash = msg.body()?; - if let Some(b) = adapter.get_block(h) { + if let Some(b) = adapter.get_block(h, &self.peer_info) { let cb: CompactBlock = b.into(); Ok(Some(Msg::new( Type::CompactBlock, diff --git a/p2p/src/serv.rs b/p2p/src/serv.rs index aab5b35692..ff2a5c34c2 100644 --- a/p2p/src/serv.rs +++ b/p2p/src/serv.rs @@ -335,7 +335,7 @@ impl ChainAdapter for DummyAdapter { fn locate_headers(&self, _: &[Hash]) -> Result, chain::Error> { Ok(vec![]) } - fn get_block(&self, _: Hash) -> Option { + fn get_block(&self, _: Hash, _: &PeerInfo) -> Option { None } fn txhashset_read(&self, _h: Hash) -> Option { diff --git a/p2p/src/types.rs b/p2p/src/types.rs index 713ca19299..a019adbe64 100644 --- a/p2p/src/types.rs +++ b/p2p/src/types.rs @@ -599,7 +599,8 @@ pub trait ChainAdapter: Sync + Send { fn locate_headers(&self, locator: &[Hash]) -> Result, chain::Error>; /// Gets a full block by its hash. - fn get_block(&self, h: Hash) -> Option; + /// Converts block to v2 compatibility if necessary (based on peer protocol version). 
+ fn get_block(&self, h: Hash, peer_info: &PeerInfo) -> Option; /// Provides a reading view into the current txhashset state as well as /// the required indexes for a consumer to rewind to a consistant state diff --git a/servers/src/common/adapters.rs b/servers/src/common/adapters.rs index 83fb9ca830..fb734acbb7 100644 --- a/servers/src/common/adapters.rs +++ b/servers/src/common/adapters.rs @@ -32,6 +32,7 @@ use crate::core::core::transaction::Transaction; use crate::core::core::verifier_cache::VerifierCache; use crate::core::core::{BlockHeader, BlockSums, CompactBlock, Inputs, OutputIdentifier}; use crate::core::pow::Difficulty; +use crate::core::ser::ProtocolVersion; use crate::core::{core, global}; use crate::p2p; use crate::p2p::types::PeerInfo; @@ -355,12 +356,15 @@ where } /// Gets a full block by its hash. - fn get_block(&self, h: Hash) -> Option { - let b = self.chain().get_block(&h); - match b { - Ok(b) => Some(b), - _ => None, - } + /// Will convert to v2 compatibility based on peer protocol version. + fn get_block(&self, h: Hash, peer_info: &PeerInfo) -> Option { + self.chain() + .get_block(&h) + .map(|b| match peer_info.version.value() { + 0..=2 => Some(b), + 3..=ProtocolVersion::MAX => self.chain().convert_block_v2(b).ok(), + }) + .unwrap_or(None) } /// Provides a reading view into the current txhashset state as well as diff --git a/store/src/lmdb.rs b/store/src/lmdb.rs index 3ce4cc91a2..25edaa1859 100644 --- a/store/src/lmdb.rs +++ b/store/src/lmdb.rs @@ -73,7 +73,7 @@ where } } -const DEFAULT_DB_VERSION: ProtocolVersion = ProtocolVersion(2); +const DEFAULT_DB_VERSION: ProtocolVersion = ProtocolVersion(3); /// LMDB-backed store facilitating data access and serialization. All writes /// are done through a Batch abstraction providing atomicity. 
From 0414191bbe05b697d838f588ec81f6969f422796 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sun, 9 Aug 2020 17:20:21 +0100 Subject: [PATCH 06/16] add debug version_str for inputs --- chain/src/chain.rs | 5 +++-- chain/src/pipe.rs | 3 ++- core/src/core/transaction.rs | 8 ++++++++ pool/src/pool.rs | 2 ++ 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index eb46c107a0..f3590d870e 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -275,9 +275,10 @@ impl Chain { /// Validating the inputs against the utxo_view allows us to look the outputs up. pub fn convert_block_v2(&self, block: Block) -> Result { debug!( - "convert_block_v2: {} at {}", + "convert_block_v2: {} at {} ({})", block.header.hash(), - block.header.height + block.header.height, + block.inputs().version_str(), ); if block.inputs().is_empty() { diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index 0d990a63ec..39f64bd4b9 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -84,12 +84,13 @@ pub fn process_block( ctx: &mut BlockContext<'_>, ) -> Result<(Option, BlockHeader), Error> { debug!( - "pipe: process_block {} at {} [in/out/kern: {}/{}/{}]", + "pipe: process_block {} at {} [in/out/kern: {}/{}/{}] ({})", b.hash(), b.header.height, b.inputs().len(), b.outputs().len(), b.kernels().len(), + b.inputs().version_str(), ); // Read current chain head from db via the batch. diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index e17ba8c784..da7d211c56 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -1795,6 +1795,14 @@ impl Inputs { Inputs::FeaturesAndCommit(inputs) => inputs.sort_unstable(), } } + + /// For debug purposes only. Do not rely on this for anything. + pub fn version_str(&self) -> &str { + match self { + Inputs::CommitOnly(_) => "v3", + Inputs::FeaturesAndCommit(_) => "v2", + } + } } // Enum of various supported kernel "features". 
diff --git a/pool/src/pool.rs b/pool/src/pool.rs index 51976ae504..ef8058a197 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -298,6 +298,8 @@ where tx: Transaction, extra_tx: Option, ) -> Result { + debug!("convert_tx_v2: {}", tx.inputs().version_str()); + let mut inputs: Vec<_> = tx.inputs().into(); let agg_tx = self From 4abd8fde38931ce106209086818e9cd678f28814 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 10 Aug 2020 15:33:59 +0100 Subject: [PATCH 07/16] no assumptions about spent index sort order --- chain/src/txhashset/txhashset.rs | 9 +++++---- chain/src/txhashset/utxo_view.rs | 11 ++++++++++- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index e43de34c2e..7ceffe0c94 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -1371,13 +1371,14 @@ impl<'a> Extension<'a> { } // Update output_pos based on "unspending" all spent pos from this block. - // This is necessary to ensure the output_pos index correclty reflects a + // This is necessary to ensure the output_pos index correctly reflects a // reused output commitment. For example an output at pos 1, spent, reused at pos 2. // The output_pos index should be updated to reflect the old pos 1 when unspent. 
if let Ok(spent) = spent { - let inputs: Vec<_> = block.inputs().into(); - for (input, pos) in inputs.iter().zip(spent) { - batch.save_output_pos_height(&input.commitment(), pos)?; + for pos in spent { + if let Some(out) = self.output_pmmr.get_data(pos.pos) { + batch.save_output_pos_height(&out.commitment(), pos)?; + } } } diff --git a/chain/src/txhashset/utxo_view.rs b/chain/src/txhashset/utxo_view.rs index d4fec4f482..7b43fc7558 100644 --- a/chain/src/txhashset/utxo_view.rs +++ b/chain/src/txhashset/utxo_view.rs @@ -106,6 +106,7 @@ impl<'a> UTXOView<'a> { if out == input.into() { Ok((out, pos)) } else { + error!("input mismatch: {:?}, {:?}, {:?}", out, pos, input); Err(ErrorKind::Other("input mismatch".into()).into()) } }) @@ -127,7 +128,15 @@ impl<'a> UTXOView<'a> { let pos = batch.get_output_pos_height(&input)?; if let Some(pos) = pos { if let Some(out) = self.output_pmmr.get_data(pos.pos) { - return Ok((out, pos)); + if out.commitment() == input { + return Ok((out, pos)); + } else { + error!("input mismatch: {:?}, {:?}, {:?}", out, pos, input); + return Err(ErrorKind::Other( + "input mismatch (output_pos index mismatch?)".into(), + ) + .into()); + } } } Err(ErrorKind::AlreadySpent(input).into()) From dae8a6d1a521ec4d6636b2d262c7e4146c2b3c62 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 14 Aug 2020 14:38:33 +0100 Subject: [PATCH 08/16] add additional version debug logs --- chain/src/chain.rs | 17 +++++++---------- chain/src/store.rs | 9 ++++++++- pool/src/pool.rs | 6 +++++- servers/src/common/adapters.rs | 13 ++++++++++++- store/src/lmdb.rs | 18 ++++++++++++++---- 5 files changed, 46 insertions(+), 17 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index f3590d870e..e9557891a8 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -19,8 +19,8 @@ use crate::core::core::hash::{Hash, Hashed, ZERO_HASH}; use crate::core::core::merkle_proof::MerkleProof; use 
crate::core::core::verifier_cache::VerifierCache; use crate::core::core::{ - Block, BlockHeader, BlockSums, Committed, HeaderVersion, Inputs, KernelFeatures, Output, - OutputIdentifier, Transaction, TxKernel, + Block, BlockHeader, BlockSums, Committed, Inputs, KernelFeatures, Output, OutputIdentifier, + Transaction, TxKernel, }; use crate::core::global; use crate::core::pow; @@ -275,7 +275,7 @@ impl Chain { /// Validating the inputs against the utxo_view allows us to look the outputs up. pub fn convert_block_v2(&self, block: Block) -> Result { debug!( - "convert_block_v2: {} at {} ({})", + "convert_block_v2: {} at {} ({} -> v2)", block.header.hash(), block.header.height, block.inputs().version_str(), @@ -397,13 +397,10 @@ impl Chain { // Only do this once we know the header PoW is valid. self.check_orphan(&b, opts)?; - // Conversion is relatively expensive so defer this until after orphan check. - // Maintain "features and commit" backward compatibility until HF4. - let b = if b.header.version <= HeaderVersion(4) { - self.convert_block_v2(b)? - } else { - b - }; + // We can only reliably convert to "v2" if not an orphan (may spend output from previous block). + // We convert from "v3" to "v2" by looking up outputs to be spent. + // This conversion also ensures a block received in "v2" has valid input features (prevents malleability). + let b = self.convert_block_v2(b)?; let (maybe_new_head, prev_head) = { let mut header_pmmr = self.header_pmmr.write(); diff --git a/chain/src/store.rs b/chain/src/store.rs index 10c2006c41..f4e3b897e4 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -206,6 +206,13 @@ impl<'a> Batch<'a> { /// Save the block to the db. /// Note: the block header is not saved to the db here, assumes this has already been done. 
pub fn save_block(&self, b: &Block) -> Result<(), Error> { + debug!( + "save_block: {} at {} ({} -> v{})", + b.header.hash(), + b.header.height, + b.inputs().version_str(), + self.db.protocol_version(), + ); self.db.put_ser(&to_key(BLOCK_PREFIX, b.hash())[..], b)?; Ok(()) } @@ -222,7 +229,7 @@ impl<'a> Batch<'a> { /// Block may have been read using a previous protocol version but we do not actually care. pub fn migrate_block(&self, b: &Block, version: ProtocolVersion) -> Result<(), Error> { self.db - .put_ser_with_version(&to_key(BLOCK_PREFIX, &mut b.hash())[..], b, version)?; + .put_ser_with_version(&to_key(BLOCK_PREFIX, b.hash())[..], b, version)?; Ok(()) } diff --git a/pool/src/pool.rs b/pool/src/pool.rs index ef8058a197..4e3a6f97f8 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -298,7 +298,11 @@ where tx: Transaction, extra_tx: Option, ) -> Result { - debug!("convert_tx_v2: {}", tx.inputs().version_str()); + debug!( + "convert_tx_v2: {} ({} -> v2)", + tx.hash(), + tx.inputs().version_str() + ); let mut inputs: Vec<_> = tx.inputs().into(); diff --git a/servers/src/common/adapters.rs b/servers/src/common/adapters.rs index fb734acbb7..507624251e 100644 --- a/servers/src/common/adapters.rs +++ b/servers/src/common/adapters.rs @@ -172,6 +172,12 @@ where // push the freshly hydrated block through the chain pipeline match core::Block::hydrate_from(cb, &[]) { Ok(block) => { + debug!( + "successfully hydrated (empty) block: {} at {} ({})", + block.header.hash(), + block.header.height, + block.inputs().version_str(), + ); if !self.sync_state.is_syncing() { for hook in &self.hooks { hook.on_block_received(&block, &peer_info.addr); @@ -232,7 +238,12 @@ where .validate(&prev.total_kernel_offset, self.verifier_cache.clone()) .is_ok() { - debug!("successfully hydrated block from tx pool!"); + debug!( + "successfully hydrated block: {} at {} ({})", + block.header.hash(), + block.header.height, + block.inputs().version_str(), + ); self.process_block(block, peer_info, 
chain::Options::NONE) } else if self.sync_state.status() == SyncStatus::NoSync { debug!("adapter: block invalid after hydration, requesting full block"); diff --git a/store/src/lmdb.rs b/store/src/lmdb.rs index 25edaa1859..b22bb0c79c 100644 --- a/store/src/lmdb.rs +++ b/store/src/lmdb.rs @@ -157,11 +157,16 @@ impl Store { env: self.env.clone(), db: self.db.clone(), name: self.name.clone(), - version: version, + version, alloc_chunk_size, } } + /// Protocol version for the store. + pub fn protocol_version(&self) -> ProtocolVersion { + self.version + } + /// Opens the database environment pub fn open(&self) -> Result<(), Error> { let mut w = self.db.write(); @@ -275,7 +280,7 @@ impl Store { ) -> Result, Error> { let res: lmdb::error::Result<&[u8]> = access.get(&db, key); match res.to_opt() { - Ok(Some(mut res)) => match ser::deserialize(&mut res, self.version) { + Ok(Some(mut res)) => match ser::deserialize(&mut res, self.protocol_version()) { Ok(res) => Ok(Some(res)), Err(e) => Err(Error::SerErr(format!("{}", e))), }, @@ -310,7 +315,7 @@ impl Store { cursor, seek: false, prefix: from.to_vec(), - version: self.version, + version: self.protocol_version(), _marker: marker::PhantomData, }) } @@ -348,7 +353,12 @@ impl<'a> Batch<'a> { /// Writes a single key and its `Writeable` value to the db. /// Encapsulates serialization using the (default) version configured on the store instance. pub fn put_ser(&self, key: &[u8], value: &W) -> Result<(), Error> { - self.put_ser_with_version(key, value, self.store.version) + self.put_ser_with_version(key, value, self.store.protocol_version()) + } + + /// Protocol version used by this batch. + pub fn protocol_version(&self) -> ProtocolVersion { + self.store.protocol_version() } /// Writes a single key and its `Writeable` value to the db. 
From cbfec31ebc64b33e762fe66a519c23832eef6611 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 14 Aug 2020 15:36:53 +0100 Subject: [PATCH 09/16] fix ser/deser tests for proto v3 --- core/tests/block.rs | 63 +++++++++++++++++++++++++++++++++------------ core/tests/core.rs | 60 ++++++++++++++++++++++++++++++------------ 2 files changed, 89 insertions(+), 34 deletions(-) diff --git a/core/tests/block.rs b/core/tests/block.rs index 012afda807..8212461c0e 100644 --- a/core/tests/block.rs +++ b/core/tests/block.rs @@ -19,7 +19,7 @@ use crate::core::core::block::{Block, BlockHeader, Error, HeaderVersion, Untrust use crate::core::core::hash::Hashed; use crate::core::core::id::ShortIdentifiable; use crate::core::core::transaction::{ - self, KernelFeatures, NRDRelativeHeight, Output, OutputFeatures, Transaction, + self, KernelFeatures, NRDRelativeHeight, Output, OutputFeatures, OutputIdentifier, Transaction, }; use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; use crate::core::core::{Committed, CompactBlock}; @@ -522,9 +522,53 @@ fn block_single_tx_serialized_size() { let prev = BlockHeader::default(); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id); + + // Default protocol version (3) let mut vec = Vec::new(); ser::serialize_default(&mut vec, &b).expect("serialization failed"); + assert_eq!(vec.len(), 2_669); + + // Protocol version 3 + let mut vec = Vec::new(); + ser::serialize(&mut vec, ser::ProtocolVersion(3), &b).expect("serialization failed"); + assert_eq!(vec.len(), 2_669); + + // Protocol version 2. + // Note: block must be in "v2" compatibility with "features and commit" inputs for this. + // Normally we would convert the block by looking inputs up in utxo but we fake it here for testing. 
+ let inputs: Vec<_> = b.inputs().into(); + let inputs: Vec<_> = inputs + .iter() + .map(|input| OutputIdentifier { + features: OutputFeatures::Plain, + commit: input.commitment(), + }) + .collect(); + let b = Block { + header: b.header, + body: b.body.replace_inputs(inputs.as_slice().into()), + }; + + // Protocol version 2 + let mut vec = Vec::new(); + ser::serialize(&mut vec, ser::ProtocolVersion(2), &b).expect("serialization failed"); assert_eq!(vec.len(), 2_670); + + // Protocol version 1 (fixed size kernels) + let mut vec = Vec::new(); + ser::serialize(&mut vec, ser::ProtocolVersion(1), &b).expect("serialization failed"); + assert_eq!(vec.len(), 2_694); + + // Check we can also serialize a v2 compatibility block in v3 protocol version + // without needing to explicitly convert the block. + let mut vec = Vec::new(); + ser::serialize(&mut vec, ser::ProtocolVersion(3), &b).expect("serialization failed"); + assert_eq!(vec.len(), 2_669); + + // Default protocol version (3) for completeness + let mut vec = Vec::new(); + ser::serialize_default(&mut vec, &b).expect("serialization failed"); + assert_eq!(vec.len(), 2_669); } #[test] @@ -571,25 +615,10 @@ fn block_10_tx_serialized_size() { let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let b = new_block(&txs, &keychain, &builder, &prev, &key_id); - // Default protocol version. 
{ let mut vec = Vec::new(); ser::serialize_default(&mut vec, &b).expect("serialization failed"); - assert_eq!(vec.len(), 16_836); - } - - // Explicit protocol version 1 - { - let mut vec = Vec::new(); - ser::serialize(&mut vec, ser::ProtocolVersion(1), &b).expect("serialization failed"); - assert_eq!(vec.len(), 16_932); - } - - // Explicit protocol version 2 - { - let mut vec = Vec::new(); - ser::serialize(&mut vec, ser::ProtocolVersion(2), &b).expect("serialization failed"); - assert_eq!(vec.len(), 16_836); + assert_eq!(vec.len(), 16_826); } } diff --git a/core/tests/core.rs b/core/tests/core.rs index 5df98ca6b5..220bfabb49 100644 --- a/core/tests/core.rs +++ b/core/tests/core.rs @@ -21,7 +21,8 @@ use self::core::core::block::Error::KernelLockHeight; use self::core::core::hash::{Hashed, ZERO_HASH}; use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; use self::core::core::{ - aggregate, deaggregate, KernelFeatures, Output, Transaction, TxKernel, Weighting, + aggregate, deaggregate, KernelFeatures, Output, OutputFeatures, OutputIdentifier, Transaction, + TxKernel, Weighting, }; use self::core::libtx::build::{self, initial_tx, input, output, with_excess}; use self::core::libtx::{aggsig, ProofBuilder}; @@ -42,26 +43,51 @@ fn test_setup() { fn simple_tx_ser() { let tx = tx2i1o(); - // Default protocol version. - { - let mut vec = Vec::new(); - ser::serialize_default(&mut vec, &tx).expect("serialization failed"); - assert_eq!(vec.len(), 947); - } + // Default protocol version (3). + let mut vec = Vec::new(); + ser::serialize_default(&mut vec, &tx).expect("serialization failed"); + assert_eq!(vec.len(), 945); + + // Explicit protocol version 3. + let mut vec = Vec::new(); + ser::serialize(&mut vec, ser::ProtocolVersion(3), &tx).expect("serialization failed"); + assert_eq!(vec.len(), 945); + + // We need to convert the tx to v2 compatibility with "features and commitment" inputs + // to serialize to any previous protocol version. 
+ // Normally we would do this conversion against the utxo and txpool but we fake it here for testing. + let inputs: Vec<_> = tx.inputs().into(); + let inputs: Vec<_> = inputs + .iter() + .map(|input| OutputIdentifier { + features: OutputFeatures::Plain, + commit: input.commitment(), + }) + .collect(); + let tx = Transaction { + body: tx.body.replace_inputs(inputs.as_slice().into()), + ..tx + }; // Explicit protocol version 1. - { - let mut vec = Vec::new(); - ser::serialize(&mut vec, ser::ProtocolVersion(1), &tx).expect("serialization failed"); - assert_eq!(vec.len(), 955); - } + let mut vec = Vec::new(); + ser::serialize(&mut vec, ser::ProtocolVersion(1), &tx).expect("serialization failed"); + assert_eq!(vec.len(), 955); // Explicit protocol version 2. - { - let mut vec = Vec::new(); - ser::serialize(&mut vec, ser::ProtocolVersion(2), &tx).expect("serialization failed"); - assert_eq!(vec.len(), 947); - } + let mut vec = Vec::new(); + ser::serialize(&mut vec, ser::ProtocolVersion(2), &tx).expect("serialization failed"); + assert_eq!(vec.len(), 947); + + // Check we can still serialize to protocol version 3 without explicitly converting the tx. + let mut vec = Vec::new(); + ser::serialize(&mut vec, ser::ProtocolVersion(3), &tx).expect("serialization failed"); + assert_eq!(vec.len(), 945); + + // And default protocol version for completeness. 
+ let mut vec = Vec::new(); + ser::serialize_default(&mut vec, &tx).expect("serialization failed"); + assert_eq!(vec.len(), 945); } #[test] From 758cc1bc90a28eaa9e48d87c81bf11632d90830f Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 14 Aug 2020 16:44:03 +0100 Subject: [PATCH 10/16] cleanup coinbase maturity --- chain/src/txhashset/utxo_view.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/chain/src/txhashset/utxo_view.rs b/chain/src/txhashset/utxo_view.rs index 7b43fc7558..b7a8956ec4 100644 --- a/chain/src/txhashset/utxo_view.rs +++ b/chain/src/txhashset/utxo_view.rs @@ -177,16 +177,18 @@ impl<'a> UTXOView<'a> { // outputs we are attempting to spend. let inputs: Vec = inputs.into(); - let outputs: Result, _> = inputs + // Note: we only care about coinbase outputs here so filter appropriately. + // Use validate_input() to lookup outputs being spent and ignore non-coinbase. + // If we are trying to spend an already spent or non-existent coinbase this will be caught elsewhere. + let pos = inputs .iter() - .map(|x| self.validate_input(x.commitment(), batch)) - .collect(); - - let pos = outputs? 
- .iter() - .filter_map(|(out, pos)| match out.features { - OutputFeatures::Coinbase => Some(pos.pos), - _ => None, + .filter_map(|x| { + self.validate_input(x.commitment(), batch) + .map(|(out, pos)| match out.features { + OutputFeatures::Coinbase => Some(pos.pos), + _ => None, + }) + .unwrap_or(None) }) .max(); From a3c3af972ac45f1b365273f8c69d8632d0754ee0 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 14 Aug 2020 20:28:55 +0100 Subject: [PATCH 11/16] rework pool to better handle v2 conversion robustly --- chain/src/chain.rs | 4 +- chain/src/pipe.rs | 2 +- chain/src/txhashset/utxo_view.rs | 38 ++++---- chain/tests/test_coinbase_maturity.rs | 8 +- core/src/core/transaction.rs | 5 + pool/src/pool.rs | 41 ++------ pool/src/transaction_pool.rs | 133 +++++++++++++++++--------- pool/src/types.rs | 2 +- pool/tests/block_reconciliation.rs | 18 +++- pool/tests/common.rs | 4 +- servers/src/common/adapters.rs | 4 +- 11 files changed, 146 insertions(+), 113 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index e9557891a8..45bd0cff54 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -663,12 +663,12 @@ impl Chain { /// Verify we are not attempting to spend a coinbase output /// that has not yet sufficiently matured. 
- pub fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), Error> { + pub fn verify_coinbase_maturity(&self, inputs: Inputs) -> Result<(), Error> { let height = self.next_block_height()?; let header_pmmr = self.header_pmmr.read(); let txhashset = self.txhashset.read(); txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| { - utxo.verify_coinbase_maturity(&tx.inputs(), height, batch)?; + utxo.verify_coinbase_maturity(inputs, height, batch)?; Ok(()) }) } diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index 39f64bd4b9..c71ccc822a 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -424,7 +424,7 @@ fn verify_coinbase_maturity( let header_extension = &ext.header_extension; extension .utxo_view(header_extension) - .verify_coinbase_maturity(&block.inputs(), block.header.height, batch) + .verify_coinbase_maturity(block.inputs(), block.header.height, batch) } /// Verify kernel sums across the full utxo and kernel sets based on block_sums diff --git a/chain/src/txhashset/utxo_view.rs b/chain/src/txhashset/utxo_view.rs index b7a8956ec4..7d61ab1e80 100644 --- a/chain/src/txhashset/utxo_view.rs +++ b/chain/src/txhashset/utxo_view.rs @@ -16,10 +16,7 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::pmmr::{self, ReadonlyPMMR}; -use crate::core::core::{ - Block, BlockHeader, CommitWrapper, Inputs, Output, OutputFeatures, OutputIdentifier, - Transaction, -}; +use crate::core::core::{Block, BlockHeader, Inputs, Output, OutputIdentifier, Transaction}; use crate::core::global; use crate::error::{Error, ErrorKind}; use crate::store::Batch; @@ -169,26 +166,27 @@ impl<'a> UTXOView<'a> { /// that have not sufficiently matured. pub fn verify_coinbase_maturity( &self, - inputs: &Inputs, + inputs: Inputs, height: u64, batch: &Batch<'_>, ) -> Result<(), Error> { - // Find the greatest output pos of any coinbase - // outputs we are attempting to spend. 
- let inputs: Vec = inputs.into(); - - // Note: we only care about coinbase outputs here so filter appropriately. - // Use validate_input() to lookup outputs being spent and ignore non-coinbase. - // If we are trying to spend an already spent or non-existent coinbase this will be caught elsewhere. - let pos = inputs + let inputs: Vec<_> = inputs.into(); + + // Lookup the outputs being spent. + let spent: Result, _> = inputs .iter() - .filter_map(|x| { - self.validate_input(x.commitment(), batch) - .map(|(out, pos)| match out.features { - OutputFeatures::Coinbase => Some(pos.pos), - _ => None, - }) - .unwrap_or(None) + .map(|x| self.validate_input(x.commitment(), batch)) + .collect(); + + // Find the max pos of any coinbase being spent. + let pos = spent? + .iter() + .filter_map(|(out, pos)| { + if out.features.is_coinbase() { + Some(pos.pos) + } else { + None + } }) .max(); diff --git a/chain/tests/test_coinbase_maturity.rs b/chain/tests/test_coinbase_maturity.rs index 325cbedf61..b6f071ce34 100644 --- a/chain/tests/test_coinbase_maturity.rs +++ b/chain/tests/test_coinbase_maturity.rs @@ -122,7 +122,7 @@ fn test_coinbase_maturity() { // Confirm the tx attempting to spend the coinbase output // is not valid at the current block height given the current chain state. - match chain.verify_coinbase_maturity(&coinbase_txn) { + match chain.verify_coinbase_maturity(coinbase_txn.inputs()) { Ok(_) => {} Err(e) => match e.kind() { ErrorKind::ImmatureCoinbase => {} @@ -204,7 +204,7 @@ fn test_coinbase_maturity() { // Confirm the tx attempting to spend the coinbase output // is not valid at the current block height given the current chain state. - match chain.verify_coinbase_maturity(&coinbase_txn) { + match chain.verify_coinbase_maturity(coinbase_txn.inputs()) { Ok(_) => {} Err(e) => match e.kind() { ErrorKind::ImmatureCoinbase => {} @@ -254,7 +254,9 @@ fn test_coinbase_maturity() { // Confirm the tx spending the coinbase output is now valid. 
// The coinbase output has matured sufficiently based on current chain state. - chain.verify_coinbase_maturity(&coinbase_txn).unwrap(); + chain + .verify_coinbase_maturity(coinbase_txn.inputs()) + .unwrap(); let txs = &[coinbase_txn]; let fees = txs.iter().map(|tx| tx.fee()).sum(); diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index da7d211c56..d77f47f56c 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -2041,6 +2041,11 @@ impl OutputIdentifier { proof, } } + + /// Is this a coinbase? + pub fn is_coinbase(&self) -> bool { + self.features.is_coinbase() + } } impl ToHex for OutputIdentifier { diff --git a/pool/src/pool.rs b/pool/src/pool.rs index 4e3a6f97f8..b939b0393b 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -289,21 +289,11 @@ where Ok(valid_txs) } - /// Convert a transaction for v2 compatibility. - /// We may receive a transaction with "commit only" inputs. - /// We convert it to "features and commit" so we can safely relay it to v2 peers. - /// Converson is done by looking up outputs to be spent in both the pool and the current utxo. - pub fn convert_tx_v2( + pub fn locate_spends( &self, - tx: Transaction, + tx: &Transaction, extra_tx: Option, - ) -> Result { - debug!( - "convert_tx_v2: {} ({} -> v2)", - tx.hash(), - tx.inputs().version_str() - ); - + ) -> Result<(Vec, Vec), PoolError> { let mut inputs: Vec<_> = tx.inputs().into(); let agg_tx = self @@ -318,30 +308,13 @@ where // By applying cut_through to tx inputs and agg_tx outputs we can // determine the outputs being spent from the pool and those still unspent // that need to be looked up via the current utxo. - let (inputs, _, _, spent_pool) = + let (spent_utxo, _, _, spent_pool) = transaction::cut_through(&mut inputs[..], &mut outputs[..])?; // Lookup remaining outputs to be spent from the current utxo. 
- let spent_utxo = self.blockchain.validate_inputs(inputs.into())?; - - // Combine outputs spent in utxo with outputs spent in pool to give us the - // full set of outputs being spent by this transaction. - // This is our source of truth for input features. - let mut spent = spent_pool.to_vec(); - spent.extend(spent_utxo); - spent.sort(); - - // Now build the resulting transaction based on our inputs and outputs from the original transaction. - // Remember to use the original kernels and kernel offset. - let mut outputs = tx.outputs().to_vec(); - let (inputs, outputs, _, _) = transaction::cut_through(&mut spent[..], &mut outputs[..])?; - let tx = Transaction::new(inputs.into(), outputs, tx.kernels()).with_offset(tx.offset); - - // Validate the tx to ensure our converted inputs are correct. - tx.validate(Weighting::AsTransaction, self.verifier_cache.clone()) - .map_err(PoolError::InvalidTx)?; - - Ok(tx) + let spent_utxo = self.blockchain.validate_inputs(spent_utxo.into())?; + + Ok((spent_pool.to_vec(), spent_utxo)) } fn apply_tx_to_block_sums( diff --git a/pool/src/transaction_pool.rs b/pool/src/transaction_pool.rs index 8ddaef5b24..5fa9110c7f 100644 --- a/pool/src/transaction_pool.rs +++ b/pool/src/transaction_pool.rs @@ -20,7 +20,9 @@ use self::core::core::hash::{Hash, Hashed}; use self::core::core::id::ShortId; use self::core::core::verifier_cache::VerifierCache; -use self::core::core::{transaction, Block, BlockHeader, HeaderVersion, Transaction, Weighting}; +use self::core::core::{ + transaction, Block, BlockHeader, HeaderVersion, OutputIdentifier, Transaction, Weighting, +}; use self::core::global; use self::util::RwLock; use crate::pool::Pool; @@ -88,22 +90,11 @@ where // Add tx to stempool (passing in all txs from txpool to validate against). 
fn add_to_stempool( &mut self, - entry: PoolEntry, + entry: &PoolEntry, header: &BlockHeader, - ) -> Result { - let txpool_agg = self.txpool.all_transactions_aggregate(None)?; - - // Convert the tx to v2 looking for unspent outputs in both stempool and txpool, and utxo. - let src = entry.src; - let tx = entry.tx; - let tx_v2 = self.stempool.convert_tx_v2(tx, txpool_agg.clone())?; - let entry = PoolEntry::new(tx_v2, src); - - self.stempool - .add_to_pool(entry.clone(), txpool_agg, header)?; - - // If all is good return our pool entry with the converted tx. - Ok(entry) + extra_tx: Option, + ) -> Result<(), PoolError> { + self.stempool.add_to_pool(entry.clone(), extra_tx, header) } fn add_to_reorg_cache(&mut self, entry: &PoolEntry) { @@ -118,34 +109,27 @@ where debug!("added tx to reorg_cache: size now {}", cache.len()); } - fn add_to_txpool( - &mut self, - entry: PoolEntry, - header: &BlockHeader, - ) -> Result { - // First deaggregate the tx based on current txpool txs. - let entry = if entry.tx.kernels().len() == 1 { - entry + // Deaggregate this tx against the txpool. + // Re-converts the tx to v2 if deaggregated. + // Returns the new deaggregated entry or the original entry if no deaggregation. + fn deaggregate_tx(&self, entry: &PoolEntry) -> Result { + if entry.tx.kernels().len() == 1 { + Ok(entry.clone()) } else { let tx = entry.tx.clone(); let txs = self.txpool.find_matching_transactions(tx.kernels()); - if !txs.is_empty() { - let tx = transaction::deaggregate(tx, &txs)?; - - // Validate this deaggregated tx "as tx", subject to regular tx weight limits. 
- tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?; - - PoolEntry::new(tx, TxSource::Deaggregate) + if txs.is_empty() { + Ok(entry.clone()) } else { - entry + let tx = transaction::deaggregate(tx, &txs)?; + let (spent_pool, spent_utxo) = self.txpool.locate_spends(&tx, None)?; + let tx = self.convert_tx_v2(tx, &spent_pool, &spent_utxo)?; + Ok(PoolEntry::new(tx, TxSource::Deaggregate)) } - }; - - // Convert the deaggregated tx to v2 looking for unspent outputs in the txpool, and utxo. - let src = entry.src; - let tx_v2 = self.txpool.convert_tx_v2(entry.tx, None)?; - let entry = PoolEntry::new(tx_v2, src); + } + } + fn add_to_txpool(&mut self, entry: &PoolEntry, header: &BlockHeader) -> Result<(), PoolError> { self.txpool.add_to_pool(entry.clone(), None, header)?; // We now need to reconcile the stempool based on the new state of the txpool. @@ -153,8 +137,7 @@ where let txpool_agg = self.txpool.all_transactions_aggregate(None)?; self.stempool.reconcile(txpool_agg, header)?; - // If all is good return our pool entry with the deaggregated and converted tx. - Ok(entry) + Ok(()) } /// Verify the tx kernel variants and ensure they can all be accepted to the txpool/stempool @@ -184,6 +167,9 @@ where stem: bool, header: &BlockHeader, ) -> Result<(), PoolError> { + // + // TODO - hash here is not sufficient as we have v2 vs. v3 txs... + // // Quick check for duplicate txs. // Our stempool is private and we do not want to reveal anything about the txs contained. // If this is a stem tx and is already present in stempool then fluff by adding to txpool. @@ -215,19 +201,47 @@ where // Check the tx lock_time is valid based on current chain state. self.blockchain.verify_tx_lock_height(&tx)?; + // If stem we want to account for the txpool. + let extra_tx = if stem { + self.txpool.all_transactions_aggregate(None)? + } else { + None + }; + + // Locate outputs being spent from pool and current utxo. 
+ let (spent_pool, spent_utxo) = if stem { + self.stempool.locate_spends(&tx, extra_tx.clone()) + } else { + self.txpool.locate_spends(&tx, None) + }?; + // Check coinbase maturity before we go any further. - self.blockchain.verify_coinbase_maturity(&tx)?; + let coinbase_inputs: Vec<_> = spent_utxo + .iter() + .filter(|x| x.is_coinbase()) + .cloned() + .collect(); + self.blockchain + .verify_coinbase_maturity(coinbase_inputs.as_slice().into())?; + + // Convert the tx to "v2" compatibility with "features and commit" inputs. + let tx_v2 = self.convert_tx_v2(tx, &spent_pool, &spent_utxo)?; + let entry = PoolEntry::new(tx_v2, src); // If this is a stem tx then attempt to add it to stempool. // If the adapter fails to accept the new stem tx then fallback to fluff via txpool. if stem { - let entry = self.add_to_stempool(PoolEntry::new(tx.clone(), src), header)?; + self.add_to_stempool(&entry, header, extra_tx)?; if self.adapter.stem_tx_accepted(&entry).is_ok() { return Ok(()); } } - let entry = self.add_to_txpool(PoolEntry::new(tx, src), header)?; + // Deaggregate this tx against the current txpool. + let entry = self.deaggregate_tx(&entry)?; + + // Add tx to txpool. + self.add_to_txpool(&entry, header)?; self.add_to_reorg_cache(&entry); self.adapter.tx_accepted(&entry); @@ -239,6 +253,37 @@ where Ok(()) } + /// Convert a transaction for v2 compatibility. + /// We may receive a transaction with "commit only" inputs. + /// We convert it to "features and commit" so we can safely relay it to v2 peers. + /// Conversion is done using outputs previously looked up in both the pool and the current utxo. 
+ fn convert_tx_v2( + &self, + tx: Transaction, + spent_pool: &[OutputIdentifier], + spent_utxo: &[OutputIdentifier], + ) -> Result { + debug!( + "convert_tx_v2: {} ({} -> v2)", + tx.hash(), + tx.inputs().version_str(), + ); + + let mut inputs = spent_utxo.to_vec(); + inputs.extend_from_slice(spent_pool); + inputs.sort_unstable(); + + let tx = Transaction { + body: tx.body.replace_inputs(inputs.as_slice().into()), + ..tx + }; + + // Validate the tx to ensure our converted inputs are correct. + tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?; + + Ok(tx) + } + // Evict a transaction from the txpool. // Uses bucket logic to identify the "last" transaction. // No other tx depends on it and it has low fee_to_weight. @@ -265,7 +310,7 @@ where header.hash(), ); for entry in entries { - let _ = self.add_to_txpool(entry, header); + let _ = self.add_to_txpool(&entry, header); } debug!( "reconcile_reorg_cache: block: {:?} ... done.", diff --git a/pool/src/types.rs b/pool/src/types.rs index 1055475071..bc34cb8f62 100644 --- a/pool/src/types.rs +++ b/pool/src/types.rs @@ -275,7 +275,7 @@ impl From for PoolError { pub trait BlockChain: Sync + Send { /// Verify any coinbase outputs being spent /// have matured sufficiently. - fn verify_coinbase_maturity(&self, tx: &transaction::Transaction) -> Result<(), PoolError>; + fn verify_coinbase_maturity(&self, inputs: Inputs) -> Result<(), PoolError>; /// Verify any coinbase outputs being spent /// have matured sufficiently. 
diff --git a/pool/tests/block_reconciliation.rs b/pool/tests/block_reconciliation.rs index 0fbdfc378a..0250a55812 100644 --- a/pool/tests/block_reconciliation.rs +++ b/pool/tests/block_reconciliation.rs @@ -141,10 +141,20 @@ fn test_transaction_pool_block_reconciliation() { pool.reconcile_block(&block).unwrap(); assert_eq!(pool.total_size(), 4); - assert_eq!(pool.txpool.entries[0].tx, valid_transaction); - assert_eq!(pool.txpool.entries[1].tx, pool_child); - assert_eq!(pool.txpool.entries[2].tx, conflict_valid_child); - assert_eq!(pool.txpool.entries[3].tx, valid_child_valid); + // Compare the various txs by their kernels as entries in the pool are "v2" compatibility. + assert_eq!( + pool.txpool.entries[0].tx.kernels(), + valid_transaction.kernels() + ); + assert_eq!(pool.txpool.entries[1].tx.kernels(), pool_child.kernels()); + assert_eq!( + pool.txpool.entries[2].tx.kernels(), + conflict_valid_child.kernels() + ); + assert_eq!( + pool.txpool.entries[3].tx.kernels(), + valid_child_valid.kernels() + ); // Cleanup db directory clean_output_dir(db_root.into()); diff --git a/pool/tests/common.rs b/pool/tests/common.rs index 877831caae..7e3e1468b3 100644 --- a/pool/tests/common.rs +++ b/pool/tests/common.rs @@ -143,9 +143,9 @@ impl BlockChain for ChainAdapter { .map_err(|_| PoolError::Other("failed to validate inputs".into())) } - fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), PoolError> { + fn verify_coinbase_maturity(&self, inputs: Inputs) -> Result<(), PoolError> { self.chain - .verify_coinbase_maturity(tx) + .verify_coinbase_maturity(inputs) .map_err(|_| PoolError::ImmatureCoinbase) } diff --git a/servers/src/common/adapters.rs b/servers/src/common/adapters.rs index 507624251e..f1f8f9e2e8 100644 --- a/servers/src/common/adapters.rs +++ b/servers/src/common/adapters.rs @@ -960,9 +960,9 @@ impl pool::BlockChain for PoolToChainAdapter { .map_err(|_| pool::PoolError::Other("failed to validate tx".to_string())) } - fn verify_coinbase_maturity(&self, 
tx: &Transaction) -> Result<(), pool::PoolError> { + fn verify_coinbase_maturity(&self, inputs: Inputs) -> Result<(), pool::PoolError> { self.chain() - .verify_coinbase_maturity(tx) + .verify_coinbase_maturity(inputs) .map_err(|_| pool::PoolError::ImmatureCoinbase) } From 610aa0b3a5ab684cf7ef37f1dadc99acd45fc38e Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 15 Aug 2020 07:56:25 +0100 Subject: [PATCH 12/16] cleanup txpool add_to_pool --- pool/src/pool.rs | 26 ++------------ pool/src/transaction_pool.rs | 68 +++++++++++++++++------------------- 2 files changed, 36 insertions(+), 58 deletions(-) diff --git a/pool/src/pool.rs b/pool/src/pool.rs index b939b0393b..eea7f67865 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -29,7 +29,6 @@ use grin_util as util; use std::cmp::Reverse; use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use util::secp::pedersen::Commitment; use util::static_secp_instance; pub struct Pool @@ -60,28 +59,9 @@ where } /// Does the transaction pool contain an entry for the given transaction? - pub fn contains_tx(&self, hash: Hash) -> bool { - self.entries.iter().any(|x| x.tx.hash() == hash) - } - - pub fn get_tx(&self, hash: Hash) -> Option { - self.entries - .iter() - .find(|x| x.tx.hash() == hash) - .map(|x| x.tx.clone()) - } - - /// Query the tx pool for an individual tx matching the given public excess. - /// Used for checking for duplicate NRD kernels in the txpool. - pub fn retrieve_tx_by_kernel_excess(&self, excess: Commitment) -> Option { - for x in &self.entries { - for k in x.tx.kernels() { - if k.excess() == excess { - return Some(x.tx.clone()); - } - } - } - None + /// Transactions are compared by their kernels. + pub fn contains_tx(&self, tx: &Transaction) -> bool { + self.entries.iter().any(|x| x.tx.kernels() == tx.kernels()) } /// Query the tx pool for an individual tx matching the given kernel hash. 
diff --git a/pool/src/transaction_pool.rs b/pool/src/transaction_pool.rs index 5fa9110c7f..d11bcbac0f 100644 --- a/pool/src/transaction_pool.rs +++ b/pool/src/transaction_pool.rs @@ -110,23 +110,16 @@ where } // Deaggregate this tx against the txpool. - // Re-converts the tx to v2 if deaggregated. - // Returns the new deaggregated entry or the original entry if no deaggregation. - fn deaggregate_tx(&self, entry: &PoolEntry) -> Result { - if entry.tx.kernels().len() == 1 { - Ok(entry.clone()) - } else { - let tx = entry.tx.clone(); - let txs = self.txpool.find_matching_transactions(tx.kernels()); - if txs.is_empty() { - Ok(entry.clone()) - } else { - let tx = transaction::deaggregate(tx, &txs)?; - let (spent_pool, spent_utxo) = self.txpool.locate_spends(&tx, None)?; - let tx = self.convert_tx_v2(tx, &spent_pool, &spent_utxo)?; - Ok(PoolEntry::new(tx, TxSource::Deaggregate)) + // Returns the new deaggregated tx or the original tx if no deaggregation. + fn deaggregate_tx(&self, entry: PoolEntry) -> Result { + if entry.tx.kernels().len() > 1 { + let txs = self.txpool.find_matching_transactions(entry.tx.kernels()); + if !txs.is_empty() { + let tx = transaction::deaggregate(entry.tx, &txs)?; + return Ok(PoolEntry::new(tx, TxSource::Deaggregate)); } } + Ok(entry) } fn add_to_txpool(&mut self, entry: &PoolEntry, header: &BlockHeader) -> Result<(), PoolError> { @@ -174,18 +167,26 @@ where // Our stempool is private and we do not want to reveal anything about the txs contained. // If this is a stem tx and is already present in stempool then fluff by adding to txpool. // Otherwise if already present in txpool return a "duplicate tx" error. - if stem && self.stempool.contains_tx(tx.hash()) { + if stem && self.stempool.contains_tx(&tx) { return self.add_to_pool(src, tx, false, header); - } else if self.txpool.contains_tx(tx.hash()) { + } else if self.txpool.contains_tx(&tx) { return Err(PoolError::DuplicateTx); } + // Attempt to deaggregate the tx if not stem tx. 
+ let entry = if stem { + PoolEntry::new(tx, src) + } else { + self.deaggregate_tx(PoolEntry::new(tx, src))? + }; + let ref tx = entry.tx; + // Check this tx is valid based on current header version. // NRD kernels only valid post HF3 and if NRD feature enabled. - self.verify_kernel_variants(&tx, header)?; + self.verify_kernel_variants(tx, header)?; // Do we have the capacity to accept this transaction? - let acceptability = self.is_acceptable(&tx, stem); + let acceptability = self.is_acceptable(tx, stem); let mut evict = false; if !stem && acceptability.as_ref().err() == Some(&PoolError::OverCapacity) { evict = true; @@ -199,7 +200,7 @@ where .map_err(PoolError::InvalidTx)?; // Check the tx lock_time is valid based on current chain state. - self.blockchain.verify_tx_lock_height(&tx)?; + self.blockchain.verify_tx_lock_height(tx)?; // If stem we want to account for the txpool. let extra_tx = if stem { @@ -210,9 +211,9 @@ where // Locate outputs being spent from pool and current utxo. let (spent_pool, spent_utxo) = if stem { - self.stempool.locate_spends(&tx, extra_tx.clone()) + self.stempool.locate_spends(tx, extra_tx.clone()) } else { - self.txpool.locate_spends(&tx, None) + self.txpool.locate_spends(tx, None) }?; // Check coinbase maturity before we go any further. @@ -225,25 +226,21 @@ where .verify_coinbase_maturity(coinbase_inputs.as_slice().into())?; // Convert the tx to "v2" compatibility with "features and commit" inputs. - let tx_v2 = self.convert_tx_v2(tx, &spent_pool, &spent_utxo)?; - let entry = PoolEntry::new(tx_v2, src); + let ref entry = self.convert_tx_v2(entry, &spent_pool, &spent_utxo)?; // If this is a stem tx then attempt to add it to stempool. // If the adapter fails to accept the new stem tx then fallback to fluff via txpool. 
if stem { - self.add_to_stempool(&entry, header, extra_tx)?; - if self.adapter.stem_tx_accepted(&entry).is_ok() { + self.add_to_stempool(entry, header, extra_tx)?; + if self.adapter.stem_tx_accepted(entry).is_ok() { return Ok(()); } } - // Deaggregate this tx against the current txpool. - let entry = self.deaggregate_tx(&entry)?; - // Add tx to txpool. - self.add_to_txpool(&entry, header)?; - self.add_to_reorg_cache(&entry); - self.adapter.tx_accepted(&entry); + self.add_to_txpool(entry, header)?; + self.add_to_reorg_cache(entry); + self.adapter.tx_accepted(entry); // Transaction passed all the checks but we have to make space for it if evict { @@ -259,10 +256,11 @@ where /// Conversion is done using outputs previously looked up in both the pool and the current utxo. fn convert_tx_v2( &self, - tx: Transaction, + entry: PoolEntry, spent_pool: &[OutputIdentifier], spent_utxo: &[OutputIdentifier], - ) -> Result { + ) -> Result { + let tx = entry.tx; debug!( "convert_tx_v2: {} ({} -> v2)", tx.hash(), @@ -281,7 +279,7 @@ where // Validate the tx to ensure our converted inputs are correct. tx.validate(Weighting::AsTransaction, self.verifier_cache.clone())?; - Ok(tx) + Ok(PoolEntry::new(tx, entry.src)) } // Evict a transaction from the txpool. 
From 783598949495f1371b95708dec4510794d47dff1 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Sat, 15 Aug 2020 08:41:10 +0100 Subject: [PATCH 13/16] fix nrd kernel test --- pool/tests/nrd_kernel_relative_height.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pool/tests/nrd_kernel_relative_height.rs b/pool/tests/nrd_kernel_relative_height.rs index 6cba4421a1..0caf5f65a9 100644 --- a/pool/tests/nrd_kernel_relative_height.rs +++ b/pool/tests/nrd_kernel_relative_height.rs @@ -87,6 +87,12 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> { aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap(); kernel.verify().unwrap(); + // Generate a 2nd NRD kernel sharing the same excess commitment but with different signature. + let mut kernel2 = kernel.clone(); + kernel2.excess_sig = + aggsig::sign_with_blinding(&keychain.secp(), &msg, &excess, Some(&pubkey)).unwrap(); + kernel2.verify().unwrap(); + let tx1 = test_transaction_with_kernel( &keychain, vec![10, 20], @@ -99,7 +105,7 @@ fn test_nrd_kernel_relative_height() -> Result<(), PoolError> { &keychain, vec![24], vec![18], - kernel.clone(), + kernel2.clone(), excess.clone(), ); From 58d976078afdcdcd95a7c02245a9e7956d200bf6 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Fri, 21 Aug 2020 08:22:03 +0100 Subject: [PATCH 14/16] move init conversion earlier --- chain/src/chain.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 45bd0cff54..9af4212c50 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -34,7 +34,7 @@ use crate::types::{ BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus, }; use crate::util::secp::pedersen::{Commitment, RangeProof}; -use crate::util::RwLock; +use crate::{util::RwLock, ChainStore}; use grin_store::Error::NotFoundErr; use 
std::collections::HashMap; use std::fs::{self, File}; @@ -171,6 +171,10 @@ impl Chain { ) -> Result { let store = Arc::new(store::ChainStore::new(&db_root)?); + // DB migrations to be run prior to the chain being used. + // Migrate full blocks to protocol version v3. + Chain::migrate_db_v2_v3(&store)?; + // open the txhashset, creating a new one if necessary let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone(), None)?; @@ -218,10 +222,6 @@ impl Chain { genesis: genesis.header, }; - // DB migrations to be run prior to the chain being used. - // Migrate full blocks to protocol version v3. - chain.migrate_db_v2_v3()?; - chain.log_heads()?; Ok(chain) @@ -1406,8 +1406,8 @@ impl Chain { /// Migrate our local db from v2 to v3. /// "commit only" inputs. - fn migrate_db_v2_v3(&self) -> Result<(), Error> { - let store_v2 = self.store.with_version(ProtocolVersion(2)); + fn migrate_db_v2_v3(store: &ChainStore) -> Result<(), Error> { + let store_v2 = store.with_version(ProtocolVersion(2)); let batch = store_v2.batch()?; for (_, block) in batch.blocks_iter()? { batch.migrate_block(&block, ProtocolVersion(3))?; From a5be2237e4d7e416acd9ee21336d57ce78e23cfd Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Thu, 3 Sep 2020 14:29:00 +0100 Subject: [PATCH 15/16] cleanup --- core/src/core/transaction.rs | 62 ++++++++++++++++++++---------------- core/tests/common.rs | 22 ++++++++++++- core/tests/transaction.rs | 7 ++-- 3 files changed, 60 insertions(+), 31 deletions(-) diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index d77f47f56c..90004033bb 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -458,6 +458,8 @@ pub struct TxKernel { impl DefaultHashable for TxKernel {} hashable_ord!(TxKernel); +/// We want to be able to put kernels in a hashset in the pool. +/// So we need to be able to hash them. 
impl ::std::hash::Hash for TxKernel { fn hash(&self, state: &mut H) { let mut vec = Vec::new(); @@ -1555,13 +1557,13 @@ impl From<&OutputIdentifier> for Input { } } -impl ::std::hash::Hash for Input { - fn hash(&self, state: &mut H) { - let mut vec = Vec::new(); - ser::serialize_default(&mut vec, &self).expect("serialization failed"); - ::std::hash::Hash::hash(&vec, state); - } -} +// impl ::std::hash::Hash for Input { +// fn hash(&self, state: &mut H) { +// let mut vec = Vec::new(); +// ser::serialize_default(&mut vec, &self).expect("serialization failed"); +// ::std::hash::Hash::hash(&vec, state); +// } +// } /// Implementation of Writeable for a transaction Input, defines how to write /// an Input as binary. @@ -1615,45 +1617,56 @@ impl Input { /// We need to wrap commitments so they can be sorted with hashable_ord. #[derive(Serialize, Deserialize, Debug, Clone, Copy)] -pub struct CommitWrapper(Commitment); +#[serde(transparent)] +pub struct CommitWrapper { + #[serde( + serialize_with = "secp_ser::as_hex", + deserialize_with = "secp_ser::commitment_from_hex" + )] + commit: Commitment, +} impl DefaultHashable for CommitWrapper {} hashable_ord!(CommitWrapper); impl From for CommitWrapper { fn from(commit: Commitment) -> Self { - CommitWrapper(commit) + CommitWrapper { commit } } } impl From for CommitWrapper { fn from(input: Input) -> Self { - CommitWrapper(input.commitment()) + CommitWrapper { + commit: input.commitment(), + } } } impl From<&Input> for CommitWrapper { fn from(input: &Input) -> Self { - CommitWrapper(input.commitment()) + CommitWrapper { + commit: input.commitment(), + } } } impl AsRef for CommitWrapper { fn as_ref(&self) -> &Commitment { - &self.0 + &self.commit } } impl Readable for CommitWrapper { fn read(reader: &mut R) -> Result { let commit = Commitment::read(reader)?; - Ok(CommitWrapper(commit)) + Ok(CommitWrapper { commit }) } } impl Writeable for CommitWrapper { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - 
self.0.write(writer) + self.commit.write(writer) } } @@ -1686,7 +1699,7 @@ impl From<&Inputs> for Vec { impl CommitWrapper { /// Wrapped commitment. pub fn commitment(&self) -> Commitment { - self.0 + self.commit } } /// Wrapper around a vec of inputs. @@ -1876,13 +1889,13 @@ impl AsRef for Output { } } -impl ::std::hash::Hash for Output { - fn hash(&self, state: &mut H) { - let mut vec = Vec::new(); - ser::serialize_default(&mut vec, &self).expect("serialization failed"); - ::std::hash::Hash::hash(&vec, state); - } -} +// impl ::std::hash::Hash for Output { +// fn hash(&self, state: &mut H) { +// let mut vec = Vec::new(); +// ser::serialize_default(&mut vec, &self).expect("serialization failed"); +// ::std::hash::Hash::hash(&vec, state); +// } +// } /// Implementation of Writeable for a transaction Output, defines how to write /// an Output as binary. @@ -2041,11 +2054,6 @@ impl OutputIdentifier { proof, } } - - /// Is this a coinbase? - pub fn is_coinbase(&self) -> bool { - self.features.is_coinbase() - } } impl ToHex for OutputIdentifier { diff --git a/core/tests/common.rs b/core/tests/common.rs index a85f59788a..955f2c3359 100644 --- a/core/tests/common.rs +++ b/core/tests/common.rs @@ -15,7 +15,9 @@ //! 
Common test functions use grin_core::core::hash::DefaultHashable; -use grin_core::core::{Block, BlockHeader, KernelFeatures, Transaction}; +use grin_core::core::{ + Block, BlockHeader, KernelFeatures, OutputFeatures, OutputIdentifier, Transaction, +}; use grin_core::libtx::{ build::{self, input, output}, proof::{ProofBuild, ProofBuilder}, @@ -64,6 +66,24 @@ pub fn tx1i1o() -> Transaction { tx } +#[allow(dead_code)] +pub fn tx1i10_v2_compatible() -> Transaction { + let tx = tx1i1o(); + + let inputs: Vec<_> = tx.inputs().into(); + let inputs: Vec<_> = inputs + .iter() + .map(|input| OutputIdentifier { + features: OutputFeatures::Plain, + commit: input.commitment(), + }) + .collect(); + Transaction { + body: tx.body.replace_inputs(inputs.as_slice().into()), + ..tx + } +} + // utility producing a transaction with a single input // and two outputs (one change output) // Note: this tx has an "offset" kernel diff --git a/core/tests/transaction.rs b/core/tests/transaction.rs index 493a877046..733ebc7236 100644 --- a/core/tests/transaction.rs +++ b/core/tests/transaction.rs @@ -15,7 +15,7 @@ //! Transaction integration tests pub mod common; -use crate::common::tx1i1o; +use crate::common::tx1i10_v2_compatible; use crate::core::core::transaction::{self, Error}; use crate::core::core::verifier_cache::LruVerifierCache; use crate::core::core::{KernelFeatures, Output, OutputFeatures, Transaction, Weighting}; @@ -32,8 +32,10 @@ use util::RwLock; // This test ensures we exercise this serialization/deserialization code. 
#[test] fn test_transaction_json_ser_deser() { - let tx1 = tx1i1o(); + let tx1 = tx1i10_v2_compatible(); + let value = serde_json::to_value(&tx1).unwrap(); + println!("{:?}", value); assert!(value["offset"].is_string()); assert_eq!(value["body"]["inputs"][0]["features"], "Plain"); @@ -50,7 +52,6 @@ fn test_transaction_json_ser_deser() { let tx2: Transaction = serde_json::from_value(value).unwrap(); assert_eq!(tx1, tx2); - let tx1 = tx1i1o(); let str = serde_json::to_string(&tx1).unwrap(); println!("{}", str); let tx2: Transaction = serde_json::from_str(&str).unwrap(); From b5dae01c3bbe3e0c049b5e8e0ed0cc52fc561e18 Mon Sep 17 00:00:00 2001 From: antiochp <30642645+antiochp@users.noreply.github.com> Date: Mon, 7 Sep 2020 10:02:24 +0100 Subject: [PATCH 16/16] cleanup based on PR feedback --- chain/src/chain.rs | 6 +++--- chain/src/pipe.rs | 2 +- chain/src/txhashset/txhashset.rs | 2 +- chain/src/txhashset/utxo_view.rs | 8 ++++---- chain/tests/test_coinbase_maturity.rs | 6 +++--- core/src/core/transaction.rs | 16 ---------------- pool/src/pool.rs | 4 +++- pool/src/transaction_pool.rs | 5 +---- pool/src/types.rs | 4 ++-- pool/tests/common.rs | 4 ++-- servers/src/common/adapters.rs | 4 ++-- 11 files changed, 22 insertions(+), 39 deletions(-) diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 9af4212c50..530224f359 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -296,7 +296,7 @@ impl Chain { pipe::rewind_and_apply_fork(&previous_header, ext, batch)?; ext.extension .utxo_view(ext.header_extension) - .validate_inputs(block.inputs(), batch) + .validate_inputs(&block.inputs(), batch) .map(|outputs| outputs.into_iter().map(|(out, _)| out).collect()) })?; let inputs = inputs.as_slice().into(); @@ -647,7 +647,7 @@ impl Chain { /// that would be spent by the inputs. 
pub fn validate_inputs( &self, - inputs: Inputs, + inputs: &Inputs, ) -> Result, Error> { let header_pmmr = self.header_pmmr.read(); let txhashset = self.txhashset.read(); @@ -663,7 +663,7 @@ impl Chain { /// Verify we are not attempting to spend a coinbase output /// that has not yet sufficiently matured. - pub fn verify_coinbase_maturity(&self, inputs: Inputs) -> Result<(), Error> { + pub fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), Error> { let height = self.next_block_height()?; let header_pmmr = self.header_pmmr.read(); let txhashset = self.txhashset.read(); diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index c71ccc822a..39f64bd4b9 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -424,7 +424,7 @@ fn verify_coinbase_maturity( let header_extension = &ext.header_extension; extension .utxo_view(header_extension) - .verify_coinbase_maturity(block.inputs(), block.header.height, batch) + .verify_coinbase_maturity(&block.inputs(), block.header.height, batch) } /// Verify kernel sums across the full utxo and kernel sets based on block_sums diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 7ceffe0c94..2e99ac1135 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -1105,7 +1105,7 @@ impl<'a> Extension<'a> { // Remove the spent outputs from the output_pos index. 
let spent = self .utxo_view(header_ext) - .validate_inputs(b.inputs(), batch)?; + .validate_inputs(&b.inputs(), batch)?; for (out, pos) in &spent { self.apply_input(out.commitment(), *pos)?; affected_pos.push(pos.pos); diff --git a/chain/src/txhashset/utxo_view.rs b/chain/src/txhashset/utxo_view.rs index 7d61ab1e80..b963f3a4d7 100644 --- a/chain/src/txhashset/utxo_view.rs +++ b/chain/src/txhashset/utxo_view.rs @@ -56,7 +56,7 @@ impl<'a> UTXOView<'a> { for output in block.outputs() { self.validate_output(output, batch)?; } - self.validate_inputs(block.inputs(), batch) + self.validate_inputs(&block.inputs(), batch) } /// Validate a transaction against the current UTXO set. @@ -70,7 +70,7 @@ impl<'a> UTXOView<'a> { for output in tx.outputs() { self.validate_output(output, batch)?; } - self.validate_inputs(tx.inputs(), batch) + self.validate_inputs(&tx.inputs(), batch) } /// Validate the provided inputs. @@ -78,7 +78,7 @@ impl<'a> UTXOView<'a> { /// that would be spent by the provided inputs. pub fn validate_inputs( &self, - inputs: Inputs, + inputs: &Inputs, batch: &Batch<'_>, ) -> Result, Error> { match inputs { @@ -166,7 +166,7 @@ impl<'a> UTXOView<'a> { /// that have not sufficiently matured. pub fn verify_coinbase_maturity( &self, - inputs: Inputs, + inputs: &Inputs, height: u64, batch: &Batch<'_>, ) -> Result<(), Error> { diff --git a/chain/tests/test_coinbase_maturity.rs b/chain/tests/test_coinbase_maturity.rs index b6f071ce34..3f5587c973 100644 --- a/chain/tests/test_coinbase_maturity.rs +++ b/chain/tests/test_coinbase_maturity.rs @@ -122,7 +122,7 @@ fn test_coinbase_maturity() { // Confirm the tx attempting to spend the coinbase output // is not valid at the current block height given the current chain state. 
- match chain.verify_coinbase_maturity(coinbase_txn.inputs()) { + match chain.verify_coinbase_maturity(&coinbase_txn.inputs()) { Ok(_) => {} Err(e) => match e.kind() { ErrorKind::ImmatureCoinbase => {} @@ -204,7 +204,7 @@ fn test_coinbase_maturity() { // Confirm the tx attempting to spend the coinbase output // is not valid at the current block height given the current chain state. - match chain.verify_coinbase_maturity(coinbase_txn.inputs()) { + match chain.verify_coinbase_maturity(&coinbase_txn.inputs()) { Ok(_) => {} Err(e) => match e.kind() { ErrorKind::ImmatureCoinbase => {} @@ -255,7 +255,7 @@ fn test_coinbase_maturity() { // Confirm the tx spending the coinbase output is now valid. // The coinbase output has matured sufficiently based on current chain state. chain - .verify_coinbase_maturity(coinbase_txn.inputs()) + .verify_coinbase_maturity(&coinbase_txn.inputs()) .unwrap(); let txs = &[coinbase_txn]; diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index 90004033bb..04ac4ad17e 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -1557,14 +1557,6 @@ impl From<&OutputIdentifier> for Input { } } -// impl ::std::hash::Hash for Input { -// fn hash(&self, state: &mut H) { -// let mut vec = Vec::new(); -// ser::serialize_default(&mut vec, &self).expect("serialization failed"); -// ::std::hash::Hash::hash(&vec, state); -// } -// } - /// Implementation of Writeable for a transaction Input, defines how to write /// an Input as binary. impl Writeable for Input { @@ -1889,14 +1881,6 @@ impl AsRef for Output { } } -// impl ::std::hash::Hash for Output { -// fn hash(&self, state: &mut H) { -// let mut vec = Vec::new(); -// ser::serialize_default(&mut vec, &self).expect("serialization failed"); -// ::std::hash::Hash::hash(&vec, state); -// } -// } - /// Implementation of Writeable for a transaction Output, defines how to write /// an Output as binary. 
impl Writeable for Output { diff --git a/pool/src/pool.rs b/pool/src/pool.rs index eea7f67865..c3b446b0a7 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -269,6 +269,8 @@ where Ok(valid_txs) } + /// Lookup unspent outputs to be spent by the provided transaction. + /// We look for unspent outputs in the current txpool and then in the current utxo. pub fn locate_spends( &self, tx: &Transaction, @@ -292,7 +294,7 @@ where transaction::cut_through(&mut inputs[..], &mut outputs[..])?; // Lookup remaining outputs to be spent from the current utxo. - let spent_utxo = self.blockchain.validate_inputs(spent_utxo.into())?; + let spent_utxo = self.blockchain.validate_inputs(&spent_utxo.into())?; Ok((spent_pool.to_vec(), spent_utxo)) } diff --git a/pool/src/transaction_pool.rs b/pool/src/transaction_pool.rs index d11bcbac0f..29496834e8 100644 --- a/pool/src/transaction_pool.rs +++ b/pool/src/transaction_pool.rs @@ -160,9 +160,6 @@ where stem: bool, header: &BlockHeader, ) -> Result<(), PoolError> { - // - // TODO - hash here is not sufficient as we have v2 vs. v3 txs... - // // Quick check for duplicate txs. // Our stempool is private and we do not want to reveal anything about the txs contained. // If this is a stem tx and is already present in stempool then fluff by adding to txpool. @@ -223,7 +220,7 @@ where .cloned() .collect(); self.blockchain - .verify_coinbase_maturity(coinbase_inputs.as_slice().into())?; + .verify_coinbase_maturity(&coinbase_inputs.as_slice().into())?; // Convert the tx to "v2" compatibility with "features and commit" inputs. let ref entry = self.convert_tx_v2(entry, &spent_pool, &spent_utxo)?; diff --git a/pool/src/types.rs b/pool/src/types.rs index bc34cb8f62..9f6eea428e 100644 --- a/pool/src/types.rs +++ b/pool/src/types.rs @@ -275,7 +275,7 @@ impl From for PoolError { pub trait BlockChain: Sync + Send { /// Verify any coinbase outputs being spent /// have matured sufficiently. 
- fn verify_coinbase_maturity(&self, inputs: Inputs) -> Result<(), PoolError>; + fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), PoolError>; /// Verify any coinbase outputs being spent /// have matured sufficiently. @@ -287,7 +287,7 @@ pub trait BlockChain: Sync + Send { /// Validate inputs against the current utxo. /// Returns the vec of output identifiers that would be spent /// by these inputs if they can all be successfully spent. - fn validate_inputs(&self, inputs: Inputs) -> Result, PoolError>; + fn validate_inputs(&self, inputs: &Inputs) -> Result, PoolError>; fn chain_head(&self) -> Result; diff --git a/pool/tests/common.rs b/pool/tests/common.rs index 7e3e1468b3..52ef16a890 100644 --- a/pool/tests/common.rs +++ b/pool/tests/common.rs @@ -136,14 +136,14 @@ impl BlockChain for ChainAdapter { }) } - fn validate_inputs(&self, inputs: Inputs) -> Result, PoolError> { + fn validate_inputs(&self, inputs: &Inputs) -> Result, PoolError> { self.chain .validate_inputs(inputs) .map(|outputs| outputs.into_iter().map(|(out, _)| out).collect::>()) .map_err(|_| PoolError::Other("failed to validate inputs".into())) } - fn verify_coinbase_maturity(&self, inputs: Inputs) -> Result<(), PoolError> { + fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), PoolError> { self.chain .verify_coinbase_maturity(inputs) .map_err(|_| PoolError::ImmatureCoinbase) diff --git a/servers/src/common/adapters.rs b/servers/src/common/adapters.rs index f1f8f9e2e8..cd17e069b0 100644 --- a/servers/src/common/adapters.rs +++ b/servers/src/common/adapters.rs @@ -953,14 +953,14 @@ impl pool::BlockChain for PoolToChainAdapter { .map_err(|_| pool::PoolError::Other("failed to validate tx".to_string())) } - fn validate_inputs(&self, inputs: Inputs) -> Result, pool::PoolError> { + fn validate_inputs(&self, inputs: &Inputs) -> Result, pool::PoolError> { self.chain() .validate_inputs(inputs) .map(|outputs| outputs.into_iter().map(|(out, _)| out).collect::>()) .map_err(|_| 
pool::PoolError::Other("failed to validate tx".to_string())) } - fn verify_coinbase_maturity(&self, inputs: Inputs) -> Result<(), pool::PoolError> { + fn verify_coinbase_maturity(&self, inputs: &Inputs) -> Result<(), pool::PoolError> { self.chain() .verify_coinbase_maturity(inputs) .map_err(|_| pool::PoolError::ImmatureCoinbase)