diff --git a/api/src/types.rs b/api/src/types.rs index 1ca36a272d..7fff5ef295 100644 --- a/api/src/types.rs +++ b/api/src/types.rs @@ -629,11 +629,8 @@ impl BlockPrintable { include_proof: bool, include_merkle_proof: bool, ) -> Result { - let inputs = block - .inputs() - .iter() - .map(|x| x.commitment().to_hex()) - .collect(); + let inputs: Vec<_> = block.inputs().into(); + let inputs = inputs.iter().map(|x| x.to_hex()).collect(); let outputs = block .outputs() .iter() diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 285600bba6..3fa606fe30 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -500,8 +500,15 @@ impl Chain { /// Returns Ok(Some(pos)) if output is unspent. /// Returns Ok(None) if output is spent. /// Returns Err if something went wrong beyond not finding the output. - pub fn get_unspent(&self, output_ref: &OutputIdentifier) -> Result, Error> { - self.txhashset.read().get_unspent(output_ref) + pub fn get_unspent(&self, output_id: &OutputIdentifier) -> Result, Error> { + self.txhashset.read().get_unspent(output_id) + } + + pub fn get_unspent_by_commitment( + &self, + commitment: Commitment, + ) -> Result, Error> { + self.txhashset.read().get_unspent_by_commitment(commitment) } /// Retrieves an unspent output using its PMMR position @@ -560,7 +567,8 @@ impl Chain { let header_pmmr = self.header_pmmr.read(); let txhashset = self.txhashset.read(); txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| { - utxo.verify_coinbase_maturity(&tx.inputs(), height, batch)?; + let inputs: Vec<_> = tx.inputs().into(); + utxo.verify_coinbase_maturity(&inputs, height, batch)?; Ok(()) }) } diff --git a/chain/src/error.rs b/chain/src/error.rs index e4634c45d2..ccb7ed744e 100644 --- a/chain/src/error.rs +++ b/chain/src/error.rs @@ -77,9 +77,9 @@ pub enum ErrorKind { /// Error from underlying secp lib #[fail(display = "Secp Lib Error")] Secp(secp::Error), - /// One of the inputs in the block has already been spent - #[fail(display = "Already Spent: {:?}", _0)] - AlreadySpent(Commitment), + /// One of the inputs in the block has already been spent or was never spendable to begin with. + #[fail(display = "Not Unspent: {:?}", _0)] + NotUnspent(Commitment), /// An output with that commitment already exists (should be unique) #[fail(display = "Duplicate Commitment: {:?}", _0)] DuplicateCommitment(Commitment), diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index 014c18372a..0e5743b715 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -18,12 +18,13 @@ use crate::core::consensus; use crate::core::core::hash::Hashed; use crate::core::core::verifier_cache::VerifierCache; use crate::core::core::Committed; -use crate::core::core::{Block, BlockHeader, BlockSums}; +use crate::core::core::{Block, BlockHeader, BlockSums, Inputs}; use crate::core::pow; use crate::error::{Error, ErrorKind}; use crate::store; use crate::txhashset; use crate::types::{Options, Tip}; +use crate::util::secp::pedersen::Commitment; use crate::util::RwLock; use grin_store; use std::sync::Arc; @@ -76,6 +77,40 @@ fn validate_pow_only(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result Ok(()) } +/// We may receive blocks from peers that contain CommitOnly inputs (v3). +/// We want to convert these to FeaturesAndCommit inputs (v2) before saving to the db. +/// This is necessary to support backward compatibility with v2 peers. +/// API also expects blocks in v2 format. +/// It is more efficient to store them in v2 format internally vs. converting them during block relay. 
+fn convert_block_v2( + b: &Block, + ext: &txhashset::ExtensionPair<'_>, + batch: &store::Batch<'_>, +) -> Result { + debug!( + "convert_block_v2: {} at {} ({})", + b.header.hash(), + b.header.height, + b.inputs().version_str(), + ); + match b.inputs() { + Inputs::CommitOnly(inputs) => { + let utxo_view = ext.extension.utxo_view(ext.header_extension); + let inputs: Result, _> = inputs + .into_iter() + .map(|x| utxo_view.get_unspent(x.commitment(), batch)) + .collect(); + let mut inputs = inputs?; + + // Make sure the converted inputs are sorted correctly. + inputs.sort_unstable(); + + Ok(b.clone().replace_inputs(inputs.into())) + } + Inputs::FeaturesAndCommit(_) => Ok(b.clone()), + } +} + /// Runs the block processing pipeline, including validation and finding a /// place for the new block in the chain. /// Returns new head if chain head updated and the "fork point" rewound to when processing the new block. @@ -84,12 +119,13 @@ pub fn process_block( ctx: &mut BlockContext<'_>, ) -> Result<(Option, BlockHeader), Error> { debug!( - "pipe: process_block {} at {} [in/out/kern: {}/{}/{}]", + "pipe: process_block {} at {} [in/out/kern: {}/{}/{}] ({})", b.hash(), b.header.height, b.inputs().len(), b.outputs().len(), b.kernels().len(), + b.inputs().version_str(), ); // Read current chain head from db via the batch. @@ -123,35 +159,42 @@ pub fn process_block( // Validate the block itself, make sure it is internally consistent. // Use the verifier_cache for verifying rangeproofs and kernel signatures. - validate_block(b, ctx)?; + validate_block(&b, ctx)?; // Start a chain extension unit of work dependent on the success of the // internal validation and saving operations let header_pmmr = &mut ctx.header_pmmr; let txhashset = &mut ctx.txhashset; let batch = &mut ctx.batch; - let fork_point = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| { + let (fork_point, b) = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| { let fork_point = rewind_and_apply_fork(&prev, ext, batch)?; + // Convert block to v2 for backward compatibility. + // Convert *before* we apply to avoid looking for outputs after we spend them... + let b = convert_block_v2(b, ext, batch)?; + + // Verify all inputs and outputs are unique and that no input spends an output in this block. + verify_cut_through(&b)?; + // Check any coinbase being spent have matured sufficiently. // This needs to be done within the context of a potentially // rewound txhashset extension to reflect chain state prior // to applying the new block. - verify_coinbase_maturity(b, ext, batch)?; + verify_coinbase_maturity(&b, ext, batch)?; // Validate the block against the UTXO set. - validate_utxo(b, ext, batch)?; + validate_utxo(&b, ext, batch)?; // Using block_sums (utxo_sum, kernel_sum) for the previous block from the db // we can verify_kernel_sums across the full UTXO sum and full kernel sum // accounting for inputs/outputs/kernels in this new block. // We know there are no double-spends etc. if this verifies successfully. - verify_block_sums(b, batch)?; + verify_block_sums(&b, batch)?; // Apply the block to the txhashset state. // Validate the txhashset roots and sizes against the block header. // Block is invalid if there are any discrepencies. - apply_block_to_txhashset(b, ext, batch)?; + apply_block_to_txhashset(&b, ext, batch)?; // If applying this block does not increase the work on the chain then // we know we have not yet updated the chain to produce a new chain head. 
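
For reference, the conversion step above reduces to: look up each bare commitment in the current UTXO view, recover the full output identifier (features plus commitment), re-sort, and fail if anything is not unspent. A minimal sketch of that idea, assuming simplified stand-in types (a HashMap plays the role of the UTXO view; the real chain types and error handling differ):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
struct Commitment(u64);

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct OutputIdentifier {
    features: u8, // stand-in: 0 = plain, 1 = coinbase
    commit: Commitment,
}

enum Inputs {
    CommitOnly(Vec<Commitment>),
    FeaturesAndCommit(Vec<OutputIdentifier>),
}

// Convert v3 (commit only) inputs to v2 (features and commit) by looking each
// commitment up in the UTXO set; a miss maps to the NotUnspent error in the real code.
fn convert_inputs_v2(
    inputs: Inputs,
    utxo: &HashMap<Commitment, OutputIdentifier>,
) -> Result<Inputs, String> {
    match inputs {
        Inputs::CommitOnly(commits) => {
            let mut full: Vec<OutputIdentifier> = commits
                .iter()
                .map(|c| utxo.get(c).copied().ok_or(format!("not unspent: {:?}", c)))
                .collect::<Result<_, _>>()?;
            // Converted inputs must be re-sorted: the v2 ordering (derived Ord here,
            // hash order in the real code) need not match the commit-only ordering.
            full.sort_unstable();
            Ok(Inputs::FeaturesAndCommit(full))
        }
        // Already v2: nothing to convert.
        v2 @ Inputs::FeaturesAndCommit(_) => Ok(v2),
    }
}
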
@@ -162,14 +205,16 @@ pub fn process_block( ext.extension.force_rollback(); } - Ok(fork_point) + Ok((fork_point, b)) })?; - // Add the validated block to the db. + // Add the converted (and validated) block to the db. // Note we do this in the outer batch, not the child batch from the extension // as we only commit the child batch if the extension increases total work. // We want to save the block to the db regardless. - add_block(b, &ctx.batch)?; + // Note: We want to save the "converted" block to the db (with input features) for reference later. + validate_block(&b, ctx)?; + add_block(&b, &ctx.batch)?; // If we have no "tail" then set it now. if ctx.batch.tail().is_err() { @@ -398,6 +443,13 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<( } fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error> { + debug!( + "validate_block: {} at {} ({})", + block.header.hash(), + block.header.height, + block.inputs().version_str(), + ); + let prev = ctx.batch.get_previous_header(&block.header)?; block .validate(&prev.total_kernel_offset, ctx.verifier_cache.clone()) @@ -405,6 +457,11 @@ fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error Ok(()) } +fn verify_cut_through(block: &Block) -> Result<(), Error> { + block.verify_cut_through()?; + Ok(()) +} + /// Verify the block is not spending coinbase outputs before they have sufficiently matured. fn verify_coinbase_maturity( block: &Block, @@ -413,9 +470,10 @@ fn verify_coinbase_maturity( ) -> Result<(), Error> { let extension = &ext.extension; let header_extension = &ext.header_extension; + let inputs: Vec = block.inputs().into(); extension .utxo_view(header_extension) - .verify_coinbase_maturity(&block.inputs(), block.header.height, batch) + .verify_coinbase_maturity(&inputs, block.header.height, batch) } /// Verify kernel sums across the full utxo and kernel sets based on block_sums @@ -462,8 +520,8 @@ fn apply_block_to_txhashset( /// Officially adds the block to our chain (possibly on a losing fork). /// Header must be added separately (assume this has been done previously). -fn add_block(b: &Block, batch: &store::Batch<'_>) -> Result<(), Error> { - batch.save_block(b)?; +fn add_block(block: &Block, batch: &store::Batch<'_>) -> Result<(), Error> { + batch.save_block(block)?; Ok(()) } diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index f22946ba63..0e95ca7aaf 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -21,7 +21,7 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::merkle_proof::MerkleProof; use crate::core::core::pmmr::{self, Backend, ReadonlyPMMR, RewindablePMMR, PMMR}; use crate::core::core::{ - Block, BlockHeader, Input, KernelFeatures, Output, OutputIdentifier, TxKernel, + Block, BlockHeader, Inputs, KernelFeatures, Output, OutputIdentifier, TxKernel, }; use crate::core::global; use crate::core::ser::{PMMRable, ProtocolVersion}; @@ -258,8 +258,7 @@ impl TxHashSet { /// We look in the index to find the output MMR pos. /// Then we check the entry in the output MMR and confirm the hash matches. 
	pub fn get_unspent(&self, output_id: &OutputIdentifier) -> Result<Option<CommitPos>, Error> {
-		let commit = output_id.commit;
-		match self.commit_index.get_output_pos_height(&commit) {
+		match self.commit_index.get_output_pos_height(&output_id.commit) {
 			Ok(Some((pos, height))) => {
 				let output_pmmr: ReadonlyPMMR<'_, Output, _> =
 					ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
@@ -278,6 +277,26 @@ impl TxHashSet {
 		}
 	}
 
+	/// Get an unspent output (and associated MMR pos) by commitment.
+	pub fn get_unspent_by_commitment(
+		&self,
+		commitment: Commitment,
+	) -> Result<Option<(OutputIdentifier, CommitPos)>, Error> {
+		match self.commit_index.get_output_pos_height(&commitment) {
+			Ok(Some((pos, height))) => {
+				let output_pmmr: ReadonlyPMMR<'_, Output, _> =
+					ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
+				if let Some(out) = output_pmmr.get_data(pos) {
+					Ok(Some((out, CommitPos { pos, height })))
+				} else {
+					Ok(None)
+				}
+			}
+			Ok(None) => Ok(None),
+			Err(e) => Err(ErrorKind::StoreErr(e, "txhashset unspent check".to_string()).into()),
+		}
+	}
+
 	/// returns the last N nodes inserted into the tree (i.e. the 'bottom'
 	/// nodes at level 0
 	/// TODO: These need to return the actual data from the flat-files instead
@@ -1034,8 +1053,7 @@ impl<'a> Extension<'a> {
 	}
 
 	/// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs).
-	/// Returns a vec of commit_pos representing the pos and height of the outputs spent
-	/// by this block.
+	/// Supports both CommitOnly and FeaturesAndCommit input variants.
 	pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result<(), Error> {
 		let mut affected_pos = vec![];
 
@@ -1051,14 +1069,42 @@ impl<'a> Extension<'a> {
 		// Remove the output from the output and rangeproof MMRs.
 		// Add spent_pos to affected_pos to update the accumulator later on.
 		// Remove the spent output from the output_pos index.
-		let mut spent = vec![];
-		for input in b.inputs() {
-			let pos = self.apply_input(input, batch)?;
+		let spent: Result<Vec<_>, Error> = match b.inputs() {
+			Inputs::CommitOnly(inputs) => inputs
+				.into_iter()
+				.map(|input| {
+					let (out, pos) = self.apply_input(input.commitment(), batch)?;
+					if input.commitment() != out.commitment() {
+						Err(ErrorKind::TxHashSetErr("output pmmr mismatch".into()).into())
+					} else {
+						Ok((out, pos))
+					}
+				})
+				.collect(),
+			Inputs::FeaturesAndCommit(inputs) => inputs
+				.into_iter()
+				.map(|input| {
+					let (out, pos) = self.apply_input(input.commitment(), batch)?;
+					if input != out {
+						Err(ErrorKind::TxHashSetErr("output pmmr mismatch".into()).into())
+					} else {
+						Ok((out, pos))
+					}
+				})
+				.collect(),
+		};
+
+		let (spent_outputs, spent_pos): (Vec<_>, Vec<_>) = spent?.into_iter().unzip();
+
+		batch.save_spent_index(&b.hash(), &spent_pos)?;
+
+		for out in spent_outputs {
+			batch.delete_output_pos_height(&out.commitment())?;
+		}
+
+		for pos in spent_pos {
 			affected_pos.push(pos.pos);
-			batch.delete_output_pos_height(&input.commitment())?;
-			spent.push(pos);
 		}
-		batch.save_spent_index(&b.hash(), &spent)?;
 
 		// Apply the kernels to the kernel MMR.
 		// Note: This validates and NRD relative height locks via the "recent" kernel index.
@@ -1089,15 +1135,16 @@ impl<'a> Extension<'a> {
 		)
 	}
 
-	fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result<CommitPos, Error> {
-		let commit = input.commitment();
-		if let Some((pos, height)) = batch.get_output_pos_height(&commit)? {
-			// First check this input corresponds to an existing entry in the output MMR.
- if let Some(out) = self.output_pmmr.get_data(pos) { - if OutputIdentifier::from(input) != out { - return Err(ErrorKind::TxHashSetErr("output pmmr mismatch".to_string()).into()); - } - } + fn apply_input( + &mut self, + input: Commitment, + batch: &Batch<'_>, + ) -> Result<(OutputIdentifier, CommitPos), Error> { + if let Some((pos, height)) = batch.get_output_pos_height(&input)? { + let output = self + .output_pmmr + .get_data(pos) + .ok_or(ErrorKind::NotUnspent(input))?; // Now prune the output_pmmr, rproof_pmmr and their storage. // Input is not valid if we cannot prune successfully (to spend an unspent @@ -1107,13 +1154,13 @@ impl<'a> Extension<'a> { self.rproof_pmmr .prune(pos) .map_err(ErrorKind::TxHashSetErr)?; - Ok(CommitPos { pos, height }) + Ok((output, CommitPos { pos, height })) } - Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()), + Ok(false) => Err(ErrorKind::NotUnspent(input).into()), Err(e) => Err(ErrorKind::TxHashSetErr(e).into()), } } else { - Err(ErrorKind::AlreadySpent(commit).into()) + Err(ErrorKind::NotUnspent(input).into()) } } @@ -1331,8 +1378,9 @@ impl<'a> Extension<'a> { // reused output commitment. For example an output at pos 1, spent, reused at pos 2. // The output_pos index should be updated to reflect the old pos 1 when unspent. if let Ok(spent) = spent { - for (x, y) in block.inputs().iter().zip(spent) { - batch.save_output_pos_height(&x.commitment(), y.pos, y.height)?; + let inputs: Vec<_> = block.inputs().into(); + for (x, y) in inputs.iter().zip(spent) { + batch.save_output_pos_height(x, y.pos, y.height)?; } } diff --git a/chain/src/txhashset/utxo_view.rs b/chain/src/txhashset/utxo_view.rs index 8d4c5c4d87..fef4d7ef83 100644 --- a/chain/src/txhashset/utxo_view.rs +++ b/chain/src/txhashset/utxo_view.rs @@ -16,11 +16,11 @@ use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::pmmr::{self, ReadonlyPMMR}; -use crate::core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, Transaction}; +use crate::core::core::{Block, BlockHeader, Output, OutputIdentifier, Transaction}; use crate::core::global; use crate::error::{Error, ErrorKind}; use crate::store::Batch; -use crate::util::secp::pedersen::RangeProof; +use crate::util::secp::pedersen::{Commitment, RangeProof}; use grin_store::pmmr::PMMRBackend; /// Readonly view of the UTXO set (based on output MMR). @@ -48,11 +48,18 @@ impl<'a> UTXOView<'a> { /// Every input must spend an output that currently exists in the UTXO set. /// No duplicate outputs. pub fn validate_block(&self, block: &Block, batch: &Batch<'_>) -> Result<(), Error> { + debug!( + "validate_block: {} at {} ({})", + block.header.hash(), + block.header.height, + block.inputs().version_str(), + ); for output in block.outputs() { self.validate_output(output, batch)?; } - for input in block.inputs() { + let inputs: Vec<_> = block.inputs().into(); + for input in inputs { self.validate_input(input, batch)?; } Ok(()) @@ -62,11 +69,14 @@ impl<'a> UTXOView<'a> { /// Every input must spend an output that currently exists in the UTXO set. /// No duplicate outputs. 
	pub fn validate_tx(&self, tx: &Transaction, batch: &Batch<'_>) -> Result<(), Error> {
+		debug!("validate_tx: {} ({})", tx.hash(), tx.inputs().version_str(),);
+
 		for output in tx.outputs() {
 			self.validate_output(output, batch)?;
 		}
 
-		for input in tx.inputs() {
+		let inputs: Vec<_> = tx.inputs().into();
+		for input in inputs {
 			self.validate_input(input, batch)?;
 		}
 		Ok(())
 	}
 
@@ -75,15 +85,15 @@ impl<'a> UTXOView<'a> {
 	// Input is valid if it is spending an (unspent) output
 	// that currently exists in the output MMR.
 	// Compare against the entry in output MMR at the expected pos.
-	fn validate_input(&self, input: &Input, batch: &Batch<'_>) -> Result<(), Error> {
-		if let Ok(pos) = batch.get_output_pos(&input.commitment()) {
+	fn validate_input(&self, input: Commitment, batch: &Batch<'_>) -> Result<(), Error> {
+		if let Ok(pos) = batch.get_output_pos(&input) {
 			if let Some(out) = self.output_pmmr.get_data(pos) {
-				if OutputIdentifier::from(input) == out {
+				if input == out.commitment() {
 					return Ok(());
 				}
 			}
 		}
-		Err(ErrorKind::AlreadySpent(input.commitment()).into())
+		Err(ErrorKind::NotUnspent(input).into())
 	}
 
 	// Output is valid if it would not result in a duplicate commitment in the output MMR.
@@ -98,6 +108,22 @@ impl<'a> UTXOView<'a> {
 		Ok(())
 	}
 
+	/// Retrieve unspent output by commitment.
+	pub fn get_unspent(
+		&self,
+		commitment: Commitment,
+		batch: &Batch<'_>,
+	) -> Result<OutputIdentifier, Error> {
+		if let Ok(pos) = batch.get_output_pos(&commitment) {
+			if let Some(out) = self.output_pmmr.get_data(pos) {
+				if commitment == out.commitment() {
+					return Ok(out);
+				}
+			}
+		}
+		Err(ErrorKind::NotUnspent(commitment).into())
+	}
+
 	/// Retrieves an unspent output using its PMMR position
 	pub fn get_unspent_output_at(&self, pos: u64) -> Result<OutputIdentifier, Error> {
 		match self.output_pmmr.get_data(pos) {
@@ -113,40 +139,57 @@ impl<'a> UTXOView<'a> {
 	/// that have not sufficiently matured.
 	pub fn verify_coinbase_maturity(
 		&self,
-		inputs: &[Input],
+		inputs: &[Commitment],
 		height: u64,
 		batch: &Batch<'_>,
 	) -> Result<(), Error> {
-		// Find the greatest output pos of any coinbase
-		// outputs we are attempting to spend.
-		let pos = inputs
-			.iter()
-			.filter(|x| x.is_coinbase())
-			.filter_map(|x| batch.get_output_pos(&x.commitment()).ok())
-			.max()
-			.unwrap_or(0);
-
-		if pos > 0 {
-			// If we have not yet reached 1440 blocks then
-			// we can fail immediately as coinbase cannot be mature.
-			if height < global::coinbase_maturity() {
-				return Err(ErrorKind::ImmatureCoinbase.into());
+		// Find the pos of every output being spent by the given inputs.
+		let spent_pos: Result<Vec<_>, _> = inputs
+			.into_iter()
+			.map(|x| batch.get_output_pos(x))
+			.collect();
+
+		// Sort pos in descending order.
+		let mut spent_pos = spent_pos?;
+		spent_pos.sort();
+		spent_pos.reverse();
+
+		// Find the first (max) coinbase pos.
+		let max_coinbase = spent_pos.into_iter().find(|pos| {
+			if let Some(out) = self.output_pmmr.get_data(*pos) {
+				out.features.is_coinbase()
+			} else {
+				false
 			}
-
-		// Find the "cutoff" pos in the output MMR based on the
-		// header from 1,000 blocks ago.
-		let cutoff_height = height.saturating_sub(global::coinbase_maturity());
-		let cutoff_header = self.get_header_by_height(cutoff_height, batch)?;
-		let cutoff_pos = cutoff_header.output_mmr_size;
-
-		// If any output pos exceed the cutoff_pos
-		// we know they have not yet sufficiently matured.
-		if pos > cutoff_pos {
-			return Err(ErrorKind::ImmatureCoinbase.into());
+		});
+
+		// Check the coinbase maturity rule on the max coinbase output.
+ // If this one is mature then all others will be mature also. + match max_coinbase { + None => Ok(()), + Some(pos) => { + if pos > 0 { + // If we have not yet reached 1440 blocks then + // we can fail immediately as coinbase cannot be mature. + if height < global::coinbase_maturity() { + return Err(ErrorKind::ImmatureCoinbase.into()); + } + + // Find the "cutoff" pos in the output MMR based on the + // header from 1,000 blocks ago. + let cutoff_height = height.saturating_sub(global::coinbase_maturity()); + let cutoff_header = self.get_header_by_height(cutoff_height, batch)?; + let cutoff_pos = cutoff_header.output_mmr_size; + + // If any output pos exceed the cutoff_pos + // we know they have not yet sufficiently matured. + if pos > cutoff_pos { + return Err(ErrorKind::ImmatureCoinbase.into()); + } + } + Ok(()) } } - - Ok(()) } /// Get the header hash for the specified pos from the underlying MMR backend. diff --git a/core/src/core/block.rs b/core/src/core/block.rs index e5b1c48c69..79fe90f916 100644 --- a/core/src/core/block.rs +++ b/core/src/core/block.rs @@ -20,8 +20,8 @@ use crate::core::compact_block::{CompactBlock, CompactBlockBody}; use crate::core::hash::{DefaultHashable, Hash, Hashed, ZERO_HASH}; use crate::core::verifier_cache::VerifierCache; use crate::core::{ - transaction, Commitment, Input, KernelFeatures, Output, Transaction, TransactionBody, TxKernel, - Weighting, + transaction, Commitment, Inputs, KernelFeatures, Output, Transaction, TransactionBody, + TxKernel, Weighting, }; use crate::global; use crate::pow::{verify_size, Difficulty, Proof, ProofOfWork}; @@ -543,7 +543,7 @@ impl Block { reward_output: (Output, TxKernel), ) -> Result { let mut block = - Block::from_reward(prev, txs, reward_output.0, reward_output.1, difficulty)?; + Block::from_reward(prev, &txs, reward_output.0, reward_output.1, difficulty)?; // Now set the pow on the header so block hashing works as expected. { @@ -554,6 +554,12 @@ impl Block { Ok(block) } + /// Consume self and return updated block with inputs fully replaced. + pub fn replace_inputs(mut self, inputs: Inputs) -> Block { + self.body = self.body.replace_inputs(inputs); + self + } + /// Hydrate a block from a compact block. /// Note: caller must validate the block themselves, we do not validate it /// here. 
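
The maturity rule above can be restated as: among all positions being spent, find the highest one that holds a coinbase output; the spend is valid only once the chain has passed the maturity horizon and that position does not exceed the output MMR size recorded in the header from coinbase_maturity() blocks back. A toy sketch of that comparison, assuming bare u64 positions and a caller-supplied coinbase predicate and cutoff (not the real chain API):

const COINBASE_MATURITY: u64 = 1_440;

// Sketch only: positions, the coinbase predicate and the cutoff are
// simplified stand-ins for the output MMR and header lookup.
fn verify_coinbase_maturity(
    mut spent_pos: Vec<u64>,
    is_coinbase_at: impl Fn(u64) -> bool,
    height: u64,
    cutoff_pos: u64, // output MMR size at height - COINBASE_MATURITY
) -> Result<(), &'static str> {
    // Highest position first: if the most recently created coinbase being
    // spent is mature, every older one is mature as well.
    spent_pos.sort_unstable();
    spent_pos.reverse();
    if let Some(pos) = spent_pos.into_iter().find(|p| is_coinbase_at(*p)) {
        // Before the maturity horizon no coinbase can be mature at all,
        // and any pos beyond the cutoff has not matured yet.
        if height < COINBASE_MATURITY || pos > cutoff_pos {
            return Err("immature coinbase");
        }
    }
    Ok(())
}
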
@@ -562,36 +568,32 @@ impl Block {
 		let header = cb.header.clone();
 
-		let mut all_inputs = HashSet::new();
-		let mut all_outputs = HashSet::new();
-		let mut all_kernels = HashSet::new();
+		let mut inputs: Vec<Commitment> = vec![];
+		let mut outputs = vec![];
+		let mut kernels = vec![];
 
 		// collect all the inputs, outputs and kernels from the txs
 		for tx in txs {
-			all_inputs.extend(tx.inputs());
-			all_outputs.extend(tx.outputs());
-			all_kernels.extend(tx.kernels());
+			let tx_inputs: Vec<_> = tx.inputs().into();
+			inputs.extend_from_slice(tx_inputs.as_slice());
+			outputs.extend_from_slice(tx.outputs());
+			kernels.extend_from_slice(tx.kernels());
 		}
 
-		// include the coinbase output(s) and kernel(s) from the compact_block
-		{
-			let body: CompactBlockBody = cb.into();
-			all_outputs.extend(body.out_full);
-			all_kernels.extend(body.kern_full);
-		}
+		let (inputs, outputs) = transaction::cut_through(&mut inputs, &mut outputs)?;
 
-		// convert the sets to vecs
-		let all_inputs = Vec::from_iter(all_inputs);
-		let all_outputs = Vec::from_iter(all_outputs);
-		let all_kernels = Vec::from_iter(all_kernels);
+		// include the coinbase output(s) and kernel(s) from the compact_block
+		let mut outputs = outputs.to_vec();
+		outputs.extend_from_slice(cb.out_full());
+		kernels.extend_from_slice(cb.kern_full());
 
 		// Initialize a tx body and sort everything.
-		let body = TransactionBody::init(&all_inputs, &all_outputs, &all_kernels, false)?;
+		let body = TransactionBody::init(inputs.into(), &outputs, &kernels, false)?;
 
 		// Finally return the full block.
 		// Note: we have not actually validated the block here,
 		// caller must validate the block.
-		Block { header, body }.cut_through()
+		Ok(Block { header, body })
 	}
 
 	/// Build a new empty block from a specified header
@@ -612,12 +614,9 @@ impl Block {
 		reward_kern: TxKernel,
 		difficulty: Difficulty,
 	) -> Result<Block, Error> {
-		// A block is just a big transaction, aggregate and add the reward output
-		// and reward kernel. At this point the tx is technically invalid but the
-		// tx body is valid if we account for the reward (i.e. as a block).
-		let agg_tx = transaction::aggregate(txs)?
-			.with_output(reward_out)
-			.with_kernel(reward_kern);
+		// Build a single cut-through aggregate transaction from the provided transactions.
+		// We will add the reward output and reward kernel when we construct the block below.
+		let agg_tx = transaction::aggregate(txs)?;
 
 		// Now add the kernel offset of the previous block for a total
 		let total_kernel_offset = committed::sum_kernel_offsets(
@@ -635,7 +634,7 @@ impl Block {
 		// Now build the block with all the above information.
 		// Note: We have not validated the block here.
 		// Caller must validate the block as necessary.
- Block { + Ok(Block { header: BlockHeader { version, height, @@ -648,32 +647,32 @@ impl Block { }, ..Default::default() }, - body: agg_tx.into(), - } - .cut_through() + body: TransactionBody::from(agg_tx) + .with_output(reward_out) + .with_kernel(reward_kern), + }) } /// Consumes this block and returns a new block with the coinbase output /// and kernels added pub fn with_reward(mut self, reward_out: Output, reward_kern: TxKernel) -> Block { - self.body.outputs = vec![reward_out]; - self.body.kernels = vec![reward_kern]; + self.body = self.body.with_output(reward_out).with_kernel(reward_kern); self } /// Get inputs - pub fn inputs(&self) -> &[Input] { - &self.body.inputs + pub fn inputs(&self) -> Inputs { + self.body.inputs() } /// Get outputs pub fn outputs(&self) -> &[Output] { - &self.body.outputs + self.body.outputs() } /// Get kernels pub fn kernels(&self) -> &[TxKernel] { - &self.body.kernels + self.body.kernels() } /// Sum of all fees (inputs less outputs) in the block @@ -681,24 +680,6 @@ impl Block { self.body.fee() } - /// Matches any output with a potential spending input, eliminating them - /// from the block. Provides a simple way to cut-through the block. The - /// elimination is stable with respect to the order of inputs and outputs. - /// Method consumes the block. - pub fn cut_through(self) -> Result { - let mut inputs = self.inputs().to_vec(); - let mut outputs = self.outputs().to_vec(); - let (inputs, outputs) = transaction::cut_through(&mut inputs, &mut outputs)?; - - // Initialize tx body and sort everything. - let body = TransactionBody::init(inputs, outputs, self.kernels(), false)?; - - Ok(Block { - header: self.header, - body, - }) - } - /// "Lightweight" validation that we can perform quickly during read/deserialization. /// Subset of full validation that skips expensive verification steps, specifically - /// * rangeproof verification (on the body) @@ -752,20 +733,25 @@ impl Block { Ok(kernel_sum) } + /// Verify inputs and outputs are fully cut-through. + /// Block must contain no input spending an output in the same block. + pub fn verify_cut_through(&self) -> Result<(), Error> { + self.body.verify_cut_through()?; + Ok(()) + } + /// Validate the coinbase.body.outputs generated by miners. /// Check the sum of coinbase-marked outputs match /// the sum of coinbase-marked kernels accounting for fees. pub fn verify_coinbase(&self) -> Result<(), Error> { let cb_outs = self - .body - .outputs + .outputs() .iter() .filter(|out| out.is_coinbase()) .collect::>(); let cb_kerns = self - .body - .kernels + .kernels() .iter() .filter(|kernel| kernel.is_coinbase()) .collect::>(); diff --git a/core/src/core/committed.rs b/core/src/core/committed.rs index 9fb2d7277a..381210a1ff 100644 --- a/core/src/core/committed.rs +++ b/core/src/core/committed.rs @@ -58,7 +58,7 @@ pub trait Committed { /// Gather the kernel excesses and sum them. 
	fn sum_kernel_excesses(
		&self,
-		offset: &BlindingFactor,
+		offset: BlindingFactor,
	) -> Result<(Commitment, Commitment), Error> {
		// then gather the kernel excess commitments
		let kernel_commits = self.kernels_committed();
@@ -72,7 +72,7 @@ pub trait Committed {
		let secp = static_secp_instance();
		let secp = secp.lock();
		let mut commits = vec![kernel_sum];
-		if *offset != BlindingFactor::zero() {
+		if offset != BlindingFactor::zero() {
			let key = offset.secret_key(&secp)?;
			let offset_commit = secp.commit(0, key)?;
			commits.push(offset_commit);
@@ -129,7 +129,7 @@ pub trait Committed {
		let utxo_sum = self.sum_commitments(overage)?;
 
		// Sum the kernel excesses accounting for the kernel offset.
-		let (kernel_sum, kernel_sum_plus_offset) = self.sum_kernel_excesses(&kernel_offset)?;
+		let (kernel_sum, kernel_sum_plus_offset) = self.sum_kernel_excesses(kernel_offset)?;
 
		if utxo_sum != kernel_sum_plus_offset {
			return Err(Error::KernelSumMismatch);
diff --git a/core/src/core/hash.rs b/core/src/core/hash.rs
index 2dc968fa43..d9eff5eb0f 100644
--- a/core/src/core/hash.rs
+++ b/core/src/core/hash.rs
@@ -220,6 +220,7 @@ impl<D: DefaultHashable, E: DefaultHashable> DefaultHashable for (D, E) {}
 impl<D: DefaultHashable, E: DefaultHashable, F: DefaultHashable> DefaultHashable for (D, E, F) {}
 
 /// Implement Hashed trait for external types here
+impl DefaultHashable for util::secp::pedersen::Commitment {}
 impl DefaultHashable for util::secp::pedersen::RangeProof {}
 impl DefaultHashable for Vec<u8> {}
 impl DefaultHashable for u8 {}
diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs
index 54f5a4618a..c1d212bb94 100644
--- a/core/src/core/transaction.rs
+++ b/core/src/core/transaction.rs
@@ -28,6 +28,7 @@ use keychain::{self, BlindingFactor};
 use std::cmp::Ordering;
 use std::cmp::{max, min};
 use std::convert::{TryFrom, TryInto};
+use std::default::Default;
 use std::sync::Arc;
 use std::{error, fmt};
 use util::secp;
@@ -381,6 +382,8 @@ pub enum Error {
 	/// Validation error relating to cut-through (tx is spending its own
 	/// output).
 	CutThrough,
+	/// Validation error relating to input variants.
+	InvalidInputVariant,
 	/// Validation error relating to output features.
 	/// It is invalid for a transaction to contain a coinbase output, for example.
 	InvalidOutputFeatures,
@@ -648,40 +651,208 @@ pub enum Weighting {
 	/// No max weight limit (skip the weight check).
 	NoLimit,
 }
-
-/// TransactionBody is a common abstraction for transaction and block
 #[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct TransactionBody {
-	/// List of inputs spent by the transaction.
-	pub inputs: Vec<Input>,
-	/// List of outputs the transaction produces.
-	pub outputs: Vec<Output>,
-	/// List of kernels that make up this transaction (usually a single kernel).
-	pub kernels: Vec<TxKernel>,
+pub struct CommitWrapper(Commitment);
+
+impl CommitWrapper {
+	pub fn commitment(&self) -> Commitment {
+		self.0
+	}
 }
 
-/// PartialEq
-impl PartialEq for TransactionBody {
-	fn eq(&self, l: &TransactionBody) -> bool {
-		self.inputs == l.inputs && self.outputs == l.outputs && self.kernels == l.kernels
+impl DefaultHashable for CommitWrapper {}
+
+impl Writeable for CommitWrapper {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		self.0.write(writer)?;
+		Ok(())
 	}
 }
+hashable_ord!(CommitWrapper);
+
+/// Inputs wrapping a collection of individual inputs.
+/// We support both v2 and v3 inputs for backward compatibility with v2 peers.
+/// Note: This is not just a serialization difference as the data differs and we cannot
+/// trivially convert between versions (specifically downgrading v3 -> v2).
+/// We must take care to convert these prior to relay/broadcast to v2 peers.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
+pub enum Inputs {
+	/// Inputs specifying commitment only (v3 blocks and transactions).
+	CommitOnly(Vec<CommitWrapper>),
+	/// Inputs specifying features and commitment (v2 blocks and transactions).
+	FeaturesAndCommit(Vec<OutputIdentifier>),
+}
+
+impl From<&[Commitment]> for Inputs {
+	fn from(inputs: &[Commitment]) -> Self {
+		let mut wrappers: Vec<_> = inputs.into_iter().map(|x| CommitWrapper(*x)).collect();
+		wrappers.sort_unstable();
+		Inputs::CommitOnly(wrappers)
+	}
+}
+
+impl From<Vec<Commitment>> for Inputs {
+	fn from(inputs: Vec<Commitment>) -> Self {
+		let mut wrappers: Vec<_> = inputs.into_iter().map(|x| CommitWrapper(x)).collect();
+		wrappers.sort_unstable();
+		Inputs::CommitOnly(wrappers)
+	}
+}
+
+impl From<&[OutputIdentifier]> for Inputs {
+	fn from(inputs: &[OutputIdentifier]) -> Self {
+		Inputs::FeaturesAndCommit(inputs.to_vec())
+	}
+}
+
+impl From<Vec<OutputIdentifier>> for Inputs {
+	fn from(inputs: Vec<OutputIdentifier>) -> Self {
+		Inputs::FeaturesAndCommit(inputs)
+	}
+}
+
+impl Default for Inputs {
+	fn default() -> Self {
+		Inputs::CommitOnly(vec![])
+	}
+}
+
+impl Inputs {
+	/// Inputs length.
+	pub fn len(&self) -> usize {
+		match self {
+			Inputs::CommitOnly(inputs) => inputs.len(),
+			Inputs::FeaturesAndCommit(inputs) => inputs.len(),
+		}
+	}
+
+	fn sort_unstable(&mut self) {
+		match self {
+			Inputs::CommitOnly(inputs) => {
+				inputs.sort_unstable();
+			}
+			Inputs::FeaturesAndCommit(inputs) => {
+				inputs.sort_unstable();
+			}
+		}
+	}
+
+	fn add_input(&mut self, input: OutputIdentifier) {
+		match self {
+			Inputs::CommitOnly(inputs) => {
+				let input = CommitWrapper(input.commitment());
+				if let Err(e) = inputs.binary_search(&input) {
+					inputs.insert(e, input);
+				}
+			}
+			Inputs::FeaturesAndCommit(inputs) => {
+				if let Err(e) = inputs.binary_search(&input) {
+					inputs.insert(e, input);
+				}
+			}
+		}
+	}
+
+	fn verify_sorted_and_unique(&self) -> Result<(), ser::Error> {
+		match self {
+			Inputs::CommitOnly(inputs) => inputs.verify_sorted_and_unique(),
+			Inputs::FeaturesAndCommit(inputs) => inputs.verify_sorted_and_unique(),
+		}
+	}
+
+	// We can verify cut-through by taking all the inputs and outputs, sorting them in a single vec
+	// and checking we have no duplicate entries (every entry must be unique).
+	fn verify_cut_through(&self, outputs: &[Output]) -> Result<(), Error> {
+		match self {
+			Inputs::CommitOnly(inputs) => {
+				let mut puts = inputs.clone();
+				puts.extend(outputs.iter().map(|x| CommitWrapper(x.commitment())));
+				puts.sort();
+				puts.verify_sorted_and_unique()?;
+			}
+			Inputs::FeaturesAndCommit(inputs) => {
+				let mut puts = inputs.clone();
+				puts.extend(outputs.iter().map(|x| OutputIdentifier::from(x)));
+				puts.sort();
+				puts.verify_sorted_and_unique()?;
+			}
+		}
+		Ok(())
+	}
+
+	/// Convenience for debug/logging.
+	/// Caution: Do not rely on this for anything.
+ pub fn version_str(&self) -> &str { + match self { + Inputs::CommitOnly(_) => "v3", + Inputs::FeaturesAndCommit(_) => "v2", + } + } +} + +impl From<&Inputs> for Vec { + fn from(inputs: &Inputs) -> Self { + match inputs { + Inputs::CommitOnly(inputs) => inputs.iter().map(|x| x.commitment()).collect(), + Inputs::FeaturesAndCommit(inputs) => inputs.iter().map(|x| x.commitment()).collect(), + } + } +} + +impl From for Vec { + fn from(inputs: Inputs) -> Self { + match inputs { + Inputs::CommitOnly(inputs) => inputs.iter().map(|x| x.commitment()).collect(), + Inputs::FeaturesAndCommit(inputs) => inputs.iter().map(|x| x.commitment()).collect(), + } + } +} + +/// If v2 format then write the inputs out. +/// If v3 format then write the commitments out (ensure sorted correctly during conversion). +impl Writeable for Inputs { + fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + match writer.protocol_version().value() { + 0..=2 => match self { + Inputs::CommitOnly(inputs) => { + if writer.serialization_mode() == ser::SerializationMode::Hash { + inputs.write(writer)?; + } else { + return Err(ser::Error::ProtocolVersionError); + } + } + Inputs::FeaturesAndCommit(inputs) => inputs.write(writer)?, + }, + 3..=ProtocolVersion::MAX => { + let inputs: Vec<_> = self.into(); + inputs.write(writer)? + } + } + Ok(()) + } +} + +/// TransactionBody is a common abstraction for transaction and block +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub struct TransactionBody { + inputs: Inputs, + outputs: Vec, + kernels: Vec, +} + /// Implementation of Writeable for a body, defines how to /// write the body as binary. impl Writeable for TransactionBody { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - ser_multiwrite!( - writer, - [write_u64, self.inputs.len() as u64], - [write_u64, self.outputs.len() as u64], - [write_u64, self.kernels.len() as u64] - ); - + // Write the lengths first. + writer.write_u64(self.inputs.len() as u64)?; + writer.write_u64(self.outputs.len() as u64)?; + writer.write_u64(self.kernels.len() as u64)?; + // Then the data itself. self.inputs.write(writer)?; self.outputs.write(writer)?; self.kernels.write(writer)?; - Ok(()) } } @@ -705,12 +876,24 @@ impl Readable for TransactionBody { return Err(ser::Error::TooLargeReadErr); } - let inputs = read_multi(reader, input_len)?; + // v2: inputs are represented as vec of output_identifiers + // v3: inputs are represented as vec of commitments + let inputs: Inputs = match reader.protocol_version().value() { + 0..=2 => { + let inputs: Vec = read_multi(reader, input_len)?; + inputs.into() + } + 3..=ProtocolVersion::MAX => { + let inputs: Vec = read_multi(reader, input_len)?; + inputs.into() + } + }; + let outputs = read_multi(reader, output_len)?; let kernels = read_multi(reader, kernel_len)?; // Initialize tx body and verify everything is sorted. 
- let body = TransactionBody::init(&inputs, &outputs, &kernels, true) + let body = TransactionBody::init(inputs, &outputs, &kernels, true) .map_err(|_| ser::Error::CorruptedData)?; Ok(body) @@ -719,7 +902,10 @@ impl Readable for TransactionBody { impl Committed for TransactionBody { fn inputs_committed(&self) -> Vec { - self.inputs.iter().map(|x| x.commitment()).collect() + match &self.inputs { + Inputs::CommitOnly(inputs) => inputs.iter().map(|x| x.commitment()).collect(), + Inputs::FeaturesAndCommit(inputs) => inputs.iter().map(|x| x.commitment()).collect(), + } } fn outputs_committed(&self) -> Vec { @@ -747,7 +933,7 @@ impl TransactionBody { /// Creates a new empty transaction (no inputs or outputs, zero fee). pub fn empty() -> TransactionBody { TransactionBody { - inputs: vec![], + inputs: Default::default(), outputs: vec![], kernels: vec![], } @@ -760,17 +946,32 @@ impl TransactionBody { self.kernels.sort_unstable(); } + /// Transaction inputs. + pub fn inputs(&self) -> Inputs { + self.inputs.clone() + } + + /// Transaction outputs. + pub fn outputs(&self) -> &[Output] { + self.outputs.as_slice() + } + + /// Transaction kernels. + pub fn kernels(&self) -> &[TxKernel] { + self.kernels.as_slice() + } + /// Creates a new transaction body initialized with /// the provided inputs, outputs and kernels. /// Guarantees inputs, outputs, kernels are sorted lexicographically. pub fn init( - inputs: &[Input], + inputs: Inputs, outputs: &[Output], kernels: &[TxKernel], verify_sorted: bool, ) -> Result { let mut body = TransactionBody { - inputs: inputs.to_vec(), + inputs, outputs: outputs.to_vec(), kernels: kernels.to_vec(), }; @@ -783,16 +984,23 @@ impl TransactionBody { // If we are not verifying sort order then sort in place and return. body.sort(); } + Ok(body) } - /// Builds a new body with the provided inputs added. Existing - /// inputs, if any, are kept intact. - /// Sort order is maintained. - pub fn with_input(mut self, input: Input) -> TransactionBody { - if let Err(e) = self.inputs.binary_search(&input) { - self.inputs.insert(e, input) - }; + /// Adds input to this transaction body. + /// Consumes self and returns the updated transaction body. + /// Existing inputs, if any, are kept intact. + /// Sort order of inputs is maintained. + pub fn with_input(mut self, input: OutputIdentifier) -> TransactionBody { + self.inputs.add_input(input); + self + } + + /// Consumes self and returns the updated transaction body with inputs fully replaced. + pub fn replace_inputs(mut self, inputs: Inputs) -> TransactionBody { + self.inputs = inputs; + self.inputs.sort_unstable(); self } @@ -806,6 +1014,11 @@ impl TransactionBody { self } + pub fn replace_outputs(mut self, outputs: &[Output]) -> TransactionBody { + self.outputs = outputs.to_vec(); + self + } + /// Builds a new TransactionBody with the provided kernel added. Existing /// kernels, if any, are kept intact. /// Sort order is maintained. @@ -954,25 +1167,9 @@ impl TransactionBody { Ok(()) } - // Verify that no input is spending an output from the same block. 
- // Assumes inputs and outputs are sorted - fn verify_cut_through(&self) -> Result<(), Error> { - let mut inputs = self.inputs.iter().map(|x| x.hash()).peekable(); - let mut outputs = self.outputs.iter().map(|x| x.hash()).peekable(); - while let (Some(ih), Some(oh)) = (inputs.peek(), outputs.peek()) { - match ih.cmp(oh) { - Ordering::Less => { - inputs.next(); - } - Ordering::Greater => { - outputs.next(); - } - Ordering::Equal => { - return Err(Error::CutThrough); - } - } - } - Ok(()) + /// Verify all inputs and outputs are unique and that no input spends an output from the same block. + pub fn verify_cut_through(&self) -> Result<(), Error> { + self.inputs.verify_cut_through(self.outputs()) } /// Verify we have no invalid outputs or kernels in the transaction @@ -1008,7 +1205,6 @@ impl TransactionBody { self.verify_weight(weighting)?; self.verify_no_nrd_duplicates()?; self.verify_sorted()?; - self.verify_cut_through()?; Ok(()) } @@ -1030,12 +1226,8 @@ impl TransactionBody { // Now batch verify all those unverified rangeproofs if !outputs.is_empty() { - let mut commits = vec![]; - let mut proofs = vec![]; - for x in &outputs { - commits.push(x.commit); - proofs.push(x.proof); - } + let (commits, proofs): (Vec, Vec) = + outputs.iter().map(|x| (x.commitment(), x.proof())).unzip(); Output::batch_verify_proofs(&commits, &proofs)?; } @@ -1140,7 +1332,7 @@ impl Transaction { /// Creates a new transaction initialized with /// the provided inputs, outputs, kernels - pub fn new(inputs: &[Input], outputs: &[Output], kernels: &[TxKernel]) -> Transaction { + pub fn new(inputs: Inputs, outputs: &[Output], kernels: &[TxKernel]) -> Transaction { let offset = BlindingFactor::zero(); // Initialize a new tx body and sort everything. @@ -1150,6 +1342,33 @@ impl Transaction { Transaction { offset, body } } + /// Find the transaction output matching the provided commitment. + pub fn get_output(&self, commitment: &Commitment) -> Option { + self.outputs() + .into_iter() + .find(|x| &x.commitment() == commitment) + .cloned() + } + + /// Convert a v2 transaction into a v3 transaction. + pub fn convert_to_v3(self) -> Transaction { + debug!( + "convert_to_v3: {} ({})", + self.hash(), + self.inputs().version_str(), + ); + + let inputs: Vec = self.inputs().into(); + + Transaction { + body: TransactionBody { + inputs: inputs.into(), + ..self.body + }, + ..self + } + } + /// Creates a new transaction using this transaction as a template /// and with the specified offset. pub fn with_offset(self, offset: BlindingFactor) -> Transaction { @@ -1159,13 +1378,21 @@ impl Transaction { /// Builds a new transaction with the provided inputs added. Existing /// inputs, if any, are kept intact. /// Sort order is maintained. - pub fn with_input(self, input: Input) -> Transaction { + pub fn with_input(self, input: OutputIdentifier) -> Transaction { Transaction { body: self.body.with_input(input), ..self } } + /// Builds a new transaction replacing any inputs with those provided. + pub fn replace_inputs(self, inputs: Inputs) -> Transaction { + Transaction { + body: self.body.replace_inputs(inputs), + ..self + } + } + /// Builds a new transaction with the provided output added. Existing /// outputs, if any, are kept intact. /// Sort order is maintained. @@ -1176,6 +1403,13 @@ impl Transaction { } } + pub fn replace_outputs(self, outputs: &[Output]) -> Transaction { + Transaction { + body: self.body.replace_outputs(outputs), + ..self + } + } + /// Builds a new transaction with the provided kernel added. 
Existing /// kernels, if any, are kept intact. /// Sort order is maintained. @@ -1195,8 +1429,8 @@ impl Transaction { } /// Get inputs - pub fn inputs(&self) -> &[Input] { - &self.body.inputs + pub fn inputs(&self) -> Inputs { + self.body.inputs.clone() } /// Get outputs @@ -1244,6 +1478,7 @@ impl Transaction { verifier: Arc>, ) -> Result<(), Error> { self.body.verify_features()?; + self.body.verify_cut_through()?; self.body.validate(weighting, verifier)?; self.verify_kernel_sums(self.overage(), self.offset.clone())?; Ok(()) @@ -1271,31 +1506,28 @@ impl Transaction { } } -/// Matches any output with a potential spending input, eliminating them -/// from the Vec. Provides a simple way to cut-through a block or aggregated -/// transaction. The elimination is stable with respect to the order of inputs -/// and outputs. +/// Cut-through inputs and outputs. +/// Returns new slices without any allocations. pub fn cut_through<'a>( - inputs: &'a mut [Input], + inputs: &'a mut [Commitment], outputs: &'a mut [Output], -) -> Result<(&'a [Input], &'a [Output]), Error> { - // assemble output commitments set, checking they're all unique - outputs.sort_unstable(); - if outputs.windows(2).any(|pair| pair[0] == pair[1]) { - return Err(Error::AggregationError); - } +) -> Result<(&'a [Commitment], &'a [Output]), Error> { + // Sort inputs and outputs consistently by commitment. + // Comparison will be done by commitment later. inputs.sort_unstable(); + outputs.sort_unstable_by_key(|x| x.commitment()); + let mut inputs_idx = 0; let mut outputs_idx = 0; let mut ncut = 0; while inputs_idx < inputs.len() && outputs_idx < outputs.len() { - match inputs[inputs_idx].hash().cmp(&outputs[outputs_idx].hash()) { + match inputs[inputs_idx].cmp(&outputs[outputs_idx].commitment()) { Ordering::Less => { - inputs.swap(inputs_idx - ncut, inputs_idx); + inputs[inputs_idx - ncut] = inputs[inputs_idx]; inputs_idx += 1; } Ordering::Greater => { - outputs.swap(outputs_idx - ncut, outputs_idx); + outputs[outputs_idx - ncut] = outputs[outputs_idx]; outputs_idx += 1; } Ordering::Equal => { @@ -1341,7 +1573,7 @@ pub fn aggregate(txs: &[Transaction]) -> Result { n_kernels += tx.kernels().len(); } - let mut inputs: Vec = Vec::with_capacity(n_inputs); + let mut inputs: Vec = Vec::with_capacity(n_inputs); let mut outputs: Vec = Vec::with_capacity(n_outputs); let mut kernels: Vec = Vec::with_capacity(n_kernels); @@ -1352,7 +1584,8 @@ pub fn aggregate(txs: &[Transaction]) -> Result { // we will sum these later to give a single aggregate offset kernel_offsets.push(tx.offset.clone()); - inputs.extend_from_slice(tx.inputs()); + let tx_inputs: Vec<_> = tx.inputs().into(); + inputs.extend_from_slice(&tx_inputs); outputs.extend_from_slice(tx.outputs()); kernels.extend_from_slice(tx.kernels()); } @@ -1360,9 +1593,6 @@ pub fn aggregate(txs: &[Transaction]) -> Result { // Sort inputs and outputs during cut_through. let (inputs, outputs) = cut_through(&mut inputs, &mut outputs)?; - // Now sort kernels. 
- kernels.sort_unstable(); - // now sum the kernel_offsets up to give us an aggregate offset for the // transaction let total_kernel_offset = committed::sum_kernel_offsets(kernel_offsets, vec![])?; @@ -1372,15 +1602,13 @@ pub fn aggregate(txs: &[Transaction]) -> Result { // * cut-through outputs // * full set of tx kernels // * sum of all kernel offsets - let tx = Transaction::new(inputs, outputs, &kernels).with_offset(total_kernel_offset); - - Ok(tx) + Ok(Transaction::new(inputs.into(), outputs, &kernels).with_offset(total_kernel_offset)) } /// Attempt to deaggregate a multi-kernel transaction based on multiple /// transactions pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result { - let mut inputs: Vec = vec![]; + let mut inputs: Vec = vec![]; let mut outputs: Vec = vec![]; let mut kernels: Vec = vec![]; @@ -1389,9 +1617,10 @@ pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result = tx.inputs().into(); + let mk_inputs: Vec<_> = mk_tx.inputs().into(); + for mk_input in &mk_inputs { + if !tx_inputs.contains(mk_input) && !inputs.contains(&mk_input) { inputs.push(*mk_input); } } @@ -1437,85 +1666,7 @@ pub fn deaggregate(mk_tx: Transaction, txs: &[Transaction]) -> Result(&self, state: &mut H) { - let mut vec = Vec::new(); - ser::serialize_default(&mut vec, &self).expect("serialization failed"); - ::std::hash::Hash::hash(&vec, state); - } -} - -/// Implementation of Writeable for a transaction Input, defines how to write -/// an Input as binary. -impl Writeable for Input { - fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - self.features.write(writer)?; - self.commit.write(writer)?; - Ok(()) - } -} - -/// Implementation of Readable for a transaction Input, defines how to read -/// an Input from a binary stream. -impl Readable for Input { - fn read(reader: &mut R) -> Result { - let features = OutputFeatures::read(reader)?; - let commit = Commitment::read(reader)?; - Ok(Input::new(features, commit)) - } -} - -/// The input for a transaction, which spends a pre-existing unspent output. -/// The input commitment is a reproduction of the commitment of the output -/// being spent. Input must also provide the original output features and the -/// hash of the block the output originated from. -impl Input { - /// Build a new input from the data required to identify and verify an - /// output being spent. - pub fn new(features: OutputFeatures, commit: Commitment) -> Input { - Input { features, commit } - } - - /// The input commitment which _partially_ identifies the output being - /// spent. In the presence of a fork we need additional info to uniquely - /// identify the output. Specifically the block hash (to correctly - /// calculate lock_height for coinbase outputs). - pub fn commitment(&self) -> Commitment { - self.commit - } - - /// Is this a coinbase input? - pub fn is_coinbase(&self) -> bool { - self.features.is_coinbase() - } - - /// Is this a plain input? - pub fn is_plain(&self) -> bool { - self.features.is_plain() - } + Ok(Transaction::new(inputs.into(), &outputs, &kernels).with_offset(total_kernel_offset)) } // Enum of various supported kernel "features". @@ -1611,7 +1762,7 @@ impl PMMRable for Output { type E = OutputIdentifier; fn as_elmt(&self) -> OutputIdentifier { - OutputIdentifier::from(self) + self.into() } fn elmt_size() -> Option { @@ -1681,7 +1832,7 @@ impl Output { /// An output_identifier can be build from either an input _or_ an output and /// contains everything we need to uniquely identify an output being spent. 
/// Needed because it is not sufficient to pass a commitment around. -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +#[derive(Serialize, Deserialize, Debug, Clone, Copy)] pub struct OutputIdentifier { /// Output features (coinbase vs. regular transaction output) /// We need to include this when hashing to ensure coinbase maturity can be @@ -1692,6 +1843,7 @@ pub struct OutputIdentifier { } impl DefaultHashable for OutputIdentifier {} +hashable_ord!(OutputIdentifier); impl OutputIdentifier { /// Build a new output_identifier. @@ -1749,15 +1901,6 @@ impl From<&Output> for OutputIdentifier { } } -impl From<&Input> for OutputIdentifier { - fn from(input: &Input) -> Self { - OutputIdentifier { - features: input.features, - commit: input.commit, - } - } -} - #[cfg(test)] mod test { use super::*; @@ -1956,37 +2099,34 @@ mod test { assert!(commit == commit_2); } + // Test short_id() against some hashable things. #[test] - fn input_short_id() { - let keychain = ExtKeychain::from_seed(&[0; 32], false).unwrap(); - let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); - let commit = keychain - .commit(5, &key_id, SwitchCommitmentType::Regular) - .unwrap(); + fn test_short_id() -> Result<(), Error> { + let hash = + Hash::from_hex("3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673")?; - let input = Input { - features: OutputFeatures::Plain, - commit, - }; + let short_id = vec![0, 0, 0, 1].short_id(&hash, 0); + assert_eq!(short_id, ShortId::from_hex("b1f0ba1bce57")?); - let block_hash = - Hash::from_hex("3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f673") - .unwrap(); + let short_id = vec![0, 0, 0, 1].short_id(&hash, 1); + assert_eq!(short_id, ShortId::from_hex("e1731a8d06cc")?); - let nonce = 0; + let short_id = vec![0, 0, 0, 2].short_id(&hash, 0); + assert_eq!(short_id, ShortId::from_hex("2d2580a98340")?); - let short_id = input.short_id(&block_hash, nonce); - assert_eq!(short_id, ShortId::from_hex("c4b05f2ba649").unwrap()); + let hash = + Hash::from_hex("3a42e66e46dd7633b57d1f921780a1ac715e6b93c19ee52ab714178eb3a9f600")?; - // now generate the short_id for a *very* similar output (single feature flag - // different) and check it generates a different short_id - let input = Input { - features: OutputFeatures::Coinbase, - commit, - }; + let short_id = vec![0, 0, 0, 1].short_id(&hash, 0); + assert_eq!(short_id, ShortId::from_hex("77a2ed874654")?); - let short_id = input.short_id(&block_hash, nonce); - assert_eq!(short_id, ShortId::from_hex("3f0377c624e9").unwrap()); + let short_id = vec![0, 0, 0, 1].short_id(&hash, 1); + assert_eq!(short_id, ShortId::from_hex("1cf41143b8ea")?); + + let short_id = vec![0, 0, 0, 2].short_id(&hash, 0); + assert_eq!(short_id, ShortId::from_hex("8185df893296")?); + + Ok(()) } #[test] diff --git a/core/src/genesis.rs b/core/src/genesis.rs index 6cb2eaa7d0..060bfd63e9 100644 --- a/core/src/genesis.rs +++ b/core/src/genesis.rs @@ -21,6 +21,7 @@ use crate::core; use crate::core::hash::Hash; +use crate::core::Inputs; use crate::pow::{Difficulty, Proof, ProofOfWork}; use chrono::prelude::{TimeZone, Utc}; use keychain::BlindingFactor; @@ -33,7 +34,7 @@ use util::secp::Signature; /// is small enough to mine it on the fly, so it does not contain its own /// proof of work solution. Can also be easily mutated for different tests. 
pub fn genesis_dev() -> core::Block { - core::Block::with_header(core::BlockHeader { + let gen = core::Block::with_header(core::BlockHeader { height: 0, timestamp: Utc.ymd(1997, 8, 4).and_hms(0, 0, 0), pow: ProofOfWork { @@ -41,7 +42,10 @@ pub fn genesis_dev() -> core::Block { ..Default::default() }, ..Default::default() - }) + }); + + // We need to be compatible with v1/v2 here so inputs must be of the correct enum variant (even if empty). + gen.replace_inputs(Inputs::FeaturesAndCommit(vec![])) } /// Floonet genesis block @@ -153,7 +157,9 @@ pub fn genesis_floo() -> core::Block { ], }, }; - gen.with_reward(output, kernel) + // We need to be compatible with v1/v2 here so inputs must be of the correct enum variant (even if empty). + gen.replace_inputs(Inputs::FeaturesAndCommit(vec![])) + .with_reward(output, kernel) } /// Mainnet genesis block @@ -265,7 +271,10 @@ pub fn genesis_main() -> core::Block { ], }, }; - gen.with_reward(output, kernel) + + // We need to be compatible with v1/v2 here so inputs must be of the correct enum variant (even if empty). + gen.replace_inputs(Inputs::FeaturesAndCommit(vec![])) + .with_reward(output, kernel) } #[cfg(test)] diff --git a/core/src/global.rs b/core/src/global.rs index 5578650021..f5945504d8 100644 --- a/core/src/global.rs +++ b/core/src/global.rs @@ -42,7 +42,7 @@ use util::OneTime; /// Note: We also use a specific (possible different) protocol version /// for both the backend database and MMR data files. /// This defines the p2p layer protocol version for this node. -pub const PROTOCOL_VERSION: u32 = 2; +pub const PROTOCOL_VERSION: u32 = 3; /// Automated testing edge_bits pub const AUTOMATED_TESTING_MIN_EDGE_BITS: u8 = 10; diff --git a/core/src/libtx/build.rs b/core/src/libtx/build.rs index 9acce70015..e3022bdab7 100644 --- a/core/src/libtx/build.rs +++ b/core/src/libtx/build.rs @@ -31,7 +31,9 @@ //! ] //! ) -use crate::core::{Input, KernelFeatures, Output, OutputFeatures, Transaction, TxKernel}; +use crate::core::{ + KernelFeatures, Output, OutputFeatures, OutputIdentifier, Transaction, TxKernel, +}; use crate::libtx::proof::{self, ProofBuild}; use crate::libtx::{aggsig, Error}; use keychain::{BlindSum, BlindingFactor, Identifier, Keychain, SwitchCommitmentType}; @@ -71,7 +73,7 @@ where .keychain .commit(value, &key_id, SwitchCommitmentType::Regular)?; // TODO: proper support for different switch commitment schemes - let input = Input::new(features, commit); + let input = OutputIdentifier { features, commit }; Ok(( tx.with_input(input), sum.sub_key_id(key_id.to_value_path(value)), diff --git a/core/src/ser.rs b/core/src/ser.rs index bbc1c1e4e6..60680e9856 100644 --- a/core/src/ser.rs +++ b/core/src/ser.rs @@ -69,6 +69,8 @@ pub enum Error { DuplicateError, /// Block header version (hard-fork schedule). InvalidBlockVersion, + /// Protocol version related error. 
+ ProtocolVersionError, } impl From for Error { @@ -92,6 +94,7 @@ impl fmt::Display for Error { Error::TooLargeReadErr => f.write_str("too large read"), Error::HexError(ref e) => write!(f, "hex error {:?}", e), Error::InvalidBlockVersion => f.write_str("invalid block version"), + Error::ProtocolVersionError => f.write_str("protocol version error"), } } } @@ -115,6 +118,7 @@ impl error::Error for Error { Error::TooLargeReadErr => "too large read", Error::HexError(_) => "hex error", Error::InvalidBlockVersion => "invalid block version", + Error::ProtocolVersionError => "protocol version error", } } } diff --git a/core/tests/block.rs b/core/tests/block.rs index 27b95406cc..ca94523c87 100644 --- a/core/tests/block.rs +++ b/core/tests/block.rs @@ -16,13 +16,13 @@ mod common; use crate::common::{new_block, tx1i2o, tx2i1o, txspend1i1o}; use crate::core::consensus::{self, BLOCK_OUTPUT_WEIGHT, TESTING_THIRD_HARD_FORK}; use crate::core::core::block::{Block, BlockHeader, Error, HeaderVersion}; +use crate::core::core::committed::Committed; use crate::core::core::hash::Hashed; use crate::core::core::id::ShortIdentifiable; -use crate::core::core::transaction::{ - self, KernelFeatures, NRDRelativeHeight, OutputFeatures, Transaction, -}; +use crate::core::core::transaction::{self, KernelFeatures, NRDRelativeHeight, Transaction}; use crate::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; -use crate::core::core::{Committed, CompactBlock}; +use crate::core::core::CompactBlock; +use crate::core::core::{OutputFeatures, OutputIdentifier}; use crate::core::libtx::build::{self, input, output}; use crate::core::libtx::ProofBuilder; use crate::core::{global, ser}; @@ -347,7 +347,7 @@ fn remove_coinbase_output_flag() { let mut output = b.outputs()[0].clone(); output.features = OutputFeatures::Plain; - b.body.outputs = vec![output]; + b.body = b.body.replace_outputs(&[output]); assert_eq!(b.verify_coinbase(), Err(Error::CoinbaseSumMismatch)); assert!(b @@ -458,7 +458,7 @@ fn empty_block_serialized_size() { } #[test] -fn block_single_tx_serialized_size() { +fn block_single_tx_serialized_size_default() { test_setup(); let keychain = ExtKeychain::from_random_seed(false).unwrap(); let builder = ProofBuilder::new(&keychain); @@ -468,25 +468,25 @@ fn block_single_tx_serialized_size() { let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id); let mut vec = Vec::new(); ser::serialize_default(&mut vec, &b).expect("serialization failed"); - assert_eq!(vec.len(), 2_670); + assert_eq!(vec.len(), 2_669); } #[test] -fn empty_compact_block_serialized_size() { +fn block_single_tx_serialized_size_v3() { test_setup(); let keychain = ExtKeychain::from_random_seed(false).unwrap(); let builder = ProofBuilder::new(&keychain); + let tx1 = tx1i2o(); let prev = BlockHeader::default(); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); - let b = new_block(&[], &keychain, &builder, &prev, &key_id); - let cb: CompactBlock = b.into(); + let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id); let mut vec = Vec::new(); - ser::serialize_default(&mut vec, &cb).expect("serialization failed"); - assert_eq!(vec.len(), 1_104); + ser::serialize(&mut vec, ser::ProtocolVersion(3), &b).expect("serialization failed"); + assert_eq!(vec.len(), 2_669); } #[test] -fn compact_block_single_tx_serialized_size() { +fn block_single_tx_serialized_size_v2() { test_setup(); let keychain = ExtKeychain::from_random_seed(false).unwrap(); let builder = ProofBuilder::new(&keychain); @@ -494,26 +494,50 @@ fn 
compact_block_single_tx_serialized_size() { let prev = BlockHeader::default(); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id); + + // We need to jump through some hoops here to convert the block to v2 "compatibility mode". + // Real block processing would look input features up in the utxo. + // We just use plain features here for testing. + let inputs: Vec<_> = b.inputs().into(); + let inputs: Vec<_> = inputs + .into_iter() + .map(|x| OutputIdentifier { + features: OutputFeatures::Plain, + commit: x, + }) + .collect(); + let b = b.replace_inputs(inputs.into()); + + let mut vec = Vec::new(); + ser::serialize(&mut vec, ser::ProtocolVersion(2), &b).expect("serialization failed"); + + // Note: additional byte for the features on the single input. + assert_eq!(vec.len(), 2_670); +} + +#[test] +fn empty_compact_block_serialized_size() { + test_setup(); + let keychain = ExtKeychain::from_random_seed(false).unwrap(); + let builder = ProofBuilder::new(&keychain); + let prev = BlockHeader::default(); + let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); + let b = new_block(&[], &keychain, &builder, &prev, &key_id); let cb: CompactBlock = b.into(); let mut vec = Vec::new(); ser::serialize_default(&mut vec, &cb).expect("serialization failed"); - assert_eq!(vec.len(), 1_110); + assert_eq!(vec.len(), 1_104); } #[test] -fn block_10_tx_serialized_size() { +fn compact_block_single_tx_serialized_size() { test_setup(); let keychain = ExtKeychain::from_random_seed(false).unwrap(); let builder = ProofBuilder::new(&keychain); - - let mut txs = vec![]; - for _ in 0..10 { - let tx = tx1i2o(); - txs.push(tx); - } + let tx1 = tx1i2o(); let prev = BlockHeader::default(); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); - let b = new_block(&txs, &keychain, &builder, &prev, &key_id); + let b = new_block(&[tx1], &keychain, &builder, &prev, &key_id); // Default protocol version.
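The one-byte deltas asserted by these size tests fall straight out of the per-input wire cost. A back-of-the-envelope check, assuming a serialized Pedersen commitment is 33 bytes and OutputFeatures a single byte (constants assumed for this sketch, not taken from the ser code):

const COMMIT_BYTES: usize = 33; // serialized Pedersen commitment
const FEATURES_BYTES: usize = 1; // serialized OutputFeatures

// v2 inputs carry (features, commit); v3 inputs carry the commit alone.
fn input_bytes_v2(num_inputs: usize) -> usize {
    num_inputs * (FEATURES_BYTES + COMMIT_BYTES)
}

fn input_bytes_v3(num_inputs: usize) -> usize {
    num_inputs * COMMIT_BYTES
}

fn main() {
    // tx1i2o() spends a single input, so the v2 block is exactly one byte
    // larger than the v3/default one: 2_670 vs 2_669 in the assertions above.
    assert_eq!(input_bytes_v2(1) - input_bytes_v3(1), 1);
}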
{ @@ -676,7 +700,7 @@ fn same_amount_outputs_copy_range_proof() { // now we reconstruct the transaction, swapping the rangeproofs so they // have the wrong privkey - let ins = tx.inputs(); + let ins: Vec<_> = tx.inputs().into(); let mut outs = tx.outputs().to_vec(); let kernels = tx.kernels(); outs[0].proof = outs[1].proof; @@ -684,7 +708,7 @@ fn same_amount_outputs_copy_range_proof() { let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0); let prev = BlockHeader::default(); let b = new_block( - &[Transaction::new(ins, &outs, kernels)], + &[Transaction::new(ins.into(), &outs, &kernels)], &keychain, &builder, &prev, @@ -729,7 +753,7 @@ fn wrong_amount_range_proof() { .unwrap(); // we take the range proofs from tx2 into tx1 and rebuild the transaction - let ins = tx1.inputs(); + let ins: Vec<_> = tx1.inputs().into(); let mut outs = tx1.outputs().to_vec(); let kernels = tx1.kernels(); outs[0].proof = tx2.outputs()[0].proof; @@ -738,7 +762,7 @@ fn wrong_amount_range_proof() { let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0); let prev = BlockHeader::default(); let b = new_block( - &[Transaction::new(ins, &outs, kernels)], + &[Transaction::new(ins.into(), &outs, &kernels)], &keychain, &builder, &prev, diff --git a/core/tests/core.rs b/core/tests/core.rs index c4b5c325b8..596db6d0da 100644 --- a/core/tests/core.rs +++ b/core/tests/core.rs @@ -21,7 +21,8 @@ use self::core::core::block::Error::KernelLockHeight; use self::core::core::hash::{Hashed, ZERO_HASH}; use self::core::core::verifier_cache::{LruVerifierCache, VerifierCache}; use self::core::core::{ - aggregate, deaggregate, KernelFeatures, Output, Transaction, TxKernel, Weighting, + aggregate, deaggregate, KernelFeatures, Output, OutputFeatures, OutputIdentifier, Transaction, + TxKernel, Weighting, }; use self::core::libtx::build::{self, initial_tx, input, output, with_excess}; use self::core::libtx::{aggsig, ProofBuilder}; @@ -46,9 +47,22 @@ fn simple_tx_ser() { { let mut vec = Vec::new(); ser::serialize_default(&mut vec, &tx).expect("serialization failed"); - assert_eq!(vec.len(), 947); + assert_eq!(vec.len(), 945); } + // "Convert" our tx to v2 compatibility mode here. + // In reality we would look features up in txpool and utxo. + // We just use plain features here for testing. + let inputs: Vec<_> = tx.inputs().into(); + let inputs: Vec<_> = inputs + .into_iter() + .map(|x| OutputIdentifier { + features: OutputFeatures::Plain, + commit: x, + }) + .collect(); + let tx = tx.replace_inputs(inputs.into()); + // Explicit protocol version 1. { let mut vec = Vec::new(); @@ -557,9 +571,7 @@ fn reward_empty_block() { let b = new_block(&[], &keychain, &builder, &previous_header, &key_id); - b.cut_through() - .unwrap() - .validate(&BlindingFactor::zero(), verifier_cache()) + b.validate(&BlindingFactor::zero(), verifier_cache()) .unwrap(); } @@ -578,11 +590,7 @@ fn reward_with_tx_block() { let previous_header = BlockHeader::default(); let block = new_block(&[tx1], &keychain, &builder, &previous_header, &key_id); - block - .cut_through() - .unwrap() - .validate(&BlindingFactor::zero(), vc.clone()) - .unwrap(); + block.validate(&BlindingFactor::zero(), vc.clone()).unwrap(); } #[test] diff --git a/p2p/src/peer.rs b/p2p/src/peer.rs index d77da384ad..2a95d3afa4 100644 --- a/p2p/src/peer.rs +++ b/p2p/src/peer.rs @@ -317,26 +317,32 @@ impl Peer { /// dropped if the remote peer is known to already have the transaction. /// We support broadcast of lightweight tx kernel hash /// so track known txs by kernel hash. 
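A note on the round-trips used throughout the tests above (`let ins: Vec<_> = tx.inputs().into()`, then `ins.into()` going back): these suggest From impls in both directions. A plausible, simplified sketch of that pair, with stand-in types (grin's real impls also cover the FeaturesAndCommit side):

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Commitment(pub [u8; 33]);

#[derive(Clone, Debug, PartialEq)]
pub enum Inputs {
    CommitOnly(Vec<Commitment>),
}

impl From<Vec<Commitment>> for Inputs {
    fn from(mut commits: Vec<Commitment>) -> Self {
        // Keep inputs sorted so consensus-level comparisons stay stable.
        commits.sort_unstable();
        Inputs::CommitOnly(commits)
    }
}

impl From<Inputs> for Vec<Commitment> {
    fn from(inputs: Inputs) -> Self {
        match inputs {
            Inputs::CommitOnly(commits) => commits,
        }
    }
}

fn main() {
    let inputs: Inputs = vec![Commitment([2u8; 33]), Commitment([1u8; 33])].into();
    let round_trip: Vec<Commitment> = inputs.into();
    assert_eq!(round_trip[0], Commitment([1u8; 33])); // sorted on the way in
}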
- pub fn send_transaction(&self, tx: &core::Transaction) -> Result { - let kernel = &tx.kernels()[0]; - + pub fn send_transaction(&self, kernel_hash: Hash) -> Result { if self .info .capabilities .contains(Capabilities::TX_KERNEL_HASH) { - return self.send_tx_kernel_hash(kernel.hash()); + return self.send_tx_kernel_hash(kernel_hash); } - if !self.tracking_adapter.has_recv(kernel.hash()) { - debug!("Send full tx {} to {}", tx.hash(), self.info.addr); - self.send(tx, msg::Type::Transaction)?; - Ok(true) + // Fall back to sending the full transaction. + if !self.tracking_adapter.has_recv(kernel_hash) { + if let Some(tx) = self.tracking_adapter.get_transaction(kernel_hash) { + debug!("Send full tx {} to {}", kernel_hash, self.info.addr); + self.send(tx, msg::Type::Transaction)?; + Ok(true) + } else { + debug!( + "Not sending tx {} to {} (cannot find it...)", + kernel_hash, self.info.addr + ); + Ok(false) + } } else { debug!( "Not sending tx {} to {} (already seen)", - tx.hash(), - self.info.addr + kernel_hash, self.info.addr ); Ok(false) } @@ -345,9 +351,17 @@ impl Peer { /// Sends the provided stem transaction to the remote peer. /// Note: tracking adapter is ignored for stem transactions (while under /// embargo). - pub fn send_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> { - debug!("Send (stem) tx {} to {}", tx.hash(), self.info.addr); - self.send(tx, msg::Type::StemTransaction) + pub fn send_stem_transaction(&self, kernel_hash: Hash) -> Result<(), Error> { + if let Some(tx) = self.tracking_adapter.get_stem_transaction(kernel_hash) { + debug!("Send (stem) tx {} to {}", kernel_hash, self.info.addr); + self.send(tx, msg::Type::StemTransaction) + } else { + debug!( + "Not sending (stem) tx {} to {} (cannot find it...)", + kernel_hash, self.info.addr + ); + Err(Error::Other("Cannot find tx to send".into())) + } } /// Sends a request for block headers from the provided block locator @@ -469,6 +483,10 @@ impl ChainAdapter for TrackingAdapter { self.adapter.get_transaction(kernel_hash) } + fn get_stem_transaction(&self, kernel_hash: Hash) -> Option { + self.adapter.get_stem_transaction(kernel_hash) + } + fn tx_kernel_received( &self, kernel_hash: Hash, diff --git a/p2p/src/peers.rs b/p2p/src/peers.rs index afa0c9d08f..7351e0535d 100644 --- a/p2p/src/peers.rs +++ b/p2p/src/peers.rs @@ -341,12 +341,11 @@ impl Peers { /// Broadcasts the provided transaction to all our connected peers. /// A peer implementation may drop the broadcast request /// if it knows the remote peer already has the transaction.
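The reworked send_transaction above is easiest to read as a pure decision function: kernel-hash broadcast when the peer advertises the capability, full tx as a fallback, skip when the peer has already seen it or the tx has since left the pool. A hedged sketch of that flow; the names and closure-based lookups are illustrative, not the p2p API:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Hash(pub [u8; 32]);

pub struct Transaction;

pub enum Relay {
    KernelHash(Hash),
    FullTx(Transaction),
    Skip,
}

// Mirrors the branching in Peer::send_transaction.
pub fn plan_relay(
    peer_supports_kernel_hash: bool,
    peer_has_seen: impl Fn(Hash) -> bool,
    lookup_tx: impl Fn(Hash) -> Option<Transaction>,
    kernel_hash: Hash,
) -> Relay {
    if peer_supports_kernel_hash {
        // TX_KERNEL_HASH peers can request the body themselves if they need it.
        return Relay::KernelHash(kernel_hash);
    }
    if peer_has_seen(kernel_hash) {
        return Relay::Skip;
    }
    match lookup_tx(kernel_hash) {
        // The tx may have left the pool between acceptance and broadcast.
        Some(tx) => Relay::FullTx(tx),
        None => Relay::Skip,
    }
}

fn main() {
    let relay = plan_relay(true, |_| false, |_| None, Hash([0u8; 32]));
    assert!(matches!(relay, Relay::KernelHash(_)));
}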
- pub fn broadcast_transaction(&self, tx: &core::Transaction) { - let count = self.broadcast("transaction", |p| p.send_transaction(tx)); + pub fn broadcast_transaction(&self, kernel_hash: Hash) { + let count = self.broadcast("transaction", |p| p.send_transaction(kernel_hash)); debug!( "broadcast_transaction: {} to {} peers, done.", - tx.hash(), - count, + kernel_hash, count, ); } @@ -429,7 +428,7 @@ impl Peers { } }; for peer in peers.values() { - if peer.is_banned() { + if Peer::is_banned(peer) { debug!("clean_peers {:?}, peer banned", peer.info.addr); rm.push(peer.info.addr.clone()); } else if !peer.is_connected() { @@ -556,6 +555,10 @@ impl ChainAdapter for Peers { self.adapter.get_transaction(kernel_hash) } + fn get_stem_transaction(&self, kernel_hash: Hash) -> Option { + self.adapter.get_stem_transaction(kernel_hash) + } + fn tx_kernel_received( &self, kernel_hash: Hash, diff --git a/p2p/src/protocol.rs b/p2p/src/protocol.rs index bfdf4b56d2..225554ca6b 100644 --- a/p2p/src/protocol.rs +++ b/p2p/src/protocol.rs @@ -114,7 +114,10 @@ impl MessageHandler for Protocol { h, msg.header.msg_len, ); let tx = adapter.get_transaction(h); + if let Some(tx) = tx { + debug!("tx found: {} ({})", h, tx.inputs().version_str()); + Ok(Some(Msg::new( Type::Transaction, tx, diff --git a/p2p/src/serv.rs b/p2p/src/serv.rs index aab5b35692..41707635ac 100644 --- a/p2p/src/serv.rs +++ b/p2p/src/serv.rs @@ -292,6 +292,9 @@ impl ChainAdapter for DummyAdapter { fn get_transaction(&self, _h: Hash) -> Option { None } + fn get_stem_transaction(&self, _h: Hash) -> Option { + None + } fn tx_kernel_received(&self, _h: Hash, _peer_info: &PeerInfo) -> Result { Ok(true) diff --git a/p2p/src/types.rs b/p2p/src/types.rs index 713ca19299..0fe8abdea2 100644 --- a/p2p/src/types.rs +++ b/p2p/src/types.rs @@ -88,6 +88,7 @@ pub enum Error { PeerNotBanned, PeerException, Internal, + Other(String), } impl From for Error { @@ -555,6 +556,8 @@ pub trait ChainAdapter: Sync + Send { fn get_transaction(&self, kernel_hash: Hash) -> Option; + fn get_stem_transaction(&self, kernel_hash: Hash) -> Option; + fn tx_kernel_received( &self, kernel_hash: Hash, diff --git a/pool/src/pool.rs b/pool/src/pool.rs index 5b7f284b96..9f13293988 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -20,7 +20,8 @@ use self::core::core::id::{ShortId, ShortIdentifiable}; use self::core::core::transaction; use self::core::core::verifier_cache::VerifierCache; use self::core::core::{ - Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel, Weighting, + Block, BlockHeader, BlockSums, Committed, Inputs, OutputIdentifier, Transaction, TxKernel, + Weighting, }; use self::util::RwLock; use crate::types::{BlockChain, PoolEntry, PoolError}; @@ -60,15 +61,8 @@ where } /// Does the transaction pool contain an entry for the given transaction? - pub fn contains_tx(&self, hash: Hash) -> bool { - self.entries.iter().any(|x| x.tx.hash() == hash) - } - - pub fn get_tx(&self, hash: Hash) -> Option { - self.entries - .iter() - .find(|x| x.tx.hash() == hash) - .map(|x| x.tx.clone()) + pub fn contains_tx(&self, tx_hash: Hash) -> bool { + self.entries.iter().any(|x| x.tx.hash() == tx_hash) } /// Query the tx pool for an individual tx matching the given public excess. 
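Each layer of the adapter stack touched above (Peers, TrackingAdapter, DummyAdapter, and the server-side adapter further down) now answers get_stem_transaction alongside get_transaction, mostly by straight delegation. A stripped-down sketch of that pattern with stand-in types, not the actual trait definitions:

#[derive(Clone, Copy)]
pub struct Hash(pub [u8; 32]);
pub struct Transaction;

pub trait ChainAdapter {
    fn get_transaction(&self, kernel_hash: Hash) -> Option<Transaction>;
    fn get_stem_transaction(&self, kernel_hash: Hash) -> Option<Transaction>;
}

// Outer layers wrap an inner adapter and forward lookups unchanged,
// in the spirit of TrackingAdapter and Peers above.
pub struct Forwarding<A: ChainAdapter> {
    pub inner: A,
}

impl<A: ChainAdapter> ChainAdapter for Forwarding<A> {
    fn get_transaction(&self, kernel_hash: Hash) -> Option<Transaction> {
        self.inner.get_transaction(kernel_hash)
    }
    fn get_stem_transaction(&self, kernel_hash: Hash) -> Option<Transaction> {
        self.inner.get_stem_transaction(kernel_hash)
    }
}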
@@ -84,7 +78,45 @@ where None } + // Convert a v3 (commit-only) tx to v2 by looking each input up via - + // agg_tx.get_output (an output of another pool tx), or + // blockchain.get_unspent (an unspent output in the chain) + pub fn convert_tx_to_v2( + &self, + tx: Transaction, + other_tx: Option, + ) -> Result { + debug!( + "convert_tx_to_v2: {} ({})", + tx.hash(), + tx.inputs().version_str(), + ); + + match tx.inputs() { + Inputs::CommitOnly(inputs) => { + let pool_tx = self.all_transactions_aggregate()?; + let txs: Vec = vec![pool_tx, other_tx].into_iter().flatten().collect(); + let agg_tx = transaction::aggregate(&txs)?; + + let inputs: Vec = inputs + .into_iter() + .map(|input| { + agg_tx + .get_output(&input.commitment()) + .map(|out| OutputIdentifier::from(&out)) + .or(self.blockchain.get_unspent(input.commitment()).ok()) + }) + .flatten() + .collect(); + Ok(tx.replace_inputs(inputs.into())) + } + Inputs::FeaturesAndCommit(_) => Ok(tx), + } + } + /// Query the tx pool for an individual tx matching the given kernel hash. + /// Used to provide transactions to peers (stem relay or requested by peer). + /// Note: Tx must be in v2 format to support backward compatibility with v2 peers. pub fn retrieve_tx_by_kernel_hash(&self, hash: Hash) -> Option { for x in &self.entries { for k in x.tx.kernels() { @@ -96,6 +128,18 @@ where None } + /// Query the tx pool for existence of tx matching the given kernel hash. + pub fn tx_by_kernel_hash_exists(&self, hash: Hash) -> bool { + for x in &self.entries { + for k in x.tx.kernels() { + if k.hash() == hash { + return true; + } + } + } + false + } + /// Query the tx pool for all known txs based on kernel short_ids /// from the provided compact_block. /// Note: does not validate that we return the full set of required txs. @@ -185,7 +229,7 @@ where pub fn add_to_pool( &mut self, entry: PoolEntry, - extra_txs: Vec, + extra_txs: &[Transaction], header: &BlockHeader, ) -> Result<(), PoolError> { // Combine all the txs from the pool with any extra txs provided. @@ -196,23 +240,18 @@ where return Err(PoolError::DuplicateTx); } - txs.extend(extra_txs); + txs.extend_from_slice(extra_txs); - let agg_tx = if txs.is_empty() { - // If we have nothing to aggregate then simply return the tx itself. - entry.tx.clone() - } else { - // Create a single aggregated tx from the existing pool txs and the - // new entry - txs.push(entry.tx.clone()); - transaction::aggregate(&txs)? - }; + let pool_tx = transaction::aggregate(&txs)?; + let agg_tx = transaction::aggregate(&[pool_tx, entry.tx.clone()])?; // Validate aggregated tx (existing pool + new tx), ignoring tx weight limits. // Validate against known chain state at the provided header. self.validate_raw_tx(&agg_tx, header, Weighting::NoLimit)?; + // If we get here successfully then we can safely add the entry to the pool.
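The heart of convert_tx_to_v2 above is resolving each commit-only input to a full output identifier, preferring an output still sitting in the aggregated pool tx and falling back to the chain's unspent set. A sketch of that lookup order with stand-in types and closure-based lookups (the sketch returns an Option per input; the real code above flattens unresolved inputs away rather than erroring on them):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Commitment(pub [u8; 33]);

#[derive(Clone, Debug)]
pub struct OutputIdentifier {
    pub features: u8, // stand-in for OutputFeatures
    pub commit: Commitment,
}

pub fn resolve_input(
    commit: Commitment,
    pool_output: impl Fn(Commitment) -> Option<OutputIdentifier>,
    chain_unspent: impl Fn(Commitment) -> Option<OutputIdentifier>,
) -> Option<OutputIdentifier> {
    // Pool first: the input may spend an output created by another pool tx,
    // which is not yet in the utxo set.
    pool_output(commit).or_else(|| chain_unspent(commit))
}

fn main() {
    let commit = Commitment([7u8; 33]);
    let found = resolve_input(
        commit,
        |_| None, // nothing in the pool
        |c| Some(OutputIdentifier { features: 0, commit: c }),
    );
    assert!(found.is_some());
}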
self.log_pool_add(&entry, header); + self.entries.push(entry); Ok(()) @@ -220,15 +259,17 @@ where fn log_pool_add(&self, entry: &PoolEntry, header: &BlockHeader) { debug!( - "add_to_pool [{}]: {} ({:?}) [in/out/kern: {}/{}/{}] pool: {} (at block {})", + "add_to_pool [{}]: {} ({:?}) [in/out/kern: {}/{}/{}] ({}) pool: {} ({} at {})", self.name, entry.tx.hash(), entry.src, entry.tx.inputs().len(), entry.tx.outputs().len(), entry.tx.kernels().len(), + entry.tx.inputs().version_str(), self.size(), header.hash(), + header.height, ); } @@ -320,7 +361,7 @@ where } for x in existing_entries { - let _ = self.add_to_pool(x, extra_txs.clone(), header); + let _ = self.add_to_pool(x, &extra_txs, header); } Ok(()) @@ -354,12 +395,13 @@ where let mut insert_pos = None; let mut is_rejected = false; - for input in entry.tx.inputs() { - if rejected.contains(&input.commitment()) { + let tx_inputs: Vec<_> = entry.tx.inputs().into(); + for input in &tx_inputs { + if rejected.contains(input) { // Depends on a rejected tx, so reject this one. is_rejected = true; continue; - } else if let Some(pos) = output_commits.get(&input.commitment()) { + } else if let Some(pos) = output_commits.get(input) { if insert_pos.is_some() { // Multiple dependencies so reject this tx (pick it up in next block). is_rejected = true; @@ -460,13 +502,17 @@ where /// Quick reconciliation step - we can evict any txs in the pool where /// inputs or kernels intersect with the block. pub fn reconcile_block(&mut self, block: &Block) { + let block_inputs: Vec<_> = block.inputs().into(); // Filter txs in the pool based on the latest block. // Reject any txs where we see a matching tx kernel in the block. // Also reject any txs where we see a conflicting tx, // where an input is spent in a different tx. self.entries.retain(|x| { + let tx_inputs: Vec<_> = x.tx.inputs().into(); !x.tx.kernels().iter().any(|y| block.kernels().contains(y)) - && !x.tx.inputs().iter().any(|y| block.inputs().contains(y)) + && !tx_inputs + .iter() + .any(|y| block_inputs.binary_search(y).is_ok()) }); } diff --git a/pool/src/transaction_pool.rs b/pool/src/transaction_pool.rs index 2ce71effa5..64b1812c5e 100644 --- a/pool/src/transaction_pool.rs +++ b/pool/src/transaction_pool.rs @@ -88,7 +88,7 @@ where // Add tx to stempool (passing in all txs from txpool to validate against). fn add_to_stempool(&mut self, entry: PoolEntry, header: &BlockHeader) -> Result<(), PoolError> { self.stempool - .add_to_pool(entry, self.txpool.all_transactions(), header)?; + .add_to_pool(entry, &self.txpool.all_transactions(), header)?; Ok(()) } @@ -122,7 +122,7 @@ where entry.src = TxSource::Deaggregate; } } - self.txpool.add_to_pool(entry.clone(), vec![], header)?; + self.txpool.add_to_pool(entry.clone(), &[], header)?; // We now need to reconcile the stempool based on the new state of the txpool. // Some stempool txs may no longer be valid and we need to evict them. @@ -160,6 +160,11 @@ where stem: bool, header: &BlockHeader, ) -> Result<(), PoolError> { + // First step is to convert transaction to consistent v3 format. + // We handle transactions internally in v3 format. + // We will convert to v2 on the way out, if relaying to v2 peer for backward compatibility. + let tx = tx.convert_to_v3(); + // Quick check for duplicate txs. // Our stempool is private and we do not want to reveal anything about the txs contained. // If this is a stem tx and is already present in stempool then fluff by adding to txpool. 
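reconcile_block above swaps a linear contains() for binary_search against the block inputs, which is only sound because input commitments are kept sorted (the v2 conversion in this PR sorts them after replacing inputs). A minimal illustration over plain byte-array commitments:

fn spends_from_block(block_inputs: &[[u8; 33]], tx_inputs: &[[u8; 33]]) -> bool {
    // binary_search is only correct on sorted data.
    debug_assert!(block_inputs.windows(2).all(|w| w[0] <= w[1]));
    tx_inputs
        .iter()
        .any(|input| block_inputs.binary_search(input).is_ok())
}

fn main() {
    let block_inputs = [[1u8; 33], [3u8; 33]]; // sorted
    assert!(spends_from_block(&block_inputs, &[[3u8; 33]]));
    assert!(!spends_from_block(&block_inputs, &[[2u8; 33]]));
}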
@@ -273,9 +278,30 @@ where Ok(()) } + /// Check txpool for existence of tx by kernel hash. + pub fn tx_by_kernel_hash_exists(&self, hash: Hash) -> bool { + self.txpool.tx_by_kernel_hash_exists(hash) + } + /// Retrieve individual transaction for the given kernel hash. - pub fn retrieve_tx_by_kernel_hash(&self, hash: Hash) -> Option { - self.txpool.retrieve_tx_by_kernel_hash(hash) + pub fn retrieve_tx_by_kernel_hash(&self, hash: Hash) -> Result, PoolError> { + self.txpool + .retrieve_tx_by_kernel_hash(hash) + .map(|tx| self.txpool.convert_tx_to_v2(tx, None)) + .transpose() + } + + pub fn retrieve_stem_tx_by_kernel_hash( + &self, + hash: Hash, + ) -> Result, PoolError> { + self.stempool + .retrieve_tx_by_kernel_hash(hash) + .map(|tx| { + let txpool_tx = self.txpool.all_transactions_aggregate()?; + self.stempool.convert_tx_to_v2(tx, txpool_tx) + }) + .transpose() } /// Retrieve all transactions matching the provided "compact block" diff --git a/pool/src/types.rs b/pool/src/types.rs index 75f7825943..dc370a8c39 100644 --- a/pool/src/types.rs +++ b/pool/src/types.rs @@ -20,12 +20,13 @@ use chrono::prelude::{DateTime, Utc}; use self::core::consensus; use self::core::core::block; use self::core::core::committed; -use self::core::core::hash::Hash; +use self::core::core::hash::{Hash, Hashed}; use self::core::core::transaction::{self, Transaction}; -use self::core::core::{BlockHeader, BlockSums}; +use self::core::core::{BlockHeader, BlockSums, OutputIdentifier}; use failure::Fail; use grin_core as core; use grin_keychain as keychain; +use grin_util::secp::pedersen::Commitment; /// Dandelion "epoch" length. const DANDELION_EPOCH_SECS: u16 = 600; @@ -159,6 +160,12 @@ pub struct PoolEntry { pub tx: Transaction, } +impl PoolEntry { + pub fn kernel_hash(&self) -> Hash { + self.tx.kernels()[0].hash() + } +} + /// Used to make decisions based on transaction acceptance priority from /// various sources. For example, a node may want to bypass pool size /// restrictions when accepting a transaction from a local wallet. @@ -278,6 +285,7 @@ pub trait BlockChain: Sync + Send { fn get_block_header(&self, hash: &Hash) -> Result; fn get_block_sums(&self, hash: &Hash) -> Result; + fn get_unspent(&self, commitment: Commitment) -> Result; } /// Bridge between the transaction pool and the rest of the system. Handles diff --git a/servers/src/common/adapters.rs b/servers/src/common/adapters.rs index 4c1806fd2d..307c47a327 100644 --- a/servers/src/common/adapters.rs +++ b/servers/src/common/adapters.rs @@ -30,12 +30,13 @@ use crate::common::types::{ChainValidationMode, DandelionEpoch, ServerConfig}; use crate::core::core::hash::{Hash, Hashed}; use crate::core::core::transaction::Transaction; use crate::core::core::verifier_cache::VerifierCache; -use crate::core::core::{BlockHeader, BlockSums, CompactBlock}; +use crate::core::core::{BlockHeader, BlockSums, CompactBlock, OutputIdentifier}; use crate::core::pow::Difficulty; use crate::core::{core, global}; use crate::p2p; use crate::p2p::types::PeerInfo; use crate::pool::{self, BlockChain, PoolAdapter}; +use crate::util::secp::pedersen::Commitment; use crate::util::OneTime; use chrono::prelude::*; use chrono::Duration; @@ -73,8 +74,19 @@ where Ok(self.chain().head()?.height) } + // Called when a peer requests a transaction. 
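Both retrieve_*_by_kernel_hash methods above use the same Option::transpose trick to turn "found a tx, but converting it to v2 may fail" into a Result over an Option. A tiny self-contained illustration of that shape, with stand-in types:

pub struct Tx;
pub struct PoolError;

fn find(_hash: u64) -> Option<Tx> {
    Some(Tx)
}

fn convert_to_v2(tx: Tx) -> Result<Tx, PoolError> {
    Ok(tx)
}

fn retrieve(hash: u64) -> Result<Option<Tx>, PoolError> {
    // Option<Result<Tx, E>> -> Result<Option<Tx>, E>
    find(hash).map(convert_to_v2).transpose()
}

fn main() {
    assert!(matches!(retrieve(42), Ok(Some(_))));
}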
fn get_transaction(&self, kernel_hash: Hash) -> Option { - self.tx_pool.read().retrieve_tx_by_kernel_hash(kernel_hash) + self.tx_pool + .read() + .retrieve_tx_by_kernel_hash(kernel_hash) + .unwrap_or(None) + } + + fn get_stem_transaction(&self, kernel_hash: Hash) -> Option { + self.tx_pool + .read() + .retrieve_stem_tx_by_kernel_hash(kernel_hash) + .unwrap_or(None) } fn tx_kernel_received( @@ -86,10 +98,7 @@ where if self.sync_state.is_syncing() { return Ok(true); } - - let tx = self.tx_pool.read().retrieve_tx_by_kernel_hash(kernel_hash); - - if tx.is_none() { + if !self.tx_pool.read().tx_by_kernel_hash_exists(kernel_hash) { self.request_transaction(kernel_hash, peer_info); } Ok(true) @@ -113,13 +122,11 @@ where hook.on_transaction_received(&tx); } - let tx_hash = tx.hash(); - let mut tx_pool = self.tx_pool.write(); - match tx_pool.add_to_pool(source, tx, stem, &header) { + match tx_pool.add_to_pool(source, tx.clone(), stem, &header) { Ok(_) => Ok(true), Err(e) => { - debug!("Transaction {} rejected: {:?}", tx_hash, e); + debug!("Transaction {} rejected: {:?}", tx.hash(), e); Ok(false) } } @@ -827,7 +834,7 @@ impl DandelionAdapter for PoolToNetAdapter { impl pool::PoolAdapter for PoolToNetAdapter { fn tx_accepted(&self, entry: &pool::PoolEntry) { - self.peers().broadcast_transaction(&entry.tx); + self.peers().broadcast_transaction(entry.kernel_hash()); } fn stem_tx_accepted(&self, entry: &pool::PoolEntry) -> Result<(), pool::PoolError> { @@ -841,7 +848,7 @@ impl pool::PoolAdapter for PoolToNetAdapter { // If node is configured to always stem our (pushed via api) txs then do so. if epoch.is_stem() || (entry.src.is_pushed() && epoch.always_stem_our_txs()) { if let Some(peer) = epoch.relay_peer(&self.peers()) { - match peer.send_stem_transaction(&entry.tx) { + match peer.send_stem_transaction(entry.kernel_hash()) { Ok(_) => { info!("Stemming this epoch, relaying to next peer."); Ok(()) @@ -932,6 +939,14 @@ impl pool::BlockChain for PoolToChainAdapter { .map_err(|_| pool::PoolError::Other("failed to get block_sums".to_string())) } + fn get_unspent(&self, commitment: Commitment) -> Result { + if let Ok(Some((output, _))) = self.chain().get_unspent_by_commitment(commitment) { + Ok(output) + } else { + Err(pool::PoolError::Other("failed to get unspent".to_string())) + } + } + fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> { self.chain() .validate_tx(tx) diff --git a/servers/src/mining/mine_block.rs b/servers/src/mining/mine_block.rs index 54dcb55c82..6a15297152 100644 --- a/servers/src/mining/mine_block.rs +++ b/servers/src/mining/mine_block.rs @@ -177,6 +177,10 @@ fn build_block( let (output, kernel, block_fees) = get_coinbase(wallet_listener_url, block_fees)?; let mut b = core::Block::from_reward(&head, &txs, output, kernel, difficulty.difficulty)?; + // The block we just built should already be fully cut-through, but let's double-check - + // it is our mining reward to lose if this block is not valid. + b.verify_cut_through()?; + // making sure we're not spending time mining a useless block b.validate(&head.total_kernel_offset, verifier_cache)?;
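Finally, on the verify_cut_through guard added to build_block above: a block is cut through when no commitment appears as both an input and an output, so the check keeps a miner from wasting work on a block that peers would reject. A simplified sketch over raw commitment bytes (the real check also rejects duplicate inputs and duplicate outputs, and works over sorted vectors rather than a hash set):

use std::collections::HashSet;

fn verify_cut_through(inputs: &[[u8; 33]], outputs: &[[u8; 33]]) -> Result<(), &'static str> {
    let outputs: HashSet<[u8; 33]> = outputs.iter().copied().collect();
    if inputs.iter().any(|input| outputs.contains(input)) {
        // An input spending an output of the same block should have been
        // cut through during aggregation.
        return Err("cut-through violation");
    }
    Ok(())
}

fn main() {
    assert!(verify_cut_through(&[[1u8; 33]], &[[2u8; 33]]).is_ok());
    assert!(verify_cut_through(&[[1u8; 33]], &[[1u8; 33]]).is_err());
}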