From 312d06a8f35fe952106d24c537646df350762e4f Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 29 Nov 2024 16:22:17 +0200 Subject: [PATCH 1/2] feat(block-producer): improve mempool config (#543) --- .../block-producer/src/batch_builder/batch.rs | 46 ++----- .../src/block_builder/prover/block_witness.rs | 5 +- .../block-producer/src/domain/transaction.rs | 8 ++ crates/block-producer/src/errors.rs | 38 +----- crates/block-producer/src/lib.rs | 12 +- .../block-producer/src/mempool/batch_graph.rs | 34 ++--- crates/block-producer/src/mempool/mod.rs | 118 ++++++++++++++++-- .../src/mempool/transaction_graph.rs | 46 ++++--- crates/block-producer/src/server/mod.rs | 18 +-- 9 files changed, 203 insertions(+), 122 deletions(-) diff --git a/crates/block-producer/src/batch_builder/batch.rs b/crates/block-producer/src/batch_builder/batch.rs index be0c28cb8..225112d00 100644 --- a/crates/block-producer/src/batch_builder/batch.rs +++ b/crates/block-producer/src/batch_builder/batch.rs @@ -10,8 +10,7 @@ use miden_objects::{ crypto::hash::blake::{Blake3Digest, Blake3_256}, notes::{NoteHeader, NoteId, Nullifier}, transaction::{InputNoteCommitment, OutputNote, ProvenTransaction, TransactionId}, - AccountDeltaError, Digest, MAX_ACCOUNTS_PER_BATCH, MAX_INPUT_NOTES_PER_BATCH, - MAX_OUTPUT_NOTES_PER_BATCH, + AccountDeltaError, Digest, }; use tracing::instrument; @@ -76,13 +75,11 @@ impl TransactionBatch { /// for transforming unauthenticated notes into authenticated notes. /// /// # Errors + /// /// Returns an error if: - /// - The number of output notes across all transactions exceeds 4096. /// - There are duplicated output notes or unauthenticated notes found across all transactions /// in the batch. /// - Hashes for corresponding input notes and output notes don't match. - /// - /// TODO: enforce limit on the number of created nullifiers. 
#[instrument(target = "miden-block-producer", name = "new_batch", skip_all, err)] pub fn new( txs: Vec, @@ -101,11 +98,7 @@ impl TransactionBatch { vacant.insert(AccountUpdate::new(tx)); }, Entry::Occupied(occupied) => occupied.into_mut().merge_tx(tx).map_err(|error| { - BuildBatchError::AccountUpdateError { - account_id: tx.account_id(), - error, - txs: txs.clone(), - } + BuildBatchError::AccountUpdateError { account_id: tx.account_id(), error } })?, }; @@ -113,15 +106,11 @@ impl TransactionBatch { for note in tx.get_unauthenticated_notes() { let id = note.id(); if !unauthenticated_input_notes.insert(id) { - return Err(BuildBatchError::DuplicateUnauthenticatedNote(id, txs.clone())); + return Err(BuildBatchError::DuplicateUnauthenticatedNote(id)); } } } - if updated_accounts.len() > MAX_ACCOUNTS_PER_BATCH { - return Err(BuildBatchError::TooManyAccountsInBatch(txs)); - } - // Populate batch produced nullifiers and match output notes with corresponding // unauthenticated input notes in the same batch, which are removed from the unauthenticated // input notes set. @@ -136,7 +125,7 @@ impl TransactionBatch { // Header is presented only for unauthenticated input notes. let input_note = match input_note.header() { Some(input_note_header) => { - if output_notes.remove_note(input_note_header, &txs)? { + if output_notes.remove_note(input_note_header)? { continue; } @@ -154,16 +143,8 @@ impl TransactionBatch { input_notes.push(input_note) } - if input_notes.len() > MAX_INPUT_NOTES_PER_BATCH { - return Err(BuildBatchError::TooManyInputNotes(input_notes.len(), txs)); - } - let output_notes = output_notes.into_notes(); - if output_notes.len() > MAX_OUTPUT_NOTES_PER_BATCH { - return Err(BuildBatchError::TooManyNotesCreated(output_notes.len(), txs)); - } - // Build the output notes SMT. 
let output_notes_smt = BatchNoteTree::with_contiguous_leaves( output_notes.iter().map(|note| (note.id(), note.metadata())), @@ -249,7 +230,7 @@ impl OutputNoteTracker { for tx in txs { for note in tx.output_notes().iter() { if output_note_index.insert(note.id(), output_notes.len()).is_some() { - return Err(BuildBatchError::DuplicateOutputNote(note.id(), txs.to_vec())); + return Err(BuildBatchError::DuplicateOutputNote(note.id())); } output_notes.push(Some(note.clone())); } @@ -258,11 +239,7 @@ impl OutputNoteTracker { Ok(Self { output_notes, output_note_index }) } - pub fn remove_note( - &mut self, - input_note_header: &NoteHeader, - txs: &[ProvenTransaction], - ) -> Result { + pub fn remove_note(&mut self, input_note_header: &NoteHeader) -> Result { let id = input_note_header.id(); if let Some(note_index) = self.output_note_index.remove(&id) { if let Some(output_note) = mem::take(&mut self.output_notes[note_index]) { @@ -273,7 +250,6 @@ impl OutputNoteTracker { id, input_hash, output_hash, - txs: txs.to_vec(), }); } @@ -322,7 +298,7 @@ mod tests { )); match OutputNoteTracker::new(&txs) { - Err(BuildBatchError::DuplicateOutputNote(note_id, _)) => { + Err(BuildBatchError::DuplicateOutputNote(note_id)) => { assert_eq!(note_id, duplicate_output_note.id()) }, res => panic!("Unexpected result: {res:?}"), @@ -336,8 +312,8 @@ mod tests { let note_to_remove = mock_note(4); - assert!(tracker.remove_note(note_to_remove.header(), &txs).unwrap()); - assert!(!tracker.remove_note(note_to_remove.header(), &txs).unwrap()); + assert!(tracker.remove_note(note_to_remove.header()).unwrap()); + assert!(!tracker.remove_note(note_to_remove.header()).unwrap()); // Check that output notes are in the expected order and consumed note was removed assert_eq!( @@ -358,7 +334,7 @@ mod tests { let duplicate_note = mock_note(5); txs.push(mock_proven_tx(4, vec![duplicate_note.clone()], vec![mock_output_note(9)])); match TransactionBatch::new(txs, Default::default()) { - 
Err(BuildBatchError::DuplicateUnauthenticatedNote(note_id, _)) => { + Err(BuildBatchError::DuplicateUnauthenticatedNote(note_id)) => { assert_eq!(note_id, duplicate_note.id()) }, res => panic!("Unexpected result: {res:?}"), diff --git a/crates/block-producer/src/block_builder/prover/block_witness.rs b/crates/block-producer/src/block_builder/prover/block_witness.rs index aad05ce51..af837339c 100644 --- a/crates/block-producer/src/block_builder/prover/block_witness.rs +++ b/crates/block-producer/src/block_builder/prover/block_witness.rs @@ -7,7 +7,7 @@ use miden_objects::{ notes::Nullifier, transaction::TransactionId, vm::{AdviceInputs, StackInputs}, - BlockHeader, Digest, Felt, BLOCK_NOTE_TREE_DEPTH, MAX_BATCHES_PER_BLOCK, ZERO, + BlockHeader, Digest, Felt, BLOCK_NOTE_TREE_DEPTH, ZERO, }; use crate::{ @@ -35,9 +35,6 @@ impl BlockWitness { mut block_inputs: BlockInputs, batches: &[TransactionBatch], ) -> Result<(Self, Vec), BuildBlockError> { - if batches.len() > MAX_BATCHES_PER_BLOCK { - return Err(BuildBlockError::TooManyBatchesInBlock(batches.len())); - } Self::validate_nullifiers(&block_inputs, batches)?; let batch_created_notes_roots = batches diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index e040fe52b..738fccf36 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -99,6 +99,14 @@ impl AuthenticatedTransaction { self.inner.output_notes().iter().map(|note| note.id()) } + pub fn output_note_count(&self) -> usize { + self.inner.output_notes().num_notes() + } + + pub fn input_note_count(&self) -> usize { + self.inner.input_notes().num_notes() + } + /// Notes which were unauthenticate in the transaction __and__ which were /// not authenticated by the store inputs. 
pub fn unauthenticated_notes(&self) -> impl Iterator + '_ { diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 493116f4c..46a50be6e 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -4,9 +4,8 @@ use miden_objects::{ accounts::AccountId, crypto::merkle::{MerkleError, MmrError}, notes::{NoteId, Nullifier}, - transaction::{ProvenTransaction, TransactionId}, - AccountDeltaError, Digest, TransactionInputError, MAX_ACCOUNTS_PER_BATCH, - MAX_BATCHES_PER_BLOCK, MAX_INPUT_NOTES_PER_BATCH, MAX_OUTPUT_NOTES_PER_BATCH, + transaction::TransactionId, + AccountDeltaError, Digest, TransactionInputError, }; use miden_processor::ExecutionError; use thiserror::Error; @@ -94,51 +93,26 @@ impl From for tonic::Status { // Batch building errors // ================================================================================================= -/// Error that may happen while building a transaction batch. -/// -/// These errors are returned from the batch builder to the transaction queue, instead of -/// dropping the transactions, they are included into the error values, so that the transaction -/// queue can re-queue them. +/// Error encountered while building a batch. #[derive(Debug, PartialEq, Eq, Error)] pub enum BuildBatchError { - #[error( - "Too many input notes in the batch. Got: {0}, limit: {}", - MAX_INPUT_NOTES_PER_BATCH - )] - TooManyInputNotes(usize, Vec), - - #[error( - "Too many notes created in the batch. Got: {0}, limit: {}", - MAX_OUTPUT_NOTES_PER_BATCH - )] - TooManyNotesCreated(usize, Vec), - - #[error( - "Too many account updates in the batch. 
Got: {}, limit: {}", - .0.len(), - MAX_ACCOUNTS_PER_BATCH - )] - TooManyAccountsInBatch(Vec), - #[error("Duplicated unauthenticated transaction input note ID in the batch: {0}")] - DuplicateUnauthenticatedNote(NoteId, Vec), + DuplicateUnauthenticatedNote(NoteId), #[error("Duplicated transaction output note ID in the batch: {0}")] - DuplicateOutputNote(NoteId, Vec), + DuplicateOutputNote(NoteId), #[error("Note hashes mismatch for note {id}: (input: {input_hash}, output: {output_hash})")] NoteHashesMismatch { id: NoteId, input_hash: Digest, output_hash: Digest, - txs: Vec, }, #[error("Failed to merge transaction delta into account {account_id}: {error}")] AccountUpdateError { account_id: AccountId, error: AccountDeltaError, - txs: Vec, }, #[error("Nothing actually went wrong, failure was injected on purpose")] @@ -202,8 +176,6 @@ pub enum BuildBlockError { InconsistentNullifiers(Vec), #[error("unauthenticated transaction notes not found in the store or in outputs of other transactions in the block: {0:?}")] UnauthenticatedNotesNotFound(Vec), - #[error("too many batches in block. 
Got: {0}, max: {MAX_BATCHES_PER_BLOCK}")] - TooManyBatchesInBlock(usize), #[error("failed to merge transaction delta into account {account_id}: {error}")] AccountUpdateError { account_id: AccountId, diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs index 961e1b5e7..cf82a3570 100644 --- a/crates/block-producer/src/lib.rs +++ b/crates/block-producer/src/lib.rs @@ -21,7 +21,7 @@ pub mod server; pub const COMPONENT: &str = "miden-block-producer"; /// The number of transactions per batch -const SERVER_BATCH_SIZE: usize = 2; +const SERVER_MAX_TXS_PER_BATCH: usize = 2; /// The frequency at which blocks are produced const SERVER_BLOCK_FREQUENCY: Duration = Duration::from_secs(10); @@ -37,3 +37,13 @@ const SERVER_MAX_BATCHES_PER_BLOCK: usize = 4; /// This determines the grace period incoming transactions have between fetching their input from /// the store and verification in the mempool. const SERVER_MEMPOOL_STATE_RETENTION: usize = 5; + +const _: () = assert!( + SERVER_MAX_BATCHES_PER_BLOCK <= miden_objects::MAX_BATCHES_PER_BLOCK, + "Server constraint cannot exceed the protocol's constraint" +); + +const _: () = assert!( + SERVER_MAX_TXS_PER_BATCH <= miden_objects::MAX_ACCOUNTS_PER_BATCH, + "Server constraint cannot exceed the protocol's constraint" +); diff --git a/crates/block-producer/src/mempool/batch_graph.rs b/crates/block-producer/src/mempool/batch_graph.rs index b83af277d..1eebd71e8 100644 --- a/crates/block-producer/src/mempool/batch_graph.rs +++ b/crates/block-producer/src/mempool/batch_graph.rs @@ -4,7 +4,7 @@ use miden_objects::transaction::TransactionId; use super::{ dependency_graph::{DependencyGraph, GraphError}, - BatchJobId, + BatchJobId, BlockBudget, BudgetStatus, }; use crate::batch_builder::batch::TransactionBatch; @@ -143,7 +143,9 @@ impl BatchGraph { // dependency graph, and therefore must all be in the batches mapping. 
let batches = batch_ids .into_iter() - .map(|batch_id| (batch_id, self.batches.remove(&batch_id).unwrap())) + .map(|batch_id| { + (batch_id, self.batches.remove(&batch_id).expect("batch should be removed")) + }) .collect::>(); for tx in batches.values().flatten() { @@ -203,24 +205,28 @@ impl BatchGraph { self.inner.promote_pending(id, batch) } - /// Returns at most `count` batches which are ready for inclusion in a block. - pub fn select_block(&mut self, count: usize) -> BTreeMap { + /// Selects the next set of batches ready for inclusion in a block while adhering to the given + /// budget. + pub fn select_block( + &mut self, + mut budget: BlockBudget, + ) -> BTreeMap { let mut batches = BTreeMap::new(); - for _ in 0..count { - // This strategy just selects arbitrary roots for now. This is valid but not very - // interesting or efficient. - let Some(batch_id) = self.inner.roots().first().copied() else { + while let Some(batch_id) = self.inner.roots().first().copied() { + // SAFETY: Since it was a root batch, it must definitely have a processed batch + // associated with it. + let batch = self.inner.get(&batch_id).expect("root should be in graph").clone(); + + // Adhere to block's budget. + if budget.check_then_subtract(&batch) == BudgetStatus::Exceeded { break; - }; + } // SAFETY: This is definitely a root since we just selected it from the set of roots. - self.inner.process_root(batch_id).unwrap(); - // SAFETY: Since it was a root batch, it must definitely have a processed batch - // associated with it. 
- let batch = self.inner.get(&batch_id).unwrap(); + self.inner.process_root(batch_id).expect("root should be processed"); - batches.insert(batch_id, batch.clone()); + batches.insert(batch_id, batch); } batches diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index e793ef1df..5a59f2aca 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -6,12 +6,15 @@ use std::{ use batch_graph::BatchGraph; use inflight_state::InflightState; +use miden_objects::{ + MAX_ACCOUNTS_PER_BATCH, MAX_INPUT_NOTES_PER_BATCH, MAX_OUTPUT_NOTES_PER_BATCH, +}; use tokio::sync::Mutex; use transaction_graph::TransactionGraph; use crate::{ batch_builder::batch::TransactionBatch, domain::transaction::AuthenticatedTransaction, - errors::AddTransactionError, + errors::AddTransactionError, SERVER_MAX_BATCHES_PER_BLOCK, SERVER_MAX_TXS_PER_BATCH, }; mod batch_graph; @@ -73,6 +76,102 @@ impl BlockNumber { } } +// MEMPOOL BUDGET +// ================================================================================================ + +/// Limits placed on a batch's contents. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct BatchBudget { + /// Maximum number of transactions allowed in a batch. + transactions: usize, + /// Maximum number of input notes allowed. + input_notes: usize, + /// Maximum number of output notes allowed. + output_notes: usize, + /// Maximum number of updated accounts. + accounts: usize, +} + +/// Limits placed on a blocks's contents. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct BlockBudget { + /// Maximum number of batches allowed in a block. + batches: usize, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum BudgetStatus { + /// The operation remained within the budget. + WithinScope, + /// The operation exceeded the budget. 
+ Exceeded, +} + +impl Default for BatchBudget { + fn default() -> Self { + Self { + transactions: SERVER_MAX_TXS_PER_BATCH, + input_notes: MAX_INPUT_NOTES_PER_BATCH, + output_notes: MAX_OUTPUT_NOTES_PER_BATCH, + accounts: MAX_ACCOUNTS_PER_BATCH, + } + } +} + +impl Default for BlockBudget { + fn default() -> Self { + Self { batches: SERVER_MAX_BATCHES_PER_BLOCK } + } +} + +impl BatchBudget { + /// Attempts to consume the transaction's resources from the budget. + /// + /// Returns [BudgetStatus::Exceeded] if the transaction would exceed the remaining budget, + /// otherwise returns [BudgetStatus::WithinScope] and subtracts the resources from the budget. + #[must_use] + fn check_then_subtract(&mut self, tx: &AuthenticatedTransaction) -> BudgetStatus { + // This type assertion reminds us to update the account check if we ever support multiple + // account updates per tx. + let _: miden_objects::accounts::AccountId = tx.account_update().account_id(); + const ACCOUNT_UPDATES_PER_TX: usize = 1; + + let output_notes = tx.output_note_count(); + let input_notes = tx.input_note_count(); + + if self.transactions == 0 + || self.accounts < ACCOUNT_UPDATES_PER_TX + || self.input_notes < input_notes + || self.output_notes < output_notes + { + return BudgetStatus::Exceeded; + } + + self.transactions -= 1; + self.accounts -= ACCOUNT_UPDATES_PER_TX; + self.input_notes -= input_notes; + self.output_notes -= output_notes; + + BudgetStatus::WithinScope + } +} + +impl BlockBudget { + /// Attempts to consume the batch's resources from the budget. + /// + /// Returns [BudgetStatus::Exceeded] if the batch would exceed the remaining budget, + /// otherwise returns [BudgetStatus::WithinScope].
+ #[must_use] + fn check_then_subtract(&mut self, _batch: &TransactionBatch) -> BudgetStatus { + if self.batches == 0 { + BudgetStatus::Exceeded + } else { + self.batches -= 1; + BudgetStatus::WithinScope + } + } +} + // MEMPOOL // ================================================================================================ @@ -96,24 +195,25 @@ pub struct Mempool { /// The current block height of the chain. chain_tip: BlockNumber, + /// The current inflight block, if any. block_in_progress: Option>, - batch_transaction_limit: usize, - block_batch_limit: usize, + batch_budget: BatchBudget, + block_budget: BlockBudget, } impl Mempool { /// Creates a new [Mempool] with the provided configuration. pub fn new( chain_tip: BlockNumber, - batch_limit: usize, - block_limit: usize, + batch_budget: BatchBudget, + block_budget: BlockBudget, state_retention: usize, ) -> SharedMempool { Arc::new(Mutex::new(Self { chain_tip, - batch_transaction_limit: batch_limit, - block_batch_limit: block_limit, + batch_budget, + block_budget, state: InflightState::new(chain_tip, state_retention), block_in_progress: Default::default(), transactions: Default::default(), @@ -149,7 +249,7 @@ impl Mempool { /// /// Returns `None` if no transactions are available. 
pub fn select_batch(&mut self) -> Option<(BatchJobId, Vec)> { - let (batch, parents) = self.transactions.select_batch(self.batch_transaction_limit); + let (batch, parents) = self.transactions.select_batch(self.batch_budget); if batch.is_empty() { return None; } @@ -199,7 +299,7 @@ impl Mempool { pub fn select_block(&mut self) -> (BlockNumber, BTreeMap) { assert!(self.block_in_progress.is_none(), "Cannot have two blocks inflight."); - let batches = self.batches.select_block(self.block_batch_limit); + let batches = self.batches.select_block(self.block_budget); self.block_in_progress = Some(batches.keys().cloned().collect()); (self.chain_tip.next(), batches) diff --git a/crates/block-producer/src/mempool/transaction_graph.rs b/crates/block-producer/src/mempool/transaction_graph.rs index 089566269..abefdbce2 100644 --- a/crates/block-producer/src/mempool/transaction_graph.rs +++ b/crates/block-producer/src/mempool/transaction_graph.rs @@ -2,7 +2,10 @@ use std::collections::BTreeSet; use miden_objects::transaction::TransactionId; -use super::dependency_graph::{DependencyGraph, GraphError}; +use super::{ + dependency_graph::{DependencyGraph, GraphError}, + BatchBudget, BudgetStatus, +}; use crate::domain::transaction::AuthenticatedTransaction; // TRANSACTION GRAPH @@ -60,7 +63,9 @@ impl TransactionGraph { self.inner.promote_pending(transaction.id(), transaction) } - /// Selects a set of up-to count transactions for the next batch, as well as their parents. + /// Selects a set of transactions for the next batch such that they adhere to the given budget. + /// + /// Also returns the transactions' parents. /// /// Internally these transactions are considered processed and cannot be emitted in future /// batches. /// - [Self::prune_committed] pub fn select_batch( &mut self, - count: usize, + mut budget: BatchBudget, ) -> (Vec, BTreeSet) { // This strategy just selects arbitrary roots for now.
This is valid but not very interesting or efficient. - let mut batch = Vec::with_capacity(count); + let mut batch = Vec::with_capacity(budget.transactions); let mut parents = BTreeSet::new(); - for _ in 0..count { - let Some(root) = self.inner.roots().first().cloned() else { + while let Some(root) = self.inner.roots().first().cloned() { + // SAFETY: Since it was a root transaction, it must definitely have a processed + // transaction associated with it. + let tx = self.inner.get(&root).unwrap().clone(); + + // Adhere to batch budget. + if budget.check_then_subtract(&tx) == BudgetStatus::Exceeded { break; - }; + } // SAFETY: This is definitely a root since we just selected it from the set of roots. self.inner.process_root(root).unwrap(); - // SAFETY: Since it was a root batch, it must definitely have a processed batch - // associated with it. - let tx = self.inner.get(&root).unwrap(); let tx_parents = self.inner.parents(&root).unwrap(); - batch.push(tx.clone()); + batch.push(tx); parents.extend(tx_parents); } @@ -151,7 +158,7 @@ mod tests { // ================================================================================================ #[test] - fn select_batch_respects_limit() { + fn select_batch_respects_transaction_limit() { // These transactions are independent and just used to ensure we have more available // transactions than we want in the batch.
let txs = (0..10) @@ -163,25 +170,30 @@ mod tests { uut.insert(tx, [].into()).unwrap(); } - let (batch, parents) = uut.select_batch(0); + let (batch, parents) = + uut.select_batch(BatchBudget { transactions: 0, ..Default::default() }); assert!(batch.is_empty()); assert!(parents.is_empty()); - let (batch, parents) = uut.select_batch(3); + let (batch, parents) = + uut.select_batch(BatchBudget { transactions: 3, ..Default::default() }); assert_eq!(batch.len(), 3); assert!(parents.is_empty()); - let (batch, parents) = uut.select_batch(4); + let (batch, parents) = + uut.select_batch(BatchBudget { transactions: 4, ..Default::default() }); assert_eq!(batch.len(), 4); assert!(parents.is_empty()); // We expect this to be partially filled. - let (batch, parents) = uut.select_batch(4); + let (batch, parents) = + uut.select_batch(BatchBudget { transactions: 4, ..Default::default() }); assert_eq!(batch.len(), 3); assert!(parents.is_empty()); // And thereafter empty. - let (batch, parents) = uut.select_batch(100); + let (batch, parents) = + uut.select_batch(BatchBudget { transactions: 100, ..Default::default() }); assert!(batch.is_empty()); assert!(parents.is_empty()); } diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index e1f3bf0b7..ba9abaea5 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -20,9 +20,9 @@ use crate::{ config::BlockProducerConfig, domain::transaction::AuthenticatedTransaction, errors::{AddTransactionError, VerifyTxError}, - mempool::{BlockNumber, Mempool, SharedMempool}, + mempool::{BatchBudget, BlockBudget, BlockNumber, Mempool, SharedMempool}, store::{DefaultStore, Store}, - COMPONENT, SERVER_BATCH_SIZE, SERVER_MAX_BATCHES_PER_BLOCK, SERVER_MEMPOOL_STATE_RETENTION, + COMPONENT, SERVER_MEMPOOL_STATE_RETENTION, }; /// Represents an initialized block-producer component where the RPC connection is open, @@ -34,8 +34,8 @@ use crate::{ pub struct BlockProducer { 
batch_builder: BatchBuilder, block_builder: BlockBuilder, - batch_limit: usize, - block_limit: usize, + batch_budget: BatchBudget, + block_budget: BlockBudget, state_retention: usize, rpc_listener: TcpListener, store: DefaultStore, @@ -76,8 +76,8 @@ impl BlockProducer { Ok(Self { batch_builder: Default::default(), block_builder: BlockBuilder::new(store.clone()), - batch_limit: SERVER_BATCH_SIZE, - block_limit: SERVER_MAX_BATCHES_PER_BLOCK, + batch_budget: Default::default(), + block_budget: Default::default(), state_retention: SERVER_MEMPOOL_STATE_RETENTION, store, rpc_listener, @@ -89,15 +89,15 @@ impl BlockProducer { let Self { batch_builder, block_builder, - batch_limit, - block_limit, + batch_budget, + block_budget, state_retention, rpc_listener, store, chain_tip, } = self; - let mempool = Mempool::new(chain_tip, batch_limit, block_limit, state_retention); + let mempool = Mempool::new(chain_tip, batch_budget, block_budget, state_retention); // Spawn rpc server and batch and block provers. // From 39828bff73772907d2c1b6adf9c269cc05303e9b Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 29 Nov 2024 16:52:10 +0200 Subject: [PATCH 2/2] fix(mempool): allow internal batch dependencies (#549) Notably also merges `next` in. 
--- CHANGELOG.md | 16 +- Cargo.lock | 146 ++++++++--------- Cargo.toml | 12 +- bin/node/Cargo.toml | 2 +- bin/node/src/commands/start.rs | 43 +++-- crates/block-producer/Cargo.toml | 2 +- .../block-producer/src/batch_builder/mod.rs | 2 +- .../block-producer/src/block_builder/mod.rs | 2 +- crates/block-producer/src/errors.rs | 15 ++ .../block-producer/src/mempool/batch_graph.rs | 35 ++++- .../src/mempool/dependency_graph.rs | 2 +- .../mempool/inflight_state/account_state.rs | 8 +- .../src/mempool/inflight_state/mod.rs | 2 +- crates/block-producer/src/mempool/mod.rs | 18 ++- .../src/mempool/transaction_graph.rs | 10 +- crates/block-producer/src/server/mod.rs | 44 +++--- crates/block-producer/src/store/mod.rs | 6 - crates/rpc/src/server/api.rs | 3 - crates/store/src/db/mod.rs | 4 +- crates/store/src/db/sql.rs | 4 +- crates/store/src/db/tests.rs | 50 +++--- crates/store/src/errors.rs | 147 +++++++++++------- crates/store/src/server/api.rs | 6 +- crates/store/src/state.rs | 47 +++--- 24 files changed, 363 insertions(+), 263 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7257a5da3..ed4c12a1b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,22 +1,28 @@ # Changelog -## v0.6.0 (TBD) +## v0.6.0 (2024-11-05) +### Enhancements + +- Added `GetAccountProofs` endpoint (#506). + +### Changes + +- [BREAKING] Added `kernel_root` to block header's protobuf message definitions (#496). +- [BREAKING] Renamed `off-chain` and `on-chain` to `private` and `public` respectively for the account storage modes (#489). - Optimized state synchronizations by removing unnecessary fetching and parsing of note details (#462). - [BREAKING] Changed `GetAccountDetailsResponse` field to `details` (#481). - Improve `--version` by adding build metadata (#495). -- [BREAKING] Added `kernel_root` to block header's protobuf message definitions (#496). -- [BREAKING] Renamed `off-chain` and `on-chain` to `private` and `public` respectively for the account storage modes (#489). 
- [BREAKING] Introduced additional limits for note/account number (#503). - [BREAKING] Removed support for basic wallets in genesis creation (#510). -- Added `GetAccountProofs` endpoint (#506). - Migrated faucet from actix-web to axum (#511). - Changed the `BlockWitness` to pass the inputs to the VM using only advice provider (#516). -- [BREAKING] Changed faucet storage type in the genesis to public. Using faucet from the genesis for faucet web app. Added support for faucet restarting without blockchain restarting (#517). - [BREAKING] Improved store API errors (return "not found" instead of "internal error" status if requested account(s) not found) (#518). - Added `AccountCode` as part of `GetAccountProofs` endpoint response (#521). - [BREAKING] Migrated to v0.11 version of Miden VM (#528). - Reduce cloning in the store's `apply_block` (#532). +- [BREAKING] Changed faucet storage type in the genesis to public. Using faucet from the genesis for faucet web app. Added support for faucet restarting without blockchain restarting (#517). +- [BREAKING] Improved `ApplyBlockError` in the store (#535). 
## 0.5.1 (2024-09-12) diff --git a/Cargo.lock b/Cargo.lock index 4de7bc004..e5468123c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,9 +55,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.17" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23a1e53f0f5d86382dafe1cf314783b2044280f406e7e1506368220ad11b1338" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" dependencies = [ "anstyle", "anstyle-parse", @@ -395,9 +395,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.34" +version = "1.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b9470d453346108f93a59222a9a1a5724db32d0a4727b7ab7ace4b4d822dc9" +checksum = "0f57c4b4da2a9d619dd035f27316d7a426305b75be93d09e92f2b9229c34feaf" dependencies = [ "jobserver", "libc", @@ -966,9 +966,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.0" +version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" +checksum = "3a9bfc1af68b1726ea47d3d5109de126281def866b33970e10fbab11b5dafab3" [[package]] name = "hashlink" @@ -1066,9 +1066,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", "hyper-util", @@ -1148,7 +1148,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.15.0", + "hashbrown 0.15.1", ] [[package]] @@ -1403,8 +1403,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "miden-air" 
-version = "0.10.5" -source = "git+https://github.com/0xPolygonMiden/miden-vm.git?branch=next#f1c0553859c42c56f28d1d6aaf66d3e2f9907bd1" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea00ea451d1547978817a37c1904ff34f4a24de2c23f9b77b282a0bc570ac3f1" dependencies = [ "miden-core", "miden-thiserror", @@ -1414,8 +1415,9 @@ dependencies = [ [[package]] name = "miden-assembly" -version = "0.10.5" -source = "git+https://github.com/0xPolygonMiden/miden-vm.git?branch=next#f1c0553859c42c56f28d1d6aaf66d3e2f9907bd1" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58925d5ab3d625b8e828267a64dd555b1fe37cd5a1d89d09224920d3255de5a9" dependencies = [ "aho-corasick", "lalrpop", @@ -1426,13 +1428,14 @@ dependencies = [ "rustc_version 0.4.1", "smallvec", "tracing", - "unicode-width", + "unicode-width 0.2.0", ] [[package]] name = "miden-core" -version = "0.10.5" -source = "git+https://github.com/0xPolygonMiden/miden-vm.git?branch=next#f1c0553859c42c56f28d1d6aaf66d3e2f9907bd1" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba2f0ffd0362c91c0eedba391a3c17a8047389e15020ec95b0d7e840375bf03" dependencies = [ "lock_api", "loom", @@ -1450,9 +1453,9 @@ dependencies = [ [[package]] name = "miden-crypto" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e0ca714c8242f329b9ea6f1a5bf0e93f1490f348f982e3a606d91b884254308" +checksum = "f50a68deed96cde1f51eb623f75828e320f699e0d798f11592f8958ba8b512c3" dependencies = [ "blake3", "cc", @@ -1502,13 +1505,14 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e392e0a8c34b32671012b439de35fa8987bf14f0f8aac279b97f8b8cc6e263b" dependencies = [ - "unicode-width", + "unicode-width 0.1.14", ] [[package]] name = "miden-lib" version = "0.6.0" -source = 
"git+https://github.com/0xPolygonMiden/miden-base.git?branch=next#f621c437bd5eb67d7b4a82d98a3eec8d238bb6cb" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "816404c10b0799f12d3b53b3a9baa9af99fa340fe1a579e0919bba57718fa97a" dependencies = [ "miden-assembly", "miden-objects", @@ -1544,7 +1548,7 @@ dependencies = [ "terminal_size", "textwrap", "trybuild", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -1705,7 +1709,8 @@ dependencies = [ [[package]] name = "miden-objects" version = "0.6.0" -source = "git+https://github.com/0xPolygonMiden/miden-base.git?branch=next#f621c437bd5eb67d7b4a82d98a3eec8d238bb6cb" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a6f54dde939928e438488b36651485a0e80057025b7e30343d4340a524a1651" dependencies = [ "getrandom", "miden-assembly", @@ -1719,8 +1724,9 @@ dependencies = [ [[package]] name = "miden-processor" -version = "0.10.6" -source = "git+https://github.com/0xPolygonMiden/miden-vm.git?branch=next#f1c0553859c42c56f28d1d6aaf66d3e2f9907bd1" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e1a756be679bfe2bdb209b9f4f12b4b59e8cac7c9f884ad90535b32bbd75923" dependencies = [ "miden-air", "miden-core", @@ -1730,12 +1736,14 @@ dependencies = [ [[package]] name = "miden-prover" -version = "0.10.5" -source = "git+https://github.com/0xPolygonMiden/miden-vm.git?branch=next#f1c0553859c42c56f28d1d6aaf66d3e2f9907bd1" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b6c2fc7ed9831493e1c18fb9d15a4b780ec624e9192f494ad3900c19bfe5b17" dependencies = [ "miden-air", "miden-processor", "tracing", + "winter-maybe-async", "winter-prover", ] @@ -1745,8 +1753,9 @@ version = "0.6.0" [[package]] name = "miden-stdlib" -version = "0.10.5" -source = "git+https://github.com/0xPolygonMiden/miden-vm.git?branch=next#f1c0553859c42c56f28d1d6aaf66d3e2f9907bd1" +version = 
"0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09948fda011d044273d1bbc56669da410ee3167f96bf1ad9885c946cfbed5757" dependencies = [ "miden-assembly", ] @@ -1774,7 +1783,8 @@ dependencies = [ [[package]] name = "miden-tx" version = "0.6.0" -source = "git+https://github.com/0xPolygonMiden/miden-base.git?branch=next#f621c437bd5eb67d7b4a82d98a3eec8d238bb6cb" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d433744a3f02233ec53b151bfe88b7a008b4eae5dfd41bb38a9889eb67efaf7" dependencies = [ "async-trait", "miden-assembly", @@ -1787,13 +1797,14 @@ dependencies = [ "rand_chacha", "regex", "walkdir", - "winter-maybe-async 0.10.1", + "winter-maybe-async", ] [[package]] name = "miden-verifier" -version = "0.10.5" -source = "git+https://github.com/0xPolygonMiden/miden-vm.git?branch=next#f1c0553859c42c56f28d1d6aaf66d3e2f9907bd1" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "636b2eab9dc0a9fdd6d30d9ece3cf276b012e9ecbccd934d4c0befa07c84cfd0" dependencies = [ "miden-air", "miden-core", @@ -1818,7 +1829,7 @@ dependencies = [ "terminal_size", "textwrap", "thiserror", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -2534,9 +2545,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.38" +version = "0.38.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa260229e6538e52293eeb577aabd09945a09d6d9cc0fc550ed7529056c2e32a" +checksum = "375116bee2be9ed569afe2154ea6a99dfdffd257f533f187498c2a8f5feaf4ee" dependencies = [ "bitflags", "errno", @@ -2885,23 +2896,23 @@ checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" dependencies = [ "smawk", "unicode-linebreak", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] name = "thiserror" -version = "1.0.66" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5d171f59dbaa811dbbb1aee1e73db92ec2b122911a48e1390dfe327a821ddede" +checksum = "02dd99dc800bbb97186339685293e1cc5d9df1f8fae2d0aecd9ff1c77efea892" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.66" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b08be0f17bd307950653ce45db00cd31200d82b624b36e181337d9c7d92765b5" +checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" dependencies = [ "proc-macro2", "quote", @@ -3344,6 +3355,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + [[package]] name = "unicode-xid" version = "0.2.6" @@ -3786,9 +3803,9 @@ dependencies = [ [[package]] name = "winter-air" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72f12b88ebb060b52c0e9aece9bb64a9fc38daf7ba689dd5ce63271b456c883" +checksum = "29bec0b06b741543f43e3a6677b95b200d4cad2daab76e6721e14345345bfd0e" dependencies = [ "libm", "winter-crypto", @@ -3799,9 +3816,9 @@ dependencies = [ [[package]] name = "winter-crypto" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00fbb724d2d9fbfd3aa16ea27f5e461d4fe1d74b0c9e0ed1bf79e9e2a955f4d5" +checksum = "163da45f1d4d65cac361b8df4835a6daa95b3399154e16eb0305c178c6f6c1f4" dependencies = [ "blake3", "sha3", @@ -3811,9 +3828,9 @@ dependencies = [ [[package]] name = "winter-fri" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab6077cf4c23c0411f591f4ba29378e27f26acb8cef3c51cadd93daaf6080b3" +checksum = 
"3b7b394670d68979a4cc21a37a95ef8ef350cf84be9256c53effe3052df50d26" dependencies = [ "winter-crypto", "winter-math", @@ -3822,24 +3839,13 @@ dependencies = [ [[package]] name = "winter-math" -version = "0.9.3" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0e685b3b872d82e58a86519294a814b7bc7a4d3cd2c93570a7d80c0c5a1aba" +checksum = "5a8ba832121679e79b004b0003018c85873956d742a39c348c247f680fe15e00" dependencies = [ "winter-utils", ] -[[package]] -name = "winter-maybe-async" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ce0f4161cdde50de809b3869c1cb083a09e92e949428ea28f04c0d64045875c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "winter-maybe-async" version = "0.10.1" @@ -3852,24 +3858,24 @@ dependencies = [ [[package]] name = "winter-prover" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f17e3dbae97050f58e01ed4f12906e247841575a0518632e052941a1c37468df" +checksum = "2f55f0153d26691caaf969066a13a824bcf3c98719d71b0f569bf8dc40a06fb9" dependencies = [ "tracing", "winter-air", "winter-crypto", "winter-fri", "winter-math", - "winter-maybe-async 0.9.0", + "winter-maybe-async", "winter-utils", ] [[package]] name = "winter-rand-utils" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2b827c901ab0c316d89812858ff451d60855c0a5c7ae734b098c62a28624181" +checksum = "4a7616d11fcc26552dada45c803a884ac97c253218835b83a2c63e1c2a988639" dependencies = [ "rand", "winter-utils", @@ -3877,18 +3883,18 @@ dependencies = [ [[package]] name = "winter-utils" -version = "0.9.3" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "961e81e9388877a25db1c034ba38253de2055f569633ae6a665d857a0556391b" +checksum = 
"76b116c8ade0172506f8bda32dc674cf6b230adc8516e5138a0173ae69158a4f" dependencies = [ "rayon", ] [[package]] name = "winter-verifier" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324002ade90f21e85599d51a232a80781efc8cb46f511f8bc89f9c5a4eb9cb65" +checksum = "2ae1648768f96f5e6321a48a5bff5cc3101d2e51b23a6a095c6c9c9e133ecb61" dependencies = [ "winter-air", "winter-crypto", @@ -3899,9 +3905,9 @@ dependencies = [ [[package]] name = "winterfell" -version = "0.9.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01151ac5fe2d783950743e8a110e0a2f26994f888b4cbe848699142cb3ea1e5b" +checksum = "39c8336dc6a035698780b8cc624f875e479bd6bf6e1846670f3ef4485c125882" dependencies = [ "winter-air", "winter-prover", diff --git a/Cargo.toml b/Cargo.toml index 9e0d53b99..ce5eb685e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,8 +25,8 @@ exclude = [".github/"] readme = "README.md" [workspace.dependencies] -miden-air = { git = "https://github.com/0xPolygonMiden/miden-vm.git", branch = "next" } -miden-lib = { git = "https://github.com/0xPolygonMiden/miden-base.git", branch = "next" } +miden-air = { version = "0.11" } +miden-lib = { version = "0.6" } miden-node-block-producer = { path = "crates/block-producer", version = "0.6" } miden-node-faucet = { path = "bin/faucet", version = "0.6" } miden-node-proto = { path = "crates/proto", version = "0.6" } @@ -35,10 +35,10 @@ miden-node-rpc-proto = { path = "crates/rpc-proto", version = "0.6" } miden-node-store = { path = "crates/store", version = "0.6" } miden-node-test-macro = { path = "crates/test-macro" } miden-node-utils = { path = "crates/utils", version = "0.6" } -miden-objects = { git = "https://github.com/0xPolygonMiden/miden-base.git", branch = "next" } -miden-processor = { git = "https://github.com/0xPolygonMiden/miden-vm.git", branch = "next" } -miden-stdlib = { git = 
"https://github.com/0xPolygonMiden/miden-vm.git", branch = "next", default-features = false } -miden-tx = { git = "https://github.com/0xPolygonMiden/miden-base.git", branch = "next" } +miden-objects = { version = "0.6"} +miden-processor = { version = "0.11" } +miden-stdlib = { version = "0.11", default-features = false } +miden-tx = { version = "0.6"} prost = { version = "0.13" } rand = { version = "0.8" } thiserror = { version = "1.0" } diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index b855cbe64..d2d9b5d91 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -14,7 +14,7 @@ repository.workspace = true [features] # Makes `make-genesis` subcommand run faster. Is only suitable for testing. # INFO: Make sure that all your components have matching features for them to function. -testing = ["miden-lib/testing"] +testing = ["miden-lib/testing", "miden-objects/testing"] tracing-forest = ["miden-node-block-producer/tracing-forest"] [dependencies] diff --git a/bin/node/src/commands/start.rs b/bin/node/src/commands/start.rs index acbd356ac..f1e46fbf9 100644 --- a/bin/node/src/commands/start.rs +++ b/bin/node/src/commands/start.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use anyhow::{Context, Result}; use miden_node_block_producer::server::BlockProducer; use miden_node_rpc::server::Rpc; @@ -16,22 +18,41 @@ pub async fn start_node(config: NodeConfig) -> Result<()> { // Start store. The store endpoint is available after loading completes. let store = Store::init(store).await.context("Loading store")?; - join_set.spawn(async move { store.serve().await.context("Serving store") }); + let store_id = join_set.spawn(async move { store.serve().await.context("Serving store") }).id(); // Start block-producer. The block-producer's endpoint is available after loading completes. 
let block_producer = BlockProducer::init(block_producer).await.context("Loading block-producer")?; - join_set.spawn(async move { block_producer.serve().await.context("Serving block-producer") }); + let block_producer_id = join_set + .spawn(async move { block_producer.serve().await.context("Serving block-producer") }) + .id(); // Start RPC component. let rpc = Rpc::init(rpc).await.context("Loading RPC")?; - join_set.spawn(async move { rpc.serve().await.context("Serving RPC") }); - - // block on all tasks - while let Some(res) = join_set.join_next().await { - // For now, if one of the components fails, crash the node - res??; - } - - Ok(()) + let rpc_id = join_set.spawn(async move { rpc.serve().await.context("Serving RPC") }).id(); + + // Lookup table so we can identify the failed component. + let component_ids = HashMap::from([ + (store_id, "store"), + (block_producer_id, "block-producer"), + (rpc_id, "rpc"), + ]); + + // SAFETY: The joinset is definitely not empty. + let component_result = join_set.join_next_with_id().await.unwrap(); + + // We expect components to run indefinitely, so we treat any return as fatal. + // + // Map all outcomes to an error, and provide component context. + let (id, err) = match component_result { + Ok((id, Ok(_))) => (id, Err(anyhow::anyhow!("Component completed unexpectedly"))), + Ok((id, Err(err))) => (id, Err(err)), + Err(join_err) => (join_err.id(), Err(join_err).context("Joining component task")), + }; + let component = component_ids.get(&id).unwrap_or(&"unknown"); + + // We could abort and gracefully shutdown the other components, but since we're crashing the + // node there is no point. 
+ + err.context(format!("Component {component} failed")) } diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index ec4eec1b1..f6d987b2e 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -44,4 +44,4 @@ miden-objects = { workspace = true, features = ["testing"] } miden-tx = { workspace = true, features = ["testing"] } rand_chacha = { version = "0.3", default-features = false } tokio = { workspace = true, features = ["test-util"] } -winterfell = { version = "0.9" } +winterfell = { version = "0.10" } diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index 830c68c3b..30070b58e 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -48,7 +48,7 @@ impl Default for BatchBuilder { } impl BatchBuilder { - /// Starts the [BatchProducer], creating and proving batches at the configured interval. + /// Starts the [BatchBuilder], creating and proving batches at the configured interval. /// /// A pool of batch-proving workers is spawned, which are fed new batch jobs periodically. /// A batch is skipped if there are no available workers, or if there are no transactions diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index f67501cbe..a8c017c7f 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -59,7 +59,7 @@ impl BlockBuilder { /// /// Block production is sequential and consists of /// - /// 1. Pulling the next set of batches from the [Mempool] + /// 1. Pulling the next set of batches from the mempool /// 2. Compiling these batches into the next block /// 3. Proving the block (this is simulated using random sleeps) /// 4. 
Committing the block to the store diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 46a50be6e..9ff3a76bb 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -9,9 +9,24 @@ use miden_objects::{ }; use miden_processor::ExecutionError; use thiserror::Error; +use tokio::task::JoinError; use crate::mempool::BlockNumber; +// Block-producer errors +// ================================================================================================= + +#[derive(Debug, Error)] +pub enum BlockProducerError { + /// A block-producer task completed although it should have ran indefinitely. + #[error("task {task} completed unexpectedly")] + TaskFailedSuccesfully { task: &'static str }, + + /// A block-producer task panic'd. + #[error("error joining {task} task")] + JoinError { task: &'static str, source: JoinError }, +} + // Transaction verification errors // ================================================================================================= diff --git a/crates/block-producer/src/mempool/batch_graph.rs b/crates/block-producer/src/mempool/batch_graph.rs index 1eebd71e8..dea63d992 100644 --- a/crates/block-producer/src/mempool/batch_graph.rs +++ b/crates/block-producer/src/mempool/batch_graph.rs @@ -24,11 +24,11 @@ use crate::batch_builder::batch::TransactionBatch; /// Committed batches (i.e. included in blocks) may be [pruned](Self::prune_committed) from the /// graph to bound the graph's size. /// -/// Batches may also be outright [purged](Self::purge_subgraphs) from the graph. This is useful for +/// Batches may also be outright [purged](Self::remove_batches) from the graph. This is useful for /// batches which may have become invalid due to external considerations e.g. expired transactions. 
/// /// # Batch lifecycle -/// ``` +/// ```text /// │ /// insert│ /// ┌─────▼─────┐ @@ -50,7 +50,7 @@ use crate::batch_builder::batch::TransactionBatch; /// │ ◄────┘ /// └───────────┘ /// ``` -#[derive(Default, Clone)] +#[derive(Default, Debug, Clone)] pub struct BatchGraph { /// Tracks the interdependencies between batches. inner: DependencyGraph, @@ -81,6 +81,10 @@ pub enum BatchInsertError { impl BatchGraph { /// Inserts a new batch into the graph. /// + /// Parents are the transactions on which the given transactions have a direct dependency. This + /// includes transactions within the same batch i.e. a transaction and parent transaction may + /// both be in this batch. + /// /// # Errors /// /// Returns an error if: @@ -91,7 +95,7 @@ impl BatchGraph { &mut self, id: BatchJobId, transactions: Vec, - parents: BTreeSet, + mut parents: BTreeSet, ) -> Result<(), BatchInsertError> { let duplicates = transactions .iter() @@ -102,7 +106,11 @@ impl BatchGraph { return Err(BatchInsertError::DuplicateTransactions(duplicates)); } - // Reverse lookup parent transaction batches. + // Reverse lookup parent batch IDs. Take care to allow for parent transactions within this + // batch i.e. internal dependencies. + transactions.iter().for_each(|tx| { + parents.remove(tx); + }); let parent_batches = parents .into_iter() .map(|tx| { @@ -233,7 +241,7 @@ impl BatchGraph { } } -#[cfg(test)] +#[cfg(any(test, doctest))] mod tests { use super::*; use crate::test_utils::Random; @@ -284,13 +292,24 @@ mod tests { assert_eq!(err, expected); } + #[test] + fn insert_with_internal_parent_succeeds() { + // Ensure that a batch with internal dependencies can be inserted. 
+ let mut rng = Random::with_random_seed(); + let parent = rng.draw_tx_id(); + let child = rng.draw_tx_id(); + + let mut uut = BatchGraph::default(); + uut.insert(BatchJobId::new(2), vec![parent, child], [parent].into()).unwrap(); + } + // PURGE_SUBGRAPHS TESTS // ================================================================================================ #[test] fn purge_subgraphs_returns_all_purged_transaction_sets() { - //! Ensure that purge_subgraphs returns both parent and child batches when the parent is - //! pruned. Further ensure that a disjoint batch is not pruned. + // Ensure that purge_subgraphs returns both parent and child batches when the parent is + // pruned. Further ensure that a disjoint batch is not pruned. let mut rng = Random::with_random_seed(); let parent_batch_txs = (0..5).map(|_| rng.draw_tx_id()).collect::>(); let child_batch_txs = (0..5).map(|_| rng.draw_tx_id()).collect::>(); diff --git a/crates/block-producer/src/mempool/dependency_graph.rs b/crates/block-producer/src/mempool/dependency_graph.rs index 37b247590..9814d0059 100644 --- a/crates/block-producer/src/mempool/dependency_graph.rs +++ b/crates/block-producer/src/mempool/dependency_graph.rs @@ -12,7 +12,7 @@ use std::{ /// Forms the basis of our transaction and batch dependency graphs. /// /// # Node lifecycle -/// ``` +/// ```text /// │ /// │ /// insert_pending│ diff --git a/crates/block-producer/src/mempool/inflight_state/account_state.rs b/crates/block-producer/src/mempool/inflight_state/account_state.rs index f929ab114..dc79a339a 100644 --- a/crates/block-producer/src/mempool/inflight_state/account_state.rs +++ b/crates/block-producer/src/mempool/inflight_state/account_state.rs @@ -115,15 +115,15 @@ impl InflightAccountState { } } -/// Describes the emptiness of an [AccountState]. +/// Describes the emptiness of an [InflightAccountState]. /// -/// Is marked as #[must_use] so that callers handle prune empty accounts. 
+/// Is marked as `#[must_use]` so that callers handle prune empty accounts. #[must_use] #[derive(Clone, Copy, PartialEq, Eq)] pub enum AccountStatus { - /// [AccountState] contains no state and should be pruned. + /// [InflightAccountState] contains no state and should be pruned. Empty, - /// [AccountState] contains state and should be kept. + /// [InflightAccountState] contains state and should be kept. NonEmpty, } diff --git a/crates/block-producer/src/mempool/inflight_state/mod.rs b/crates/block-producer/src/mempool/inflight_state/mod.rs index 233613c26..e26c1b73e 100644 --- a/crates/block-producer/src/mempool/inflight_state/mod.rs +++ b/crates/block-producer/src/mempool/inflight_state/mod.rs @@ -29,7 +29,7 @@ use super::BlockNumber; pub struct InflightState { /// Account states from inflight transactions. /// - /// Accounts which are [AccountStatus::Empty] are immediately pruned. + /// Accounts which are empty are immediately pruned. accounts: BTreeMap, /// Nullifiers produced by the input notes of inflight transactions. diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 5a59f2aca..c56b72208 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -238,7 +238,9 @@ impl Mempool { // Add transaction to inflight state. 
let parents = self.state.add_transaction(&transaction)?; - self.transactions.insert(transaction, parents).expect("Malformed graph"); + self.transactions + .insert(transaction, parents) + .expect("Transaction should insert after passing inflight state"); Ok(self.chain_tip.0) } @@ -258,7 +260,9 @@ impl Mempool { let batch_id = self.next_batch_id; self.next_batch_id.increment(); - self.batches.insert(batch_id, tx_ids, parents).expect("Malformed graph"); + self.batches + .insert(batch_id, tx_ids, parents) + .expect("Selected batch should insert"); Some((batch_id, batch)) } @@ -279,14 +283,16 @@ impl Mempool { let batches = removed_batches.keys().copied().collect::>(); let transactions = removed_batches.into_values().flatten().collect(); - self.transactions.requeue_transactions(transactions).expect("Malformed graph"); + self.transactions + .requeue_transactions(transactions) + .expect("Transaction should requeue"); tracing::warn!(%batch, descendents=?batches, "Batch failed, dropping all inflight descendent batches, impacted transactions are back in queue."); } /// Marks a batch as proven if it exists. pub fn batch_proved(&mut self, batch_id: BatchJobId, batch: TransactionBatch) { - self.batches.submit_proof(batch_id, batch).expect("Malformed graph"); + self.batches.submit_proof(batch_id, batch).expect("Batch proof should submit"); } /// Select batches for the next block. @@ -338,13 +344,13 @@ impl Mempool { let batches = self.block_in_progress.take().expect("No block in progress to be failed"); // Remove all transactions from the graphs. - let purged = self.batches.remove_batches(batches).expect("Bad graph"); + let purged = self.batches.remove_batches(batches).expect("Batch should be removed"); let transactions = purged.into_values().flatten().collect(); let transactions = self .transactions .remove_transactions(transactions) - .expect("Transaction graph is malformed"); + .expect("Failed transactions should be removed"); // Rollback state. 
self.state.revert_transactions(transactions); diff --git a/crates/block-producer/src/mempool/transaction_graph.rs b/crates/block-producer/src/mempool/transaction_graph.rs index abefdbce2..ced50cdd1 100644 --- a/crates/block-producer/src/mempool/transaction_graph.rs +++ b/crates/block-producer/src/mempool/transaction_graph.rs @@ -19,14 +19,14 @@ use crate::domain::transaction::AuthenticatedTransaction; /// /// Transactions from failed batches may be [re-queued](Self::requeue_transactions) for batch /// selection. Successful batches will eventually form part of a committed block at which point the -/// transaction data may be safely [pruned](Self::prune_committed). +/// transaction data may be safely [pruned](Self::commit_transactions). /// /// Transactions may also be outright [purged](Self::remove_transactions) from the graph. This is /// useful for transactions which may have become invalid due to external considerations e.g. /// expired transactions. /// /// # Transaction lifecycle: -/// ``` +/// ```text /// │ /// insert│ /// ┌─────▼─────┐ @@ -53,7 +53,7 @@ impl TransactionGraph { /// /// # Errors /// - /// Follows the error conditions of [DependencyGraph::insert]. + /// Follows the error conditions of [DependencyGraph::insert_pending]. pub fn insert( &mut self, transaction: AuthenticatedTransaction, @@ -74,7 +74,7 @@ impl TransactionGraph { /// /// See also: /// - [Self::requeue_transactions] - /// - [Self::prune_committed] + /// - [Self::commit_transactions] pub fn select_batch( &mut self, mut budget: BatchBudget, @@ -109,7 +109,7 @@ impl TransactionGraph { /// /// # Errors /// - /// Follows the error conditions of [DependencyGraph::requeue]. + /// Follows the error conditions of [DependencyGraph::revert_subgraphs]. 
pub fn requeue_transactions( &mut self, transactions: BTreeSet, diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index ba9abaea5..dbd32ed30 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -1,4 +1,4 @@ -use std::net::ToSocketAddrs; +use std::{collections::HashMap, net::ToSocketAddrs}; use miden_node_proto::generated::{ block_producer::api_server, requests::SubmitProvenTransactionRequest, @@ -19,7 +19,7 @@ use crate::{ block_builder::BlockBuilder, config::BlockProducerConfig, domain::transaction::AuthenticatedTransaction, - errors::{AddTransactionError, VerifyTxError}, + errors::{AddTransactionError, BlockProducerError, VerifyTxError}, mempool::{BatchBudget, BlockBudget, BlockNumber, Mempool, SharedMempool}, store::{DefaultStore, Store}, COMPONENT, SERVER_MEMPOOL_STATE_RETENTION, @@ -49,7 +49,6 @@ impl BlockProducer { pub async fn init(config: BlockProducerConfig) -> Result { info!(target: COMPONENT, %config, "Initializing server"); - // TODO: Does this actually need an arc to be properly shared? let store = DefaultStore::new( store_client::ApiClient::connect(config.store_url.to_string()) .await @@ -85,7 +84,7 @@ impl BlockProducer { }) } - pub async fn serve(self) -> Result<(), ApiError> { + pub async fn serve(self) -> Result<(), BlockProducerError> { let Self { batch_builder, block_builder, @@ -125,38 +124,33 @@ impl BlockProducer { BlockProducerRpcServer::new(mempool, store) .serve(rpc_listener) .await - .expect("Really the rest should throw errors instead of panic'ing.") + .expect("block-producer failed") }) .id(); - // Wait for any task to end. They should run forever, so this is an unexpected result. + let task_ids = HashMap::from([ + (batch_builder_id, "batch-builder"), + (block_builder_id, "block-builder"), + (rpc_id, "rpc"), + ]); + // Wait for any task to end. They should run indefinitely, so this is an unexpected result. 
+ // // SAFETY: The JoinSet is definitely not empty. let task_result = tasks.join_next_with_id().await.unwrap(); + let task_id = match &task_result { - Ok((id, _)) => *id, + Ok((id, ())) => *id, Err(err) => err.id(), }; + let task = task_ids.get(&task_id).unwrap_or(&"unknown"); - let task_name = match task_id { - id if id == batch_builder_id => "batch-builder", - id if id == block_builder_id => "block-builder", - id if id == rpc_id => "rpc", - _ => { - tracing::warn!("An unknown task ID was detected in the block-producer."); - "unknown" - }, - }; - - tracing::error!( - task = task_name, - result = ?task_result, - "Block-producer task ended unexpectedly, aborting" - ); - - tasks.abort_all(); + // We could abort the other tasks here, but not much point as we're probably crashing the + // node. - Ok(()) + task_result + .map_err(|source| BlockProducerError::JoinError { task, source }) + .map(|(_, ())| Err(BlockProducerError::TaskFailedSuccesfully { task }))? } } diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index 476c05a07..2b34d75d4 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -168,9 +168,6 @@ impl DefaultStore { } } -// FIXME: remove the allow when the upstream clippy issue is fixed: -// https://github.com/rust-lang/rust-clippy/issues/12281 -#[allow(clippy::blocks_in_conditions)] #[async_trait] impl ApplyBlock for DefaultStore { #[instrument(target = "miden-block-producer", skip_all, err)] @@ -188,9 +185,6 @@ impl ApplyBlock for DefaultStore { } } -// FIXME: remove the allow when the upstream clippy issue is fixed: -// https://github.com/rust-lang/rust-clippy/issues/12281 -#[allow(clippy::blocks_in_conditions)] #[async_trait] impl Store for DefaultStore { #[instrument(target = "miden-block-producer", skip_all, err)] diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 5c25bd480..56f97c788 100644 --- a/crates/rpc/src/server/api.rs +++ 
b/crates/rpc/src/server/api.rs @@ -56,9 +56,6 @@ impl RpcApi { } } -// FIXME: remove the allow when the upstream clippy issue is fixed: -// https://github.com/rust-lang/rust-clippy/issues/12281 -#[allow(clippy::blocks_in_conditions)] #[tonic::async_trait] impl api_server::Api for RpcApi { #[instrument( diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 6e04e67b1..45c462cf4 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -443,9 +443,7 @@ impl Db { )?; let _ = allow_acquire.send(()); - acquire_done - .blocking_recv() - .map_err(DatabaseError::ApplyBlockFailedClosedChannel)?; + acquire_done.blocking_recv()?; transaction.commit()?; diff --git a/crates/store/src/db/sql.rs b/crates/store/src/db/sql.rs index 9980f40dd..8320f0db4 100644 --- a/crates/store/src/db/sql.rs +++ b/crates/store/src/db/sql.rs @@ -254,7 +254,7 @@ pub fn upsert_accounts( debug_assert_eq!(account_id, u64::from(account.id())); if account.hash() != update.new_state_hash() { - return Err(DatabaseError::ApplyBlockFailedAccountHashesMismatch { + return Err(DatabaseError::AccountHashesMismatch { calculated: account.hash(), expected: update.new_state_hash(), }); @@ -1151,7 +1151,7 @@ fn apply_delta( let actual_hash = account.hash(); if &actual_hash != final_state_hash { - return Err(DatabaseError::ApplyBlockFailedAccountHashesMismatch { + return Err(DatabaseError::AccountHashesMismatch { calculated: actual_hash, expected: *final_state_hash, }); diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index a79166003..a63f982f4 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -8,8 +8,8 @@ use miden_objects::{ ACCOUNT_ID_REGULAR_ACCOUNT_UPDATABLE_CODE_OFF_CHAIN, }, delta::AccountUpdateDetails, - Account, AccountCode, AccountDelta, AccountId, AccountStorage, AccountStorageDelta, - AccountVaultDelta, StorageSlot, + Account, AccountCode, AccountComponent, AccountDelta, AccountId, AccountStorage, + 
AccountStorageDelta, AccountType, AccountVaultDelta, StorageSlot, }, assets::{Asset, AssetVault, FungibleAsset, NonFungibleAsset, NonFungibleAssetDetails}, block::{BlockAccountUpdate, BlockNoteIndex, BlockNoteTree}, @@ -334,14 +334,6 @@ fn test_sql_public_account_details() { let non_fungible_faucet_id = AccountId::try_from(ACCOUNT_ID_NON_FUNGIBLE_FAUCET_ON_CHAIN).unwrap(); - let mut storage = AccountStorage::new( - std::iter::repeat(StorageSlot::Value(Word::default())).take(6).collect(), - ) - .unwrap(); - storage.set_item(1, num_to_word(1)).unwrap(); - storage.set_item(3, num_to_word(3)).unwrap(); - storage.set_item(5, num_to_word(5)).unwrap(); - let nft1 = Asset::NonFungible( NonFungibleAsset::new( &NonFungibleAssetDetails::new(non_fungible_faucet_id, vec![1, 2, 3]).unwrap(), @@ -349,6 +341,8 @@ fn test_sql_public_account_details() { .unwrap(), ); + let (code, storage) = mock_account_code_and_storage(account_id.account_type()); + let mut account = Account::from_parts( account_id, AssetVault::new(&[ @@ -357,7 +351,7 @@ fn test_sql_public_account_details() { ]) .unwrap(), storage, - mock_account_code(), + code, ZERO, ); @@ -983,12 +977,30 @@ fn insert_transactions(conn: &mut Connection) -> usize { count } -pub fn mock_account_code() -> AccountCode { - let account_code = "\ - export.account_procedure_1 - push.1.2 - add - end - "; - AccountCode::compile(account_code, TransactionKernel::assembler(), false).unwrap() +fn mock_account_code_and_storage(account_type: AccountType) -> (AccountCode, AccountStorage) { + let component_code = "\ + export.account_procedure_1 + push.1.2 + add + end + "; + + let component_storage = vec![ + StorageSlot::Value(Word::default()), + StorageSlot::Value(num_to_word(1)), + StorageSlot::Value(Word::default()), + StorageSlot::Value(num_to_word(3)), + StorageSlot::Value(Word::default()), + StorageSlot::Value(num_to_word(5)), + ]; + + let component = AccountComponent::compile( + component_code, + TransactionKernel::testing_assembler(), + 
component_storage, + ) + .unwrap() + .with_supported_type(account_type); + + Account::initialize_from_components(account_type, &[component]).unwrap() } diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 700c9d3c5..bb563c36a 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -37,50 +37,56 @@ pub enum NullifierTreeError { #[derive(Debug, Error)] pub enum DatabaseError { - #[error("Missing database connection: {0}")] - MissingDbConnection(#[from] PoolError), - #[error("SQLite error: {0}")] - SqliteError(#[from] rusqlite::Error), - #[error("SQLite error: {0}")] - FromSqlError(#[from] FromSqlError), - #[error("Hex parsing error: {0}")] - FromHexError(#[from] hex::FromHexError), - #[error("I/O error: {0}")] - IoError(#[from] io::Error), + // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES + // --------------------------------------------------------------------------------------------- #[error("Account error: {0}")] AccountError(#[from] AccountError), - #[error("Block error: {0}")] - BlockError(#[from] BlockError), - #[error("Note error: {0}")] - NoteError(#[from] NoteError), - #[error("Migration error: {0}")] - MigrationError(#[from] rusqlite_migration::Error), #[error("Account delta error: {0}")] AccountDeltaError(#[from] AccountDeltaError), - #[error("SQLite pool interaction task failed: {0}")] - InteractError(String), + #[error("Block error: {0}")] + BlockError(#[from] BlockError), + #[error("Closed channel: {0}")] + ClosedChannel(#[from] RecvError), #[error("Deserialization of BLOB data from database failed: {0}")] DeserializationError(DeserializationError), - #[error("Invalid Felt: {0}")] - InvalidFelt(String), - #[error("Block applying was broken because of closed channel on state side: {0}")] - ApplyBlockFailedClosedChannel(RecvError), + #[error("Hex parsing error: {0}")] + FromHexError(#[from] hex::FromHexError), + #[error("SQLite error: {0}")] + FromSqlError(#[from] FromSqlError), + #[error("I/O error: 
{0}")] + IoError(#[from] io::Error), + #[error("Migration error: {0}")] + MigrationError(#[from] rusqlite_migration::Error), + #[error("Missing database connection: {0}")] + MissingDbConnection(#[from] PoolError), + #[error("Note error: {0}")] + NoteError(#[from] NoteError), + #[error("SQLite error: {0}")] + SqliteError(#[from] rusqlite::Error), + + // OTHER ERRORS + // --------------------------------------------------------------------------------------------- + #[error("Account hashes mismatch (expected {expected}, but calculated is {calculated})")] + AccountHashesMismatch { + expected: RpoDigest, + calculated: RpoDigest, + }, #[error("Account {0} not found in the database")] AccountNotFoundInDb(AccountId), #[error("Accounts {0:?} not found in the database")] AccountsNotFoundInDb(Vec), #[error("Account {0} is not on the chain")] AccountNotOnChain(AccountId), - #[error("Failed to apply block because of public account final hashes mismatch (expected {expected}, \ - but calculated is {calculated}")] - ApplyBlockFailedAccountHashesMismatch { - expected: RpoDigest, - calculated: RpoDigest, - }, #[error("Block {0} not found in the database")] BlockNotFoundInDb(BlockNumber), - #[error("Unsupported database version. There is no migration chain from/to this version. Remove database files \ - and try again.")] + #[error("SQLite pool interaction task failed: {0}")] + InteractError(String), + #[error("Invalid Felt: {0}")] + InvalidFelt(String), + #[error( + "Unsupported database version. There is no migration chain from/to this version. \ + Remove all database files and try again." 
+ )] UnsupportedDatabaseVersion, } @@ -132,12 +138,17 @@ pub enum DatabaseSetupError { #[derive(Debug, Error)] pub enum GenesisError { + // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES + // --------------------------------------------------------------------------------------------- #[error("Database error: {0}")] DatabaseError(#[from] DatabaseError), #[error("Block error: {0}")] BlockError(#[from] BlockError), #[error("Merkle error: {0}")] MerkleError(#[from] MerkleError), + + // OTHER ERRORS + // --------------------------------------------------------------------------------------------- #[error("Apply block failed: {0}")] ApplyBlockFailed(String), #[error("Failed to read genesis file \"{genesis_filepath}\": {error}")] @@ -145,58 +156,74 @@ pub enum GenesisError { genesis_filepath: String, error: io::Error, }, - #[error("Failed to deserialize genesis file: {0}")] - GenesisFileDeserializationError(DeserializationError), #[error("Block header in store doesn't match block header in genesis file. 
Expected {expected_genesis_header:?}, but store contained {block_header_in_store:?}")] GenesisBlockHeaderMismatch { expected_genesis_header: Box, block_header_in_store: Box, }, + #[error("Failed to deserialize genesis file: {0}")] + GenesisFileDeserializationError(DeserializationError), #[error("Retrieving genesis block header failed: {0}")] SelectBlockHeaderByBlockNumError(Box), } // ENDPOINT ERRORS // ================================================================================================= +#[derive(Error, Debug)] +pub enum InvalidBlockError { + #[error("Duplicated nullifiers {0:?}")] + DuplicatedNullifiers(Vec), + #[error("Invalid output note type: {0:?}")] + InvalidOutputNoteType(Box), + #[error("Invalid tx hash: expected {expected}, but got {actual}")] + InvalidTxHash { expected: RpoDigest, actual: RpoDigest }, + #[error("Received invalid account tree root")] + NewBlockInvalidAccountRoot, + #[error("New block number must be 1 greater than the current block number")] + NewBlockInvalidBlockNum, + #[error("New block chain root is not consistent with chain MMR")] + NewBlockInvalidChainRoot, + #[error("Received invalid note root")] + NewBlockInvalidNoteRoot, + #[error("Received invalid nullifier root")] + NewBlockInvalidNullifierRoot, + #[error("New block `prev_hash` must match the chain's tip")] + NewBlockInvalidPrevHash, +} #[derive(Error, Debug)] pub enum ApplyBlockError { + // ERRORS WITH AUTOMATIC CONVERSIONS FROM NESTED ERROR TYPES + // --------------------------------------------------------------------------------------------- #[error("Database error: {0}")] DatabaseError(#[from] DatabaseError), #[error("I/O error: {0}")] IoError(#[from] io::Error), #[error("Task join error: {0}")] TokioJoinError(#[from] tokio::task::JoinError), + #[error("Invalid block error: {0}")] + InvalidBlockError(#[from] InvalidBlockError), + + // OTHER ERRORS + // --------------------------------------------------------------------------------------------- + #[error("Block 
applying was cancelled because of closed channel on database side: {0}")] + ClosedChannel(RecvError), #[error("Concurrent write detected")] ConcurrentWrite, - #[error("New block number must be 1 greater than the current block number")] - NewBlockInvalidBlockNum, - #[error("New block `prev_hash` must match the chain's tip")] - NewBlockInvalidPrevHash, - #[error("New block chain root is not consistent with chain MMR")] - NewBlockInvalidChainRoot, - #[error("Received invalid account tree root")] - NewBlockInvalidAccountRoot, - #[error("Received invalid note root")] - NewBlockInvalidNoteRoot, - #[error("Received invalid nullifier root")] - NewBlockInvalidNullifierRoot, - #[error("Duplicated nullifiers {0:?}")] - DuplicatedNullifiers(Vec), - #[error("Block applying was broken because of closed channel on database side: {0}")] - BlockApplyingBrokenBecauseOfClosedChannel(RecvError), - #[error("Failed to create notes tree: {0}")] - FailedToCreateNoteTree(MerkleError), #[error("Database doesn't have any block header data")] DbBlockHeaderEmpty, - #[error("Failed to get MMR peaks for forest ({forest}): {error}")] - FailedToGetMmrPeaksForForest { forest: usize, error: MmrError }, - #[error("Failed to update nullifier tree: {0}")] - FailedToUpdateNullifierTree(NullifierTreeError), - #[error("Invalid output note type: {0:?}")] - InvalidOutputNoteType(OutputNote), - #[error("Invalid tx hash: expected {expected}, but got {actual}")] - InvalidTxHash { expected: RpoDigest, actual: RpoDigest }, + #[error("Database update task failed: {0}")] + DbUpdateTaskFailed(String), +} + +impl From for Status { + fn from(err: ApplyBlockError) -> Self { + match err { + ApplyBlockError::InvalidBlockError(_) => Status::invalid_argument(err.to_string()), + + _ => Status::internal(err.to_string()), + } + } } #[derive(Error, Debug)] @@ -209,10 +236,10 @@ pub enum GetBlockHeaderError { #[derive(Error, Debug)] pub enum GetBlockInputsError { - #[error("Database error: {0}")] - DatabaseError(#[from] 
DatabaseError), #[error("Account error: {0}")] AccountError(#[from] AccountError), + #[error("Database error: {0}")] + DatabaseError(#[from] DatabaseError), #[error("Database doesn't have any block header data")] DbBlockHeaderEmpty, #[error("Failed to get MMR peaks for forest ({forest}): {error}")] diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index 51cb21ef4..3b6db4bd1 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -50,9 +50,6 @@ pub struct StoreApi { pub(super) state: Arc, } -// FIXME: remove the allow when the upstream clippy issue is fixed: -// https://github.com/rust-lang/rust-clippy/issues/12281 -#[allow(clippy::blocks_in_conditions)] #[tonic::async_trait] impl api_server::Api for StoreApi { // CLIENT ENDPOINTS @@ -372,8 +369,7 @@ impl api_server::Api for StoreApi { nullifier_count = block.nullifiers().len(), ); - // TODO: Why the error is swallowed here? Fix or add a comment with explanation. - let _ = self.state.apply_block(block).await; + self.state.apply_block(block).await?; Ok(Response::new(ApplyBlockResponse {})) } diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index 3ff4c4197..597219dac 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -41,13 +41,13 @@ use crate::{ db::{Db, NoteRecord, NoteSyncUpdate, NullifierInfo, StateSyncUpdate}, errors::{ ApplyBlockError, DatabaseError, GetBlockHeaderError, GetBlockInputsError, - GetNoteInclusionProofError, NoteSyncError, StateInitializationError, StateSyncError, + GetNoteInclusionProofError, InvalidBlockError, NoteSyncError, StateInitializationError, + StateSyncError, }, nullifier_tree::NullifierTree, types::{AccountId, BlockNumber}, COMPONENT, }; - // STRUCTURES // ================================================================================================ @@ -99,7 +99,9 @@ struct InnerState { impl InnerState { /// Returns the latest block number. 
fn latest_block_num(&self) -> BlockNumber { - (self.chain_mmr.forest() + 1).try_into().expect("block number overflow") + (self.chain_mmr.forest() - 1) + .try_into() + .expect("chain_mmr always has, at least, the genesis block") } } @@ -173,10 +175,11 @@ impl State { let tx_hash = block.compute_tx_hash(); if header.tx_hash() != tx_hash { - return Err(ApplyBlockError::InvalidTxHash { + return Err(InvalidBlockError::InvalidTxHash { expected: tx_hash, actual: header.tx_hash(), - }); + } + .into()); } let block_num = header.block_num(); @@ -190,10 +193,10 @@ impl State { .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; if block_num != prev_block.block_num() + 1 { - return Err(ApplyBlockError::NewBlockInvalidBlockNum); + return Err(InvalidBlockError::NewBlockInvalidBlockNum.into()); } if header.prev_hash() != prev_block.hash() { - return Err(ApplyBlockError::NewBlockInvalidPrevHash); + return Err(InvalidBlockError::NewBlockInvalidPrevHash.into()); } let block_data = block.to_bytes(); @@ -227,7 +230,7 @@ impl State { .cloned() .collect(); if !duplicate_nullifiers.is_empty() { - return Err(ApplyBlockError::DuplicatedNullifiers(duplicate_nullifiers)); + return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); } // compute updates for the in-memory data structures @@ -235,7 +238,7 @@ impl State { // new_block.chain_root must be equal to the chain MMR root prior to the update let peaks = inner.chain_mmr.peaks(); if peaks.hash_peaks() != header.chain_root() { - return Err(ApplyBlockError::NewBlockInvalidChainRoot); + return Err(InvalidBlockError::NewBlockInvalidChainRoot.into()); } // compute update for nullifier tree @@ -244,7 +247,7 @@ impl State { ); if nullifier_tree_update.root() != header.nullifier_root() { - return Err(ApplyBlockError::NewBlockInvalidNullifierRoot); + return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); } // compute update for account tree @@ -258,7 +261,7 @@ impl State { ); if account_tree_update.root() != 
header.account_root() { - return Err(ApplyBlockError::NewBlockInvalidAccountRoot); + return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); } ( @@ -272,7 +275,7 @@ impl State { // build note tree let note_tree = block.build_note_tree(); if note_tree.root() != header.note_root() { - return Err(ApplyBlockError::NewBlockInvalidNoteRoot); + return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } let notes = block @@ -281,7 +284,11 @@ impl State { let details = match note { OutputNote::Full(note) => Some(note.to_bytes()), OutputNote::Header(_) => None, - note => return Err(ApplyBlockError::InvalidOutputNoteType(note.clone())), + note => { + return Err(InvalidBlockError::InvalidOutputNoteType(Box::new( + note.clone(), + ))) + }, }; let merkle_path = note_tree.get_note_path(note_index); @@ -295,7 +302,7 @@ impl State { merkle_path, }) }) - .collect::, ApplyBlockError>>()?; + .collect::, InvalidBlockError>>()?; // Signals the transaction is ready to be committed, and the write lock can be acquired let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); @@ -313,9 +320,7 @@ impl State { ); // Wait for the message from the DB update task, that we ready to commit the DB transaction - acquired_allowed - .await - .map_err(ApplyBlockError::BlockApplyingBrokenBecauseOfClosedChannel)?; + acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; // Awaiting the block saving task to complete without errors block_save_task.await??; @@ -339,13 +344,17 @@ impl State { // Notify the DB update task that the write lock has been acquired, so it can commit // the DB transaction - let _ = inform_acquire_done.send(()); + inform_acquire_done + .send(()) + .map_err(|_| ApplyBlockError::DbUpdateTaskFailed("Receiver was dropped".into()))?; // TODO: shutdown #91 // Await for successful commit of the DB transaction. If the commit fails, we mustn't // change in-memory state, so we return a block applying error and don't proceed with // in-memory updates. 
- db_update_task.await??; + db_update_task + .await? + .map_err(|err| ApplyBlockError::DbUpdateTaskFailed(err.to_string()))?; // Update the in-memory data structures after successful commit of the DB transaction inner