From 7045a4dcec9aebc860d0c0edda3b994074f03a0b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:37:19 -0400 Subject: [PATCH 001/122] chore: pull thru is_prepare_phase_start --- stackslib/src/burnchains/burnchain.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index c0ce9db054..77910f5a08 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -511,6 +511,12 @@ impl Burnchain { .is_reward_cycle_start(self.first_block_height, burn_height) } + /// Is this burnchain block height the start of the prepare phase? + pub fn is_prepare_phase_start(&self, burn_height: u64) -> bool { + self.pox_constants + .is_prepare_phase_start(self.first_block_height, burn_height) + } + pub fn reward_cycle_to_block_height(&self, reward_cycle: u64) -> u64 { self.pox_constants .reward_cycle_to_block_height(self.first_block_height, reward_cycle) From 8b4fc4363dd5c59242e050e813bb99a9bf9b5809 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:37:44 -0400 Subject: [PATCH 002/122] feat: add is_prepare_phase_start to PoxConstants --- stackslib/src/burnchains/mod.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index c33ed1bb05..c332437c6c 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -479,6 +479,13 @@ impl PoxConstants { (effective_height % (self.reward_cycle_length as u64)) == 1 } + /// Is this burnchain block height the start of a prepare phase? 
+ pub fn is_prepare_phase_start(&self, first_block_height: u64, burn_height: u64) -> bool { + let effective_height = burn_height - first_block_height; + (effective_height % u64::from(self.reward_cycle_length)) + == u64::from((self.reward_cycle_length - self.prepare_length) + 1) + } + pub fn reward_cycle_to_block_height(&self, first_block_height: u64, reward_cycle: u64) -> u64 { // NOTE: the `+ 1` is because the height of the first block of a reward cycle is mod 1, not // mod 0. From 64791752d268f55364554a1e9d5f3eac047d1ad2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:38:07 -0400 Subject: [PATCH 003/122] feat: track last processed reward cycle in sortition DB MARF --- stackslib/src/chainstate/burn/db/sortdb.rs | 122 ++++++++++++++------- 1 file changed, 82 insertions(+), 40 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 4b880112d4..d44aa56879 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -87,12 +87,9 @@ use crate::chainstate::ChainstateDB; use crate::core::AST_RULES_PRECHECK_SIZE; use crate::core::FIRST_BURNCHAIN_CONSENSUS_HASH; use crate::core::FIRST_STACKS_BLOCK_HASH; -use crate::core::{ - StacksEpoch, StacksEpochExtension, StacksEpochId, NAKAMOTO_TENURE_BLOCK_ACCEPTANCE_PERIOD, - STACKS_EPOCH_MAX, -}; +use crate::core::{StacksEpoch, StacksEpochExtension, StacksEpochId, STACKS_EPOCH_MAX}; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; -use crate::net::{Error as NetError, Error}; +use crate::net::Error as NetError; use crate::util_lib::db::tx_begin_immediate; use crate::util_lib::db::tx_busy_handler; use crate::util_lib::db::DBTx; @@ -1108,6 +1105,11 @@ impl db_keys { format!("{}", index) } + /// reward cycle ID that was last processed + pub fn last_reward_cycle_key() -> &'static str { + "sortition_db::last_reward_cycle" + } + pub fn reward_set_size_to_string(size: usize) -> String { to_hex( 
&u16::try_from(size) @@ -1122,6 +1124,22 @@ impl db_keys { byte_buff.copy_from_slice(&bytes[0..2]); u16::from_le_bytes(byte_buff) } + + pub fn last_reward_cycle_to_string(rc: u64) -> String { + to_hex(&rc.to_le_bytes()) + } + + pub fn last_reward_cycle_from_string(rc_str: &str) -> u64 { + let bytes = hex_bytes(rc_str).expect("CORRUPTION: bad format written for reward cycle ID"); + assert_eq!( + bytes.len(), + 8, + "CORRUPTION: expected 8 bytes for reward cycle" + ); + let mut rc_buff = [0; 8]; + rc_buff.copy_from_slice(&bytes[0..8]); + u64::from_le_bytes(rc_buff) + } } /// Trait for structs that provide a chaintip-indexed handle into the @@ -1805,6 +1823,7 @@ impl<'a> SortitionHandleTx<'a> { burn_tip.block_height ); + // NOTE: in Nakamoto, this only works if this is a tenure-start block let num_rows = self.execute("UPDATE snapshots SET stacks_block_accepted = 1, stacks_block_height = ?1, arrival_index = ?2 WHERE consensus_hash = ?3 AND winning_stacks_block_hash = ?4", args)?; assert!(num_rows > 0); @@ -1842,36 +1861,23 @@ impl<'a> SortitionHandleConn<'a> { SortitionHandleConn::open_reader(connection, &sn.sortition_id) } - /// Does the sortition db expect to receive unknown blocks from - /// this tenure? - /// - /// This is used by nakamoto nodes while they are at or near the - /// current chain tip: only recent tenures can receive blocks this - /// way. Otherwise, the `BlockHeaderHash` must have been - /// explicitly confirmed by a block commit. - pub fn expects_blocks_from_tenure( - &self, - miner_pk: &Secp256k1PublicKey, - ) -> Result, db_error> { - let to_check = Hash160::from_node_public_key(miner_pk); - let mut cur_tip = self.context.chain_tip.clone(); - for _ in 0..NAKAMOTO_TENURE_BLOCK_ACCEPTANCE_PERIOD { - let cur_snapshot = SortitionDB::get_block_snapshot(self.sqlite(), &cur_tip)? 
- .ok_or_else(|| db_error::NotFoundError)?; - if cur_snapshot.miner_pk_hash == Some(to_check) { - return Ok(Some(cur_snapshot)); - } - cur_tip = cur_snapshot.parent_sortition_id.clone(); - } - Ok(None) - } - /// Does the sortition db expect to receive blocks from - /// signed by this stacker set? + /// signed by stacker set? pub fn expects_stacker_signature( &self, + consensus_hash: &ConsensusHash, _stacker_signature: &MessageSignature, ) -> Result { + // is this consensus hash in this fork? + let Some(bhh) = SortitionDB::get_burnchain_header_hash_by_consensus(self, consensus_hash)? + else { + return Ok(false); + }; + let Some(_sortition_id) = self.get_sortition_id_for_bhh(&bhh)? else { + return Ok(false); + }; + + // TODO: query set of stacker signers in order to get the aggregate public key Ok(true) } @@ -1925,6 +1931,15 @@ impl<'a> SortitionHandleConn<'a> { Ok(anchor_block_txid) } + /// Get the last processed reward cycle + pub fn get_last_processed_reward_cycle(&self) -> Result { + let encoded_rc = self + .get_indexed(&self.context.chain_tip, &db_keys::last_reward_cycle_key())? + .expect("FATAL: no last-processed reward cycle"); + + Ok(db_keys::last_reward_cycle_from_string(&encoded_rc)) + } + pub fn get_reward_cycle_unlocks( &mut self, cycle: u64, @@ -1991,12 +2006,13 @@ impl<'a> SortitionHandleConn<'a> { SortitionDB::get_block_snapshot(self.conn(), &sortition_id) } - /// Has `burn_header_hash` been processed in the current fork? - pub fn processed_block( - &self, - burn_header_hash: &BurnchainHeaderHash, - ) -> Result { - self.get_sortition_id_for_bhh(burn_header_hash) + /// Has `consensus_hash` been processed in the current fork? + pub fn processed_block(&self, consensus_hash: &ConsensusHash) -> Result { + let Some(bhh) = SortitionDB::get_burnchain_header_hash_by_consensus(self, consensus_hash)? 
+ else { + return Ok(false); + }; + self.get_sortition_id_for_bhh(&bhh) .map(|result| result.is_some()) } @@ -3850,7 +3866,7 @@ impl SortitionDB { pub fn find_snapshots_with_dirty_canonical_block_pointers( conn: &DBConn, canonical_stacks_height: u64, - ) -> Result, Error> { + ) -> Result, db_error> { let dirty_sortitions : Vec = query_rows(conn, "SELECT sortition_id FROM snapshots WHERE canonical_stacks_tip_height > ?1 AND pox_valid = 1", &[&u64_to_sql(canonical_stacks_height)?])?; Ok(dirty_sortitions) } @@ -4476,6 +4492,20 @@ impl SortitionDB { ) } + pub fn get_burnchain_header_hash_by_consensus( + conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, db_error> { + let qry = "SELECT burn_header_hash FROM snapshots WHERE consensus_hash = ?1 AND pox_valid = 1 LIMIT 1"; + let args = [&consensus_hash]; + query_row_panic(conn, qry, &args, || { + format!( + "FATAL: multiple block snapshots for the same block with consensus hash {}", + consensus_hash + ) + }) + } + pub fn get_sortition_id_by_consensus( conn: &Connection, consensus_hash: &ConsensusHash, @@ -5066,6 +5096,8 @@ impl<'a> SortitionHandleTx<'a> { let mut sn = snapshot.clone(); sn.index_root = root_hash.clone(); + // TODO: update canonical Stacks tip across burnchain forks + // preserve memoized stacks chain tip from this burn chain fork sn.canonical_stacks_tip_height = parent_sn.canonical_stacks_tip_height; sn.canonical_stacks_tip_hash = parent_sn.canonical_stacks_tip_hash; @@ -5768,6 +5800,9 @@ impl<'a> SortitionHandleTx<'a> { snapshot.block_height ); } + + let reward_cycle = reward_info.reward_cycle; + // if we've selected an anchor _and_ know of the anchor, // write the reward set information if let Some(mut reward_set) = reward_info.known_selected_anchor_block_owned() { @@ -5841,6 +5876,10 @@ impl<'a> SortitionHandleTx<'a> { keys.push(db_keys::pox_affirmation_map().to_string()); values.push(cur_affirmation_map.encode()); + // last reward cycle + 
keys.push(db_keys::last_reward_cycle_key().to_string()); + values.push(db_keys::last_reward_cycle_to_string(reward_cycle)); + pox_payout_addrs } else { // if this snapshot consumed some reward set entries AND @@ -5922,6 +5961,8 @@ impl<'a> SortitionHandleTx<'a> { values.push("".to_string()); keys.push(db_keys::pox_last_selected_anchor_txid().to_string()); values.push("".to_string()); + keys.push(db_keys::last_reward_cycle_key().to_string()); + values.push(db_keys::last_reward_cycle_to_string(0)); // no payouts vec![] @@ -6157,7 +6198,8 @@ impl<'a> SortitionHandleTx<'a> { .map(|(ch, bhh, height, _, _)| (ch, bhh, height)) } - /// Update the given tip's canonical Stacks block pointer + /// Update the given tip's canonical Stacks block pointer. + /// Does so on all sortitions of the same height as tip. fn update_new_block_arrivals( &mut self, tip: &BlockSnapshot, @@ -6169,7 +6211,7 @@ impl<'a> SortitionHandleTx<'a> { &best_chh, &best_bhh, &u64_to_sql(best_height)?, - &tip.sortition_id, + &u64_to_sql(tip.block_height)?, ]; debug!( @@ -6177,7 +6219,7 @@ impl<'a> SortitionHandleTx<'a> { &tip.block_height, &tip.burn_header_hash, &best_chh, &best_bhh, best_height ); self.execute("UPDATE snapshots SET canonical_stacks_tip_consensus_hash = ?1, canonical_stacks_tip_hash = ?2, canonical_stacks_tip_height = ?3 - WHERE sortition_id = ?4", args) + WHERE block_height = ?4", args) .map_err(db_error::SqliteError)?; Ok(()) From 35c0fd333fafcdbe0b21d3e8f74fec7bfc5f655d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:38:36 -0400 Subject: [PATCH 004/122] refactor: add support for both epoch2 and epoch3 processing to ChainsCoordinator --- stackslib/src/chainstate/coordinator/mod.rs | 226 ++++++++++++++------ 1 file changed, 156 insertions(+), 70 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index ab7e46fb7b..c1e3ece273 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ 
b/stackslib/src/chainstate/coordinator/mod.rs @@ -65,7 +65,9 @@ use crate::chainstate::stacks::{ accounts::MinerReward, ChainStateBootData, ClarityTx, MinerRewardInfo, StacksChainState, StacksEpochReceipt, StacksHeaderInfo, }, - events::{StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin}, + events::{ + StacksBlockEventData, StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin, + }, miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}, Error as ChainstateError, StacksBlock, StacksBlockHeader, TransactionPayload, }; @@ -96,6 +98,7 @@ pub enum PoxAnchorBlockStatus { #[derive(Debug, PartialEq)] pub struct RewardCycleInfo { + pub reward_cycle: u64, pub anchor_status: PoxAnchorBlockStatus, } @@ -137,7 +140,7 @@ impl RewardCycleInfo { pub trait BlockEventDispatcher { fn announce_block( &self, - block: &StacksBlock, + block: StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent: &StacksBlockId, @@ -193,18 +196,18 @@ pub struct ChainsCoordinator< FE: FeeEstimator + ?Sized, B: BurnchainHeaderReader, > { - canonical_sortition_tip: Option, - burnchain_blocks_db: BurnchainDB, - chain_state_db: StacksChainState, - sortition_db: SortitionDB, - burnchain: Burnchain, - atlas_db: Option, - dispatcher: Option<&'a T>, - cost_estimator: Option<&'a mut CE>, - fee_estimator: Option<&'a mut FE>, - reward_set_provider: R, - notifier: N, - atlas_config: AtlasConfig, + pub canonical_sortition_tip: Option, + pub burnchain_blocks_db: BurnchainDB, + pub chain_state_db: StacksChainState, + pub sortition_db: SortitionDB, + pub burnchain: Burnchain, + pub atlas_db: Option, + pub dispatcher: Option<&'a T>, + pub cost_estimator: Option<&'a mut CE>, + pub fee_estimator: Option<&'a mut FE>, + pub reward_set_provider: R, + pub notifier: N, + pub atlas_config: AtlasConfig, config: ChainsCoordinatorConfig, burnchain_indexer: B, } @@ -220,6 +223,8 @@ pub enum Error { DBError(DBError), NotPrepareEndBlock, 
NotPoXAnchorBlock, + NotInPreparePhase, + RewardSetAlreadyProcessed, } impl From for Error { @@ -266,6 +271,40 @@ impl RewardSetProvider for OnChainRewardSetProvider { let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), current_burn_height)?.expect( &format!("FATAL: no epoch for burn height {}", current_burn_height), ); + if cur_epoch.epoch_id < StacksEpochId::Epoch30 { + // Stacks 2.x epoch + return self.get_reward_set_epoch2( + current_burn_height, + chainstate, + burnchain, + sortdb, + block_id, + cur_epoch, + ); + } else { + // Nakamoto epoch + return self.get_reward_set_nakamoto( + current_burn_height, + chainstate, + burnchain, + sortdb, + block_id, + ); + } + } +} + +impl OnChainRewardSetProvider { + fn get_reward_set_epoch2( + &self, + // Todo: `current_burn_height` is a misleading name: should be the `cycle_start_burn_height` + current_burn_height: u64, + chainstate: &mut StacksChainState, + burnchain: &Burnchain, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + cur_epoch: StacksEpoch, + ) -> Result { match cur_epoch.epoch_id { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 @@ -347,8 +386,8 @@ impl< dispatcher: &'a mut T, comms: CoordinatorReceivers, atlas_config: AtlasConfig, - cost_estimator: Option<&mut CE>, - fee_estimator: Option<&mut FE>, + cost_estimator: Option<&'a mut CE>, + fee_estimator: Option<&'a mut FE>, miner_status: Arc>, burnchain_indexer: B, atlas_db: AtlasDB, @@ -392,52 +431,80 @@ impl< burnchain_indexer, }; + let mut nakamoto_available = false; loop { - // timeout so that we handle Ctrl-C a little gracefully - let bits = comms.wait_on(); - if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { - signal_mining_blocked(miner_status.clone()); - debug!("Received new stacks block notice"); - match inst.handle_new_stacks_block() { - Ok(missing_block_opt) => { - if missing_block_opt.is_some() { - debug!( - "Missing affirmed anchor block: {:?}", - &missing_block_opt.as_ref().expect("unreachable") - ); - } - } - Err(e) => 
{ - warn!("Error processing new stacks block: {:?}", e); - } + if nakamoto_available + || inst + .can_process_nakamoto() + .expect("FATAL: could not determine if Nakamoto is available") + { + // short-circuit to avoid gratuitous I/O + nakamoto_available = true; + if !inst.handle_comms_nakamoto(&comms, miner_status.clone()) { + return; + } + } else { + if !inst.handle_comms_epoch2(&comms, miner_status.clone()) { + return; } - - signal_mining_ready(miner_status.clone()); } - if (bits & (CoordinatorEvents::NEW_BURN_BLOCK as u8)) != 0 { - signal_mining_blocked(miner_status.clone()); - debug!("Received new burn block notice"); - match inst.handle_new_burnchain_block() { - Ok(missing_block_opt) => { - if missing_block_opt.is_some() { - debug!( - "Missing canonical anchor block {}", - &missing_block_opt.clone().unwrap() - ); - } - } - Err(e) => { - warn!("Error processing new burn block: {:?}", e); + } + } + + /// This is the Stacks 2.x coordinator loop body, which handles communications + /// from the given `comms`. It returns `true` if the coordinator is still running, and `false` + /// if not. 
+ pub fn handle_comms_epoch2( + &mut self, + comms: &CoordinatorReceivers, + miner_status: Arc>, + ) -> bool { + // timeout so that we handle Ctrl-C a little gracefully + let bits = comms.wait_on(); + if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received new stacks block notice"); + match self.handle_new_stacks_block() { + Ok(missing_block_opt) => { + if missing_block_opt.is_some() { + debug!( + "Missing affirmed anchor block: {:?}", + &missing_block_opt.as_ref().expect("unreachable") + ); } } - signal_mining_ready(miner_status.clone()); + Err(e) => { + warn!("Error processing new stacks block: {:?}", e); + } } - if (bits & (CoordinatorEvents::STOP as u8)) != 0 { - signal_mining_blocked(miner_status.clone()); - debug!("Received stop notice"); - return; + + signal_mining_ready(miner_status.clone()); + } + if (bits & (CoordinatorEvents::NEW_BURN_BLOCK as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received new burn block notice"); + match self.handle_new_burnchain_block() { + Ok(missing_block_opt) => { + if missing_block_opt.is_some() { + debug!( + "Missing canonical anchor block {}", + &missing_block_opt.clone().unwrap() + ); + } + } + Err(e) => { + warn!("Error processing new burn block: {:?}", e); + } } + signal_mining_ready(miner_status.clone()); + } + if (bits & (CoordinatorEvents::STOP as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received stop notice"); + return false; } + + return true; } } @@ -571,18 +638,20 @@ pub fn get_reward_cycle_info( ); if burnchain.is_reward_cycle_start(burn_height) { + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_height) + .expect("FATAL: no reward cycle for burn height"); + if burnchain .pox_constants .is_after_pox_sunset_end(burn_height, epoch_at_height.epoch_id) { return Ok(Some(RewardCycleInfo { + reward_cycle, anchor_status: PoxAnchorBlockStatus::NotSelected, })); } - let 
reward_cycle = burnchain - .block_height_to_reward_cycle(burn_height) - .expect("FATAL: no reward cycle for burn height"); debug!("Beginning reward cycle"; "burn_height" => burn_height, "reward_cycle" => reward_cycle, @@ -638,13 +707,17 @@ pub fn get_reward_cycle_info( ); PoxAnchorBlockStatus::SelectedAndUnknown(stacks_block_hash, txid) }; - Ok(Some(RewardCycleInfo { anchor_status })) + Ok(Some(RewardCycleInfo { + reward_cycle, + anchor_status, + })) } else { debug!( "PoX anchor block NOT chosen for reward cycle {} at burn height {}", reward_cycle, burn_height ); Ok(Some(RewardCycleInfo { + reward_cycle, anchor_status: PoxAnchorBlockStatus::NotSelected, })) } @@ -653,12 +726,15 @@ pub fn get_reward_cycle_info( } } -struct PaidRewards { - pox: Vec<(PoxAddress, u64)>, - burns: u64, +/// PoX payout event to be sent to connected event observers +pub struct PaidRewards { + pub pox: Vec<(PoxAddress, u64)>, + pub burns: u64, } -fn calculate_paid_rewards(ops: &[BlockstackOperationType]) -> PaidRewards { +/// Determine the rewards paid for a given set of burnchain operations. All of these operations +/// ought to be from the same burnchain block. 
+pub fn calculate_paid_rewards(ops: &[BlockstackOperationType]) -> PaidRewards { let mut reward_recipients: HashMap<_, u64> = HashMap::new(); let mut burn_amt = 0; for op in ops.iter() { @@ -686,7 +762,7 @@ fn calculate_paid_rewards(ops: &[BlockstackOperationType]) -> PaidRewards { } } -fn dispatcher_announce_burn_ops( +pub fn dispatcher_announce_burn_ops( dispatcher: &T, burn_header: &BurnchainBlockHeader, paid_rewards: PaidRewards, @@ -2200,7 +2276,6 @@ impl< }; let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; - // let canonical_affirmation_map = self.get_canonical_affirmation_map()?; let canonical_affirmation_map = self.get_canonical_affirmation_map(&canonical_snapshot.sortition_id)?; @@ -2267,6 +2342,19 @@ impl< for unprocessed_block in sortitions_to_process.into_iter() { let BurnchainBlockData { header, ops } = unprocessed_block; + + // only evaluate epoch 2.x. + // NOTE: epoch 3 starts _right after_ the first block in the first epoch3 reward cycle, + // so we use the 2.x rules to process the PoX reward set. + let sortition_epoch = + SortitionDB::get_stacks_epoch(self.sortition_db.conn(), header.block_height)? + .expect("FATAL: no epoch defined for a valid block height"); + + if sortition_epoch.epoch_id >= StacksEpochId::Epoch30 { + // stop processing + break; + } + if already_processed_burn_blocks.contains(&header.block_hash) { // don't re-process something we recursively processed already, by means of finding // a heretofore missing anchor block @@ -2348,7 +2436,6 @@ impl< ); stacks_blocks_to_reaccept.push(( sortition.consensus_hash.clone(), - stacks_block_header.anchored_header.parent().clone(), sortition.winning_stacks_block_hash.clone(), stacks_block_header.anchored_header.height(), )); @@ -2362,7 +2449,6 @@ impl< } sortition } else { - // new sortition -- go evaluate it. 
// bind a reference here to avoid tripping up the borrow-checker let dispatcher_ref = &self.dispatcher; let (next_snapshot, _) = self @@ -2400,7 +2486,7 @@ impl< { // get borrow checker to drop sort_tx let mut sort_tx = self.sortition_db.tx_begin()?; - for (ch, parent_bhh, bhh, height) in stacks_blocks_to_reaccept.into_iter() { + for (ch, bhh, height) in stacks_blocks_to_reaccept.into_iter() { debug!( "Check if Stacks block {}/{} height {} is compatible with `{}`", &ch, &bhh, height, &heaviest_am @@ -2422,7 +2508,7 @@ impl< "Stacks block {}/{} height {} is compatible with `{}`; will reaccept", &ch, &bhh, height, &heaviest_am ); - compatible_stacks_blocks.push((ch, parent_bhh, bhh, height)); + compatible_stacks_blocks.push((ch, bhh, height)); } else { debug!("Stacks block {}/{} height {} is NOT compatible with `{}`; will NOT reaccept", &ch, &bhh, height, &heaviest_am); } @@ -2433,7 +2519,7 @@ impl< let mut sortition_db_handle = SortitionHandleTx::begin(&mut self.sortition_db, &next_snapshot.sortition_id)?; - for (ch, _parent_bhh, bhh, height) in compatible_stacks_blocks.into_iter() { + for (ch, bhh, height) in compatible_stacks_blocks.into_iter() { debug!("Re-accept Stacks block {}/{} height {}", &ch, &bhh, height); revalidated_stacks_block = true; sortition_db_handle.set_stacks_block_accepted(&ch, &bhh, height)?; @@ -2604,7 +2690,7 @@ impl< } /// Process any Atlas attachment events and forward them to the Atlas subsystem - fn process_atlas_attachment_events( + pub fn process_atlas_attachment_events( atlas_db: Option<&mut AtlasDB>, atlas_config: &AtlasConfig, block_receipt: &StacksEpochReceipt, From e7a17c3fe9cbfa817dbebe0db9ab56994b179d22 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:39:03 -0400 Subject: [PATCH 005/122] chore: use StacksBlockEventData instead of StacksBlock for event handler --- stackslib/src/chainstate/coordinator/tests.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index e4d2df1380..8374f6d399 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -72,6 +72,7 @@ use crate::chainstate::stacks::boot::POX_3_NAME; use crate::chainstate::stacks::db::{ accounts::MinerReward, ClarityTx, StacksChainState, StacksHeaderInfo, }; +use crate::chainstate::stacks::events::StacksBlockEventData; use crate::chainstate::stacks::*; use crate::clarity_vm::clarity::ClarityConnection; use crate::core; @@ -427,7 +428,7 @@ pub struct NullEventDispatcher; impl BlockEventDispatcher for NullEventDispatcher { fn announce_block( &self, - _block: &StacksBlock, + _block: StacksBlockEventData, _metadata: &StacksHeaderInfo, _receipts: &[StacksTransactionReceipt], _parent: &StacksBlockId, From 9e0266050a7300972ee12da05f57e5d284da9ded Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:39:45 -0400 Subject: [PATCH 006/122] feat: only track consensus_hash and parent_block_id in Nakamoto blocks (update the structs and DB tables to reflect this). Also, add top-level Nakamoto block-processing. 
--- stackslib/src/chainstate/nakamoto/mod.rs | 731 +++++++++++++++++------ 1 file changed, 542 insertions(+), 189 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 1c261f7649..0b6b5328f0 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -60,7 +60,16 @@ use crate::chainstate::stacks::{MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_H use crate::clarity_vm::clarity::{ClarityInstance, PreCommitClarityBlock}; use crate::clarity_vm::database::SortitionDBRef; use crate::monitoring; -use crate::util_lib::db::{query_row_panic, query_rows, u64_to_sql, Error as DBError, FromRow}; +use crate::util_lib::db::{ + query_row_panic, query_rows, u64_to_sql, DBConn, Error as DBError, FromRow, +}; + +use crate::chainstate::coordinator::BlockEventDispatcher; +use crate::chainstate::coordinator::Error; + +use crate::net::Error as net_error; + +pub mod coordinator; #[cfg(test)] pub mod tests; @@ -88,17 +97,18 @@ lazy_static! { pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_1: Vec = vec![ r#" -- Table for staging nakamoto blocks + -- TODO: this goes into its own DB at some point CREATE TABLE nakamoto_staging_blocks ( + -- SHA512/256 hash of this block block_hash TEXT NOT NULL, -- the consensus hash of the burnchain block that selected this block's **tenure** consensus_hash TEXT NOT NULL, - burn_view TEXT NOT NULL, -- the parent index_block_hash parent_block_id TEXT NOT NULL, - -- has the burnchain view that this block depends on been processed? + -- has the burnchain block with this block's `consensus_hash` been processed? burn_attachable INT NOT NULL, - -- has the parent stacks block that this block depends on been processed? + -- has the parent Stacks block been processed? stacks_attachable INT NOT NULL, -- set to 1 if this block can never be attached orphaned INT NOT NULL, @@ -106,11 +116,15 @@ lazy_static! 
{ processed INT NOT NULL, height INT NOT NULL, - - index_block_hash TEXT NOT NULL, -- used internally; hash of consensus hash and anchored_block_hash - download_time INT NOT NULL, -- how long the block was in-flight - arrival_time INT NOT NULL, -- when this block was stored - processed_time INT NOT NULL, -- when this block was processed + + -- used internally -- this is the StacksBlockId of this block's consensus hash and block hash + index_block_hash TEXT NOT NULL, + -- how long the block was in-flight + download_time INT NOT NULL, + -- when this block was stored + arrival_time INT NOT NULL, + -- when this block was processed + processed_time INT NOT NULL, -- block data data BLOB NOT NULL, @@ -118,7 +132,7 @@ lazy_static! { PRIMARY KEY(block_hash,consensus_hash) );"#.into(), r#" - -- Table for Nakamoto Block Headers + -- Table for Nakamoto block headers CREATE TABLE nakamoto_block_headers ( -- The following fields all correspond to entries in the StacksHeaderInfo struct block_height INTEGER NOT NULL, @@ -131,6 +145,8 @@ lazy_static! { burn_header_height INT NOT NULL, -- timestamp from burnchain block header that generated this consensus hash burn_header_timestamp INT NOT NULL, + -- size of this block, in bytes. + -- encoded as TEXT for compatibility block_size TEXT NOT NULL, -- The following fields all correspond to entries in the NakamotoBlockHeader struct version INTEGER NOT NULL, @@ -138,35 +154,35 @@ lazy_static! 
{ chain_length INTEGER NOT NULL, -- this field is the total amount of BTC spent in the chain history (including this block) burn_spent INTEGER NOT NULL, - -- the parent BlockHeaderHash - parent TEXT NOT NULL, - -- the latest bitcoin block whose data is viewable from this stacks block - burn_view TEXT NOT NULL, + -- the consensus hash of the burnchain block that selected this block's tenure + consensus_hash TEXT NOT NULL, + -- the parent StacksBlockId + parent_block_id TEXT NOT NULL, + -- Merkle root of a Merkle tree constructed out of all the block's transactions + tx_merkle_root TEXT NOT NULL, + -- root hash of the Stacks chainstate MARF + state_index_root TEXT NOT NULL, -- miner's signature over the block miner_signature TEXT NOT NULL, -- stackers' signature over the block stacker_signature TEXT NOT NULL, - tx_merkle_root TEXT NOT NULL, - state_index_root TEXT NOT NULL, - -- the consensus hash of the burnchain block that selected this block's tenure - consensus_hash TEXT NOT NULL, - -- the consensus hash of the burnchain block that selected this block's tenure - parent_consensus_hash TEXT NOT NULL, -- The following fields are not part of either the StacksHeaderInfo struct -- or its contained NakamotoBlockHeader struct, but are used for querying + -- what kind of header this is (nakamoto or stacks 2.x) header_type TEXT NOT NULL, + -- hash of the block block_hash TEXT NOT NULL, -- index_block_hash is the hash of the block hash and consensus hash of the burn block that selected it, -- and is guaranteed to be globally unique (across all Stacks forks and across all PoX forks). -- index_block_hash is the block hash fed into the MARF index. 
index_block_hash TEXT NOT NULL, - -- the total cost of the block + -- the ExecutionCost of the block cost TEXT NOT NULL, -- the total cost up to and including this block in the current tenure total_tenure_cost TEXT NOT NULL, - -- the parent index_block_hash - parent_block_id TEXT NOT NULL, - -- this field is the total number of *tenures* in the chain history (including this tenure) + -- this field is the total number of *tenures* in the chain history (including this tenure), + -- as of the _end_ of this block. A block can contain multiple TenureChanges; if so, then this + -- is the height of the _last_ TenureChange. tenure_height INTEGER NOT NULL, -- this field is true if this is the first block of a new tenure tenure_changed INTEGER NOT NULL, @@ -189,8 +205,7 @@ lazy_static! { pub struct SetupBlockResult<'a, 'b> { pub clarity_tx: ClarityTx<'a, 'b>, pub tx_receipts: Vec, - pub matured_miner_rewards_opt: - Option<(MinerReward, Vec, MinerReward, MinerRewardInfo)>, + pub matured_miner_rewards_opt: Option<(MinerReward, MinerReward, MinerRewardInfo)>, pub evaluated_epoch: StacksEpochId, pub applied_epoch_transition: bool, pub burn_stack_stx_ops: Vec, @@ -208,11 +223,12 @@ pub struct NakamotoBlockHeader { /// Total amount of BTC spent producing the sortition that /// selected this block's miner. pub burn_spent: u64, - /// The block hash of the immediate parent of this block. - pub parent: BlockHeaderHash, - /// The bitcoin block whose data has been handled most recently by - /// the Stacks chain as of this block. - pub burn_view: BurnchainHeaderHash, + /// The consensus hash of the burnchain block that selected this tenure. The consensus hash + /// uniquely identifies this tenure, including across all Bitcoin forks. + pub consensus_hash: ConsensusHash, + /// The index block hash of the immediate parent of this block. + /// This is the hash of the parent block's hash and consensus hash. 
+ pub parent_block_id: StacksBlockId, /// The root of a SHA512/256 merkle tree over all this block's /// contained transactions pub tx_merkle_root: Sha512Trunc256Sum, @@ -221,12 +237,8 @@ pub struct NakamotoBlockHeader { /// Recoverable ECDSA signature from the tenure's miner. pub miner_signature: MessageSignature, /// Recoverable ECDSA signature from the stacker set active during the tenure. + /// TODO: This is a placeholder pub stacker_signature: MessageSignature, - /// The consensus hash of the burnchain block that selected this tenure. - pub consensus_hash: ConsensusHash, - /// The consensus hash of the burnchain block that selected the tenure of this block's parent. - /// (note: nakamoto blocks produced in the same tenure as their parent will have the same consensus hash) - pub parent_consensus_hash: ConsensusHash, } #[derive(Debug, Clone)] @@ -246,27 +258,23 @@ impl FromRow for NakamotoBlockHeader { .map_err(|_| DBError::ParseError)?; let burn_spent_i64: i64 = row.get("burn_spent")?; let burn_spent = burn_spent_i64.try_into().map_err(|_| DBError::ParseError)?; - let parent = row.get("parent")?; - let burn_view = row.get("burn_view")?; - let stacker_signature = row.get("stacker_signature")?; - let miner_signature = row.get("miner_signature")?; + let consensus_hash = row.get("consensus_hash")?; + let parent_block_id = row.get("parent_block_id")?; let tx_merkle_root = row.get("tx_merkle_root")?; let state_index_root = row.get("state_index_root")?; - let consensus_hash = row.get("consensus_hash")?; - let parent_consensus_hash = row.get("parent_consensus_hash")?; + let stacker_signature = row.get("stacker_signature")?; + let miner_signature = row.get("miner_signature")?; Ok(NakamotoBlockHeader { version, chain_length, burn_spent, - parent, - burn_view, - stacker_signature, - miner_signature, + consensus_hash, + parent_block_id, tx_merkle_root, state_index_root, - consensus_hash, - parent_consensus_hash, + stacker_signature, + miner_signature, }) } } @@ -276,14 
+284,12 @@ impl StacksMessageCodec for NakamotoBlockHeader { write_next(fd, &self.version)?; write_next(fd, &self.chain_length)?; write_next(fd, &self.burn_spent)?; - write_next(fd, &self.parent)?; - write_next(fd, &self.burn_view)?; + write_next(fd, &self.consensus_hash)?; + write_next(fd, &self.parent_block_id)?; write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; write_next(fd, &self.miner_signature)?; write_next(fd, &self.stacker_signature)?; - write_next(fd, &self.consensus_hash)?; - write_next(fd, &self.parent_consensus_hash)?; Ok(()) } @@ -293,14 +299,12 @@ impl StacksMessageCodec for NakamotoBlockHeader { version: read_next(fd)?, chain_length: read_next(fd)?, burn_spent: read_next(fd)?, - parent: read_next(fd)?, - burn_view: read_next(fd)?, + consensus_hash: read_next(fd)?, + parent_block_id: read_next(fd)?, tx_merkle_root: read_next(fd)?, state_index_root: read_next(fd)?, miner_signature: read_next(fd)?, stacker_signature: read_next(fd)?, - consensus_hash: read_next(fd)?, - parent_consensus_hash: read_next(fd)?, }) } } @@ -312,12 +316,10 @@ impl NakamotoBlockHeader { write_next(fd, &self.version)?; write_next(fd, &self.chain_length)?; write_next(fd, &self.burn_spent)?; - write_next(fd, &self.parent)?; - write_next(fd, &self.burn_view)?; + write_next(fd, &self.consensus_hash)?; + write_next(fd, &self.parent_block_id)?; write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; - write_next(fd, &self.consensus_hash)?; - write_next(fd, &self.parent_consensus_hash)?; Ok(Sha512Trunc256Sum::from_hasher(hasher)) } @@ -334,6 +336,14 @@ impl NakamotoBlockHeader { BlockHeaderHash::from_serializer(self) .expect("BUG: failed to serialize block header hash struct") } + + pub fn block_id(&self) -> StacksBlockId { + StacksBlockId::new(&self.consensus_hash, &self.block_hash()) + } + + pub fn is_first_mined(&self) -> bool { + StacksBlockHeader::is_first_index_block_hash(&self.parent_block_id) + } } impl NakamotoBlock { @@ 
-354,8 +364,8 @@ impl NakamotoBlock { warn!( "Block contains multiple TenureChange transactions"; "tenure_change_txs" => tenure_changes.len(), - "parent_block_id" => %self.header.parent, - "burn_view" => %self.header.burn_view, + "parent_block_id" => %self.header.parent_block_id, + "consensus_hash" => %self.header.consensus_hash, ); } @@ -375,7 +385,7 @@ impl NakamotoBlock { } pub fn is_first_mined(&self) -> bool { - StacksBlockHeader::is_first_block_hash(&self.header.parent) + self.header.is_first_mined() } pub fn get_coinbase_tx(&self) -> Option<&StacksTransaction> { @@ -384,6 +394,10 @@ impl NakamotoBlock { _ => None, } } + + pub fn block_id(&self) -> StacksBlockId { + self.header.block_id() + } } impl NakamotoChainState { @@ -409,22 +423,47 @@ impl NakamotoChainState { Ok(()) } + /// Modify the staging database that a given stacks block can never be processed. + /// This will update the attachable status for children blocks, as well as marking the stacks + /// block itself as orphaned. + pub fn set_block_orphaned( + staging_db_tx: &rusqlite::Transaction, + block: &StacksBlockId, + ) -> Result<(), ChainstateError> { + let update_dependents = + "UPDATE nakamoto_staging_blocks SET stacks_attachable = 0, orphaned = 1 + WHERE parent_block_id = ?"; + staging_db_tx.execute(&update_dependents, &[&block])?; + + let clear_staged_block = + "UPDATE nakamoto_staging_blocks SET processed = 1, processed_time = ?2, orphaned = 1 + WHERE index_block_hash = ?1"; + staging_db_tx.execute( + &clear_staged_block, + params![&block, &u64_to_sql(get_epoch_time_secs())?], + )?; + + Ok(()) + } + /// Notify the staging database that a given burn block has been processed. /// This is required for staged blocks to be eligible for processing. 
pub fn set_burn_block_processed( staging_db_tx: &rusqlite::Transaction, - block: &BurnchainHeaderHash, + consensus_hash: &ConsensusHash, ) -> Result<(), ChainstateError> { let update_dependents = "UPDATE nakamoto_staging_blocks SET burn_attachable = 1 - WHERE burn_view = ?"; - staging_db_tx.execute(&update_dependents, &[&block])?; + WHERE consensus_hash = ?"; + staging_db_tx.execute(&update_dependents, &[consensus_hash])?; Ok(()) } - pub fn next_ready_block( + /// Find the next ready-to-process Nakamoto block, given a connection to the staging blocks DB. + /// Returns (the block, the size of the block) + pub fn next_ready_nakamoto_block( staging_db_conn: &Connection, - ) -> Result, ChainstateError> { + ) -> Result, ChainstateError> { let query = "SELECT data FROM nakamoto_staging_blocks WHERE burn_attachable = 1 AND stacks_attachable = 1 @@ -435,7 +474,7 @@ impl NakamotoChainState { .query_row_and_then(query, NO_PARAMS, |row| { let data: Vec = row.get("data")?; let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?; - Ok(Some(block)) + Ok(Some((block, data.len() as u64))) }) .or_else(|e| { if let ChainstateError::DBError(DBError::SqliteError( @@ -449,6 +488,268 @@ impl NakamotoChainState { }) } + /// Extract and parse a nakamoto block from the DB, and verify its integrity. 
+ fn load_nakamoto_block( + staging_db_conn: &Connection, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + ) -> Result, ChainstateError> { + let query = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; + staging_db_conn + .query_row_and_then( + query, + rusqlite::params![consensus_hash, block_hash], + |row| { + let data: Vec = row.get("data")?; + let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?; + if &block.header.block_hash() != block_hash { + panic!( + "Staging DB corruption: expected {}, got {}", + &block_hash, + &block.header.block_hash() + ); + } + Ok(Some(block)) + }, + ) + .or_else(|e| { + if let ChainstateError::DBError(DBError::SqliteError( + rusqlite::Error::QueryReturnedNoRows, + )) = e + { + Ok(None) + } else { + Err(e) + } + }) + } + + /// Process the next ready block. + /// If there exists a ready Nakamoto block, then this method returns Ok(Some(..)) with the + /// receipt. Otherwise, it returns Ok(None). + /// + /// It returns Err(..) on DB error, or if the child block does not connect to the parent. + /// The caller should keep calling this until it gets Ok(None) + pub fn process_next_nakamoto_block<'a, T: BlockEventDispatcher>( + stacks_chain_state: &mut StacksChainState, + sort_tx: &mut SortitionHandleTx, + dispatcher_opt: Option<&'a T>, + ) -> Result, ChainstateError> { + let (mut chainstate_tx, clarity_instance) = stacks_chain_state.chainstate_tx_begin()?; + let Some((next_ready_block, block_size)) = + Self::next_ready_nakamoto_block(&chainstate_tx.tx)? + else { + // no more blocks + return Ok(None); + }; + + let block_id = next_ready_block.block_id(); + + // find corresponding snapshot + let next_ready_block_snapshot = SortitionDB::get_block_snapshot_consensus( + sort_tx, + &next_ready_block.header.consensus_hash, + )? 
+ .expect(&format!( + "CORRUPTION: staging Nakamoto block {}/{} does not correspond to a burn block", + &next_ready_block.header.consensus_hash, + &next_ready_block.header.block_hash() + )); + + debug!("Process staging Nakamoto block"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash + ); + + // find parent header + let Some(parent_header_info) = + Self::get_block_header(&chainstate_tx.tx, &next_ready_block.header.parent_block_id)? + else { + // no parent; cannot process yet + debug!("Cannot process Nakamoto block: missing parent header"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "parent_block_id" => %next_ready_block.header.parent_block_id + ); + return Ok(None); + }; + + // sanity check -- must attach to parent + let parent_block_id = StacksBlockId::new( + &parent_header_info.consensus_hash, + &parent_header_info.anchored_header.block_hash(), + ); + if parent_block_id != next_ready_block.header.parent_block_id { + let msg = "Discontinuous Nakamoto Stacks block"; + warn!("{}", &msg; + "child parent_block_id" => %next_ready_block.header.parent_block_id, + "expected parent_block_id" => %parent_block_id + ); + let _ = Self::set_block_orphaned(&chainstate_tx.tx, &block_id); + chainstate_tx.commit()?; + return Err(ChainstateError::InvalidStacksBlock(msg.into())); + } + + // find commit and sortition burns if this is a tenure-start block + // TODO: store each *tenure* + let (commit_burn, sortition_burn) = if next_ready_block.tenure_changed(&parent_block_id) { + // find block-commit to get commit-burn + let block_commit = sort_tx + .get_block_commit( + &next_ready_block_snapshot.winning_block_txid, + &next_ready_block_snapshot.sortition_id, + )? 
+ .expect("FATAL: no block-commit for tenure-start block"); + + let sort_burn = SortitionDB::get_block_burn_amount( + sort_tx.deref().deref(), + &next_ready_block_snapshot, + )?; + (block_commit.burn_fee, sort_burn) + } else { + (0, 0) + }; + + // attach the block to the chain state and calculate the next chain tip. + let pox_constants = sort_tx.context.pox_constants.clone(); + let (epoch_receipt, clarity_commit) = match NakamotoChainState::append_block( + &mut chainstate_tx, + clarity_instance, + sort_tx, + &pox_constants, + &parent_header_info, + &next_ready_block_snapshot.burn_header_hash, + next_ready_block_snapshot + .block_height + .try_into() + .expect("Failed to downcast u64 to u32"), + next_ready_block_snapshot.burn_header_timestamp, + &next_ready_block, + block_size, + commit_burn, + sortition_burn, + ) { + Ok(next_chain_tip_info) => next_chain_tip_info, + Err(e) => { + test_debug!( + "Failed to append {}/{}: {:?}", + &next_ready_block.header.consensus_hash, + &next_ready_block.header.block_hash(), + &e + ); + let _ = Self::set_block_orphaned(&chainstate_tx.tx, &block_id); + chainstate_tx.commit()?; + return Err(e); + } + }; + + assert_eq!( + epoch_receipt.header.anchored_header.block_hash(), + next_ready_block.header.block_hash() + ); + assert_eq!( + epoch_receipt.header.consensus_hash, + next_ready_block.header.consensus_hash + ); + + NakamotoChainState::set_block_processed(&chainstate_tx.tx, &block_id)?; + + // set stacks block accepted + sort_tx.set_stacks_block_accepted( + &next_ready_block.header.consensus_hash, + &next_ready_block.header.block_hash(), + next_ready_block.header.chain_length, + )?; + + // announce the block, if we're connected to an event dispatcher + if let Some(dispatcher) = dispatcher_opt { + dispatcher.announce_block( + ( + next_ready_block, + parent_header_info.anchored_header.block_hash(), + ) + .into(), + &epoch_receipt.header.clone(), + &epoch_receipt.tx_receipts, + &parent_block_id, + 
next_ready_block_snapshot.winning_block_txid, + &epoch_receipt.matured_rewards, + epoch_receipt.matured_rewards_info.as_ref(), + epoch_receipt.parent_burn_block_hash, + epoch_receipt.parent_burn_block_height, + epoch_receipt.parent_burn_block_timestamp, + &epoch_receipt.anchored_block_cost, + &epoch_receipt.parent_microblocks_cost, + &pox_constants, + ); + } + + // this will panic if the Clarity commit fails. + clarity_commit.commit(); + chainstate_tx.commit() + .unwrap_or_else(|e| { + error!("Failed to commit chainstate transaction after committing Clarity block. The chainstate database is now corrupted."; + "error" => ?e); + panic!() + }); + + Ok(Some(epoch_receipt)) + } + + /// Process some staging blocks, up to max_blocks. + /// Return new chain tips. + pub fn process_nakamoto_blocks<'a, T: BlockEventDispatcher>( + stacks_chain_state: &mut StacksChainState, + mut sort_tx: SortitionHandleTx, + max_blocks: usize, + dispatcher_opt: Option<&'a T>, + ) -> Result>, ChainstateError> { + debug!("Process up to {} new blocks", max_blocks); + let mut ret = vec![]; + + if max_blocks == 0 { + // nothing to do + return Ok(vec![]); + } + + for _ in 0..max_blocks { + // process up to max_blocks pending blocks + match Self::process_next_nakamoto_block( + stacks_chain_state, + &mut sort_tx, + dispatcher_opt, + ) { + Ok(next_tip_opt) => { + ret.push(next_tip_opt); + } + Err(ChainstateError::InvalidStacksBlock(msg)) => { + warn!("Encountered invalid block: {}", &msg); + ret.push(None); + continue; + } + Err(ChainstateError::NetError(net_error::DeserializeError(msg))) => { + // happens if we load a zero-sized block (i.e. an invalid block) + warn!("Encountered invalid block: {}", &msg); + ret.push(None); + continue; + } + Err(e) => { + error!("Unrecoverable error when processing blocks: {:?}", &e); + return Err(e); + } + } + } + + sort_tx.commit()?; + Ok(ret) + } + + /// Accept a Nakamoto block into the staging blocks DB. 
+ /// Fails if: + /// * the public key cannot be recovered from the miner's signature + /// * the stackers during the tenure didn't sign it + /// * a DB error occurs pub fn accept_block( block: NakamotoBlock, sortdb: &SortitionHandleConn, @@ -462,42 +763,31 @@ impl NakamotoChainState { return ChainstateError::InvalidStacksBlock("Unrecoverable miner public key".into()); })?; - if sortdb - .expects_blocks_from_tenure(&recovered_miner_pk)? - .is_none() - { - let msg = format!("Received block, signed by {recovered_miner_pk:?}, but this pubkey was not associated with recent tenures"); - warn!("{}", msg); - return Err(ChainstateError::InvalidStacksBlock(msg)); - }; - - if !sortdb.expects_stacker_signature(&block.header.stacker_signature)? { + if !sortdb.expects_stacker_signature( + &block.header.consensus_hash, + &block.header.stacker_signature, + )? { let msg = format!("Received block, signed by {recovered_miner_pk:?}, but the stacker signature does not match the active stacking cycle"); warn!("{}", msg); return Err(ChainstateError::InvalidStacksBlock(msg)); } - let parent_block_id = - StacksBlockId::new(&block.header.parent_consensus_hash, &block.header.parent); + // if the burnchain block of this Stacks block's tenure has been processed, then it + // is ready to be processed from the perspective of the burnchain + let burn_attachable = sortdb.processed_block(&block.header.consensus_hash)?; - // if the burnview of this block has been processed, then it - // is ready to be processed from the perspective of the - // burnchain - let burn_attachable = sortdb.processed_block(&block.header.burn_view)?; // check if the parent Stacks Block ID has been processed. if so, then this block is stacks_attachable let stacks_attachable = block.is_first_mined() || staging_db_tx.query_row( "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ? 
AND processed = 1", - rusqlite::params![&parent_block_id], + rusqlite::params![&block.header.parent_block_id], |_row| Ok(()) ).optional()?.is_some(); - let block_hash = block.header.block_hash(); - let block_id = StacksBlockId::new(&block.header.consensus_hash, &block_hash); + let block_id = block.block_id(); staging_db_tx.execute( "INSERT INTO nakamoto_staging_blocks ( block_hash, consensus_hash, - burn_view, parent_block_id, burn_attachable, stacks_attachable, @@ -509,12 +799,12 @@ impl NakamotoChainState { download_time, arrival_time, processed_time, - data ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)", + data + ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", params![ - &block_hash, + &block.header.block_hash(), &block.header.consensus_hash, - &block.header.burn_view, - &parent_block_id, + &block.header.parent_block_id, if burn_attachable { 1 } else { 0 }, if stacks_attachable { 1 } else { 0 }, 0, @@ -540,9 +830,9 @@ impl NakamotoChainState { parent_block_hash: &BlockHeaderHash, parent_consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, - coinbase_tx: &StacksTransaction, block_consensus_hash: &ConsensusHash, block_height: u64, + coinbase_tx: &StacksTransaction, parent_fees: u128, burnchain_commit_burn: u64, burnchain_sortition_burn: u64, @@ -613,9 +903,9 @@ impl NakamotoChainState { .map_err(|_| ChainstateError::DBError(DBError::ParseError)) } - /// Return a Nakamoto StacksHeaderInfo at a given tenure height in the fork identified by `tip_index_hash` - /// Prior to Nakamoto, `tenure_height` is equivalent to stacks block height. - /// This returns the first Stacks block header in the tenure. + /// Return a Nakamoto StacksHeaderInfo at a given tenure height in the fork identified by `tip_index_hash`. + /// * For Stacks 2.x, this is the Stacks block's header + /// * For Stacks 3.x (Nakamoto), this is the first block in the miner's tenure. 
pub fn get_header_by_tenure_height( tx: &mut StacksDBTx, tip_index_hash: &StacksBlockId, @@ -714,16 +1004,50 @@ impl NakamotoChainState { Ok(result) } + /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) + pub fn get_canonical_block_header( + conn: &Connection, + sortdb: &SortitionDB, + ) -> Result, ChainstateError> { + let (consensus_hash, block_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; + let index_block_hash = StacksBlockId::new(&consensus_hash, &block_bhh); + Self::get_block_header(conn, &index_block_hash) + } + + /// Get the first block header in a Nakamoto tenure + pub fn get_nakamoto_tenure_start_block_header( + conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height ASC LIMIT 1"; + query_row_panic(conn, sql, &[&consensus_hash], || { + "FATAL: multiple rows for the same consensus hash".to_string() + }) + .map_err(ChainstateError::DBError) + } + + /// Get the last block header in a Nakamoto tenure + pub fn get_nakamoto_tenure_finish_block_header( + conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height DESC LIMIT 1"; + query_row_panic(conn, sql, &[&consensus_hash], || { + "FATAL: multiple rows for the same consensus hash".to_string() + }) + .map_err(ChainstateError::DBError) + } + /// Insert a nakamoto block header that is paired with an /// already-existing block commit and snapshot /// /// `header` should be a pointer to the header in `tip_info`. 
pub fn insert_stacks_block_header( tx: &Connection, - parent_id: &StacksBlockId, tip_info: &StacksHeaderInfo, header: &NakamotoBlockHeader, - anchored_block_cost: &ExecutionCost, + block_cost: &ExecutionCost, total_tenure_cost: &ExecutionCost, tenure_height: u64, tenure_changed: bool, @@ -763,19 +1087,16 @@ impl NakamotoChainState { &header.version, &u64_to_sql(header.chain_length)?, &u64_to_sql(header.burn_spent)?, - &header.parent, - &header.parent_consensus_hash, - &header.burn_view, &header.miner_signature, &header.stacker_signature, &header.tx_merkle_root, &header.state_index_root, &block_hash, &index_block_hash, - anchored_block_cost, + block_cost, total_tenure_cost, &tenure_tx_fees.to_string(), - parent_id, + &header.parent_block_id, &u64_to_sql(tenure_height)?, if tenure_changed { &1i64 } else { &0 }, ]; @@ -787,8 +1108,8 @@ impl NakamotoChainState { burn_header_timestamp, block_size, header_type, - version, chain_length, burn_spent, parent, parent_consensus_hash, - burn_view, miner_signature, stacker_signature, tx_merkle_root, state_index_root, + version, chain_length, burn_spent, + miner_signature, stacker_signature, tx_merkle_root, state_index_root, block_hash, index_block_hash, @@ -798,7 +1119,7 @@ impl NakamotoChainState { parent_block_id, tenure_height, tenure_changed) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25, ?26)", + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23)", args )?; @@ -816,10 +1137,10 @@ impl NakamotoChainState { new_burnchain_height: u32, new_burnchain_timestamp: u64, block_reward: Option<&MinerPaymentSchedule>, - mature_miner_payouts: Option<(MinerReward, Vec, MinerReward, MinerRewardInfo)>, // (miner, [users], parent, matured rewards) + mature_miner_payouts: Option<(MinerReward, MinerReward, MinerRewardInfo)>, // (miner, parent, matured rewards) anchor_block_cost: &ExecutionCost, 
total_tenure_cost: &ExecutionCost, - anchor_block_size: u64, + block_size: u64, applied_epoch_transition: bool, burn_stack_stx_ops: Vec, burn_transfer_stx_ops: Vec, @@ -828,10 +1149,36 @@ impl NakamotoChainState { tenure_changed: bool, block_fees: u128, ) -> Result { - if new_tip.parent != FIRST_STACKS_BLOCK_HASH { + if new_tip.parent_block_id + != StacksBlockHeader::make_index_block_hash( + &FIRST_BURNCHAIN_CONSENSUS_HASH, + &FIRST_STACKS_BLOCK_HASH, + ) + { // not the first-ever block, so linkage must occur - assert_eq!(new_tip.parent, parent_tip.block_hash()); - assert_eq!(&new_tip.parent_consensus_hash, parent_consensus_hash); + match parent_tip { + StacksBlockHeaderTypes::Epoch2(stacks_header) => { + // this is the first nakamoto block + assert_eq!(parent_tip.block_hash(), stacks_header.block_hash()); + assert_eq!( + new_tip.parent_block_id, + StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_tip.block_hash() + ) + ); + } + StacksBlockHeaderTypes::Nakamoto(nakamoto_header) => { + // nakamoto blocks link to their parent via index block hashes + assert_eq!( + new_tip.parent_block_id, + StacksBlockHeader::make_index_block_hash( + &nakamoto_header.consensus_hash, + &parent_tip.block_hash() + ) + ); + } + } } assert_eq!( @@ -843,6 +1190,16 @@ impl NakamotoChainState { ); let parent_hash = StacksBlockId::new(parent_consensus_hash, &parent_tip.block_hash()); + assert_eq!( + parent_hash, + new_tip.parent_block_id, + "FATAL: parent_consensus_hash/parent_block_hash ({}/{}) {} != {}", + parent_consensus_hash, + &parent_tip.block_hash(), + &parent_hash, + &new_tip.parent_block_id + ); + let new_block_hash = new_tip.block_hash(); let index_block_hash = StacksBlockId::new(&new_tip.consensus_hash, &new_block_hash); @@ -861,7 +1218,7 @@ impl NakamotoChainState { burn_header_hash: new_burn_header_hash.clone(), burn_header_height: new_burnchain_height, burn_header_timestamp: new_burnchain_timestamp, - anchored_block_size: anchor_block_size, + 
anchored_block_size: block_size, }; let tenure_fees = block_fees @@ -880,7 +1237,6 @@ impl NakamotoChainState { Self::insert_stacks_block_header( headers_tx.deref_mut(), - &parent_hash, &new_tip_info, &new_tip, anchor_block_cost, @@ -904,8 +1260,7 @@ impl NakamotoChainState { burn_delegate_stx_ops, )?; - if let Some((miner_payout, user_payouts, parent_payout, reward_info)) = mature_miner_payouts - { + if let Some((miner_payout, parent_payout, reward_info)) = mature_miner_payouts { let rewarded_miner_block_id = StacksBlockId::new( &reward_info.from_block_consensus_hash, &reward_info.from_stacks_block_hash, @@ -921,14 +1276,6 @@ impl NakamotoChainState { &rewarded_miner_block_id, &miner_payout, )?; - for user_payout in user_payouts.into_iter() { - StacksChainState::insert_matured_child_user_reward( - headers_tx.deref_mut(), - &rewarded_parent_miner_block_id, - &rewarded_miner_block_id, - &user_payout, - )?; - } StacksChainState::insert_matured_parent_miner_reward( headers_tx.deref_mut(), &rewarded_parent_miner_block_id, @@ -958,15 +1305,15 @@ impl NakamotoChainState { /// Returns stx lockup events. pub fn finish_block( clarity_tx: &mut ClarityTx, - miner_payouts: Option<&(MinerReward, Vec, MinerReward, MinerRewardInfo)>, + miner_payouts: Option<&(MinerReward, MinerReward, MinerRewardInfo)>, ) -> Result, ChainstateError> { // add miner payments - if let Some((ref miner_reward, ref user_rewards, ref parent_reward, _)) = miner_payouts { + if let Some((ref miner_reward, ref parent_reward, _)) = miner_payouts { // grant in order by miner, then users let matured_ustx = StacksChainState::process_matured_miner_rewards( clarity_tx, miner_reward, - user_rewards, + &[], parent_reward, )?; @@ -987,27 +1334,28 @@ impl NakamotoChainState { /// microblock fees, microblock burns, list of microblock tx receipts, /// miner rewards tuples, the stacks epoch id, and a boolean that /// represents whether the epoch transition has been applied. 
- /// - /// The `burn_dbconn`, `sortition_dbconn`, and `conn` arguments - /// all reference the same sortition database through different - /// interfaces. `burn_dbconn` and `sortition_dbconn` should - /// reference the same object. The reason to provide both is that - /// `SortitionDBRef` captures trait functions that Clarity does - /// not need, and Rust does not support trait upcasting (even - /// though it would theoretically be safe). pub fn setup_block<'a, 'b>( + // Transaction against the chainstate chainstate_tx: &'b mut ChainstateTx, + // Clarity connection to the chainstate clarity_instance: &'a mut ClarityInstance, + // Reference to the sortition DB sortition_dbconn: &'b dyn SortitionDBRef, + // PoX constants for the system pox_constants: &PoxConstants, + // Stacks chain tip chain_tip: &StacksHeaderInfo, - burn_view: BurnchainHeaderHash, - burn_view_height: u32, + // Burnchain block hash and height of the tenure for this Stacks block + burn_header_hash: BurnchainHeaderHash, + burn_header_height: u32, + // Parent Stacks block's tenure's burnchain block hash and its consensus hash parent_consensus_hash: ConsensusHash, parent_header_hash: BlockHeaderHash, + // are we in mainnet or testnet? mainnet: bool, - miner_id_opt: Option, + // is this the start of a new tenure? tenure_changed: bool, + // What tenure height are we in? 
tenure_height: u64, ) -> Result, ChainstateError> { let parent_index_hash = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); @@ -1072,8 +1420,8 @@ impl NakamotoChainState { chainstate_tx, &parent_index_hash, sortition_dbconn.sqlite_conn(), - &burn_view, - burn_view_height.into(), + &burn_header_hash, + burn_header_height.into(), )?; let mut clarity_tx = StacksChainState::chainstate_block_begin( @@ -1097,17 +1445,16 @@ impl NakamotoChainState { ) }); let matured_miner_rewards_opt = match matured_miner_rewards_result { - Some(Ok(x)) => x, + Some(Ok(Some((miner, _user_burns, parent, reward_info)))) => { + Some((miner, parent, reward_info)) + } + Some(Ok(None)) => None, Some(Err(e)) => { - if miner_id_opt.is_some() { - return Err(e); - } else { - let msg = format!("Failed to load miner rewards: {:?}", &e); - warn!("{}", &msg); + let msg = format!("Failed to load miner rewards: {:?}", &e); + warn!("{}", &msg); - clarity_tx.rollback_block(); - return Err(ChainstateError::InvalidStacksBlock(msg)); - } + clarity_tx.rollback_block(); + return Err(ChainstateError::InvalidStacksBlock(msg)); } None => None, }; @@ -1131,7 +1478,7 @@ impl NakamotoChainState { // is this stacks block the first of a new epoch? 
let (applied_epoch_transition, mut tx_receipts) = - StacksChainState::process_epoch_transition(&mut clarity_tx, burn_view_height)?; + StacksChainState::process_epoch_transition(&mut clarity_tx, burn_header_height)?; debug!( "Setup block: Processed epoch transition at {}/{}", @@ -1143,7 +1490,7 @@ impl NakamotoChainState { let auto_unlock_events = if evaluated_epoch >= StacksEpochId::Epoch21 { let unlock_events = StacksChainState::check_and_handle_reward_start( - burn_view_height.into(), + burn_header_height.into(), sortition_dbconn.as_burn_state_db(), sortition_dbconn, &mut clarity_tx, @@ -1160,7 +1507,7 @@ impl NakamotoChainState { vec![] }; - let active_pox_contract = pox_constants.active_pox_contract(burn_view_height.into()); + let active_pox_contract = pox_constants.active_pox_contract(burn_header_height.into()); // process stacking & transfer operations from burnchain ops tx_receipts.extend(StacksChainState::process_stacking_ops( @@ -1216,6 +1563,7 @@ impl NakamotoChainState { }) } + /// Append a Nakamoto Stacks block to the Stacks chain state. 
fn append_block<'a>( chainstate_tx: &mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, @@ -1237,7 +1585,6 @@ impl NakamotoChainState { ); let ast_rules = ASTRules::PrecheckSize; - let mainnet = chainstate_tx.get_config().mainnet; let next_block_height = block.header.chain_length; @@ -1253,25 +1600,40 @@ impl NakamotoChainState { ) }; - if parent_ch != block.header.parent_consensus_hash { + let parent_block_id = StacksChainState::get_index_hash(&parent_ch, &parent_block_hash); + if parent_block_id != block.header.parent_block_id { warn!("Error processing nakamoto block: Parent consensus hash does not match db view"; - "db_view" => %parent_ch, - "block_view" => %block.header.parent_consensus_hash); + "db.parent_block_id" => %parent_block_id, + "header.parent_block_id" => %block.header.parent_block_id); return Err(ChainstateError::InvalidStacksBlock( - "Parent consensus hash does not match".into(), + "Parent block does not match".into(), )); } - // check that the burnchain block that this block is associated with has been processed - let burn_view_hash = block.header.burn_view.clone(); + // check that the burnchain block that this block is associated with has been processed. + // N.B. we must first get its hash, and then verify that it's in the same Bitcoin fork as + // our `burn_dbconn` indicates. + let burn_header_hash = SortitionDB::get_burnchain_header_hash_by_consensus( + burn_dbconn, + &block.header.consensus_hash, + )? + .ok_or_else(|| { + warn!( + "Unrecognized consensus hash"; + "block_hash" => %block.header.block_hash(), + "consensus_hash" => %block.header.consensus_hash, + ); + ChainstateError::NoSuchBlockError + })?; + let sortition_tip = burn_dbconn.context.chain_tip.clone(); - let burn_view_height = burn_dbconn - .get_block_snapshot(&burn_view_hash, &sortition_tip)? + let burn_header_height = burn_dbconn + .get_block_snapshot(&burn_header_hash, &sortition_tip)? 
.ok_or_else(|| { warn!( "Tried to process Nakamoto block before its burn view was processed"; "block_hash" => block.header.block_hash(), - "burn_view" => %burn_view_hash, + "burn_header_hash" => %burn_header_hash, ); ChainstateError::NoSuchBlockError })? @@ -1279,8 +1641,6 @@ impl NakamotoChainState { let block_hash = block.header.block_hash(); - let parent_block_id = StacksChainState::get_index_hash(&parent_ch, &parent_block_hash); - let tenure_changed = block.tenure_changed(&parent_block_id); if !tenure_changed && (block.is_first_mined() || parent_ch != block.header.consensus_hash) { return Err(ChainstateError::ExpectedTenureChange); @@ -1322,14 +1682,13 @@ impl NakamotoChainState { burn_dbconn, pox_constants, &parent_chain_tip, - burn_view_hash, - burn_view_height.try_into().map_err(|_| { + burn_header_hash, + burn_header_height.try_into().map_err(|_| { ChainstateError::InvalidStacksBlock("Burn block height exceeded u32".into()) })?, parent_ch, parent_block_hash, mainnet, - None, tenure_changed, tenure_height, )?; @@ -1339,7 +1698,7 @@ impl NakamotoChainState { debug!( "Append nakamoto block"; "block" => format!("{}/{block_hash}", block.header.consensus_hash), - "parent_block" => format!("{parent_ch}/{parent_block_hash}"), + "parent_block" => %block.header.parent_block_id, "stacks_height" => next_block_height, "total_burns" => block.header.burn_spent, "evaluated_epoch" => %evaluated_epoch @@ -1372,22 +1731,18 @@ impl NakamotoChainState { // obtain reward info for receipt -- consolidate miner, user, and parent rewards into a // single list, but keep the miner/user/parent/info tuple for advancing the chain tip - let (matured_rewards, miner_payouts_opt) = if let Some(matured_miner_rewards) = - matured_miner_rewards_opt - { - let (miner_reward, mut user_rewards, parent_reward, reward_ptr) = matured_miner_rewards; - - let mut ret = vec![]; - ret.push(miner_reward.clone()); - ret.append(&mut user_rewards); - ret.push(parent_reward.clone()); - ( - ret, - 
Some((miner_reward, user_rewards, parent_reward, reward_ptr)), - ) - } else { - (vec![], None) - }; + // TODO: drop user burn support + let (matured_rewards, miner_payouts_opt) = + if let Some(matured_miner_rewards) = matured_miner_rewards_opt { + let (miner_reward, parent_reward, reward_ptr) = matured_miner_rewards; + + let mut ret = vec![]; + ret.push(miner_reward.clone()); + ret.push(parent_reward.clone()); + (ret, Some((miner_reward, parent_reward, reward_ptr))) + } else { + (vec![], None) + }; let mut lockup_events = match Self::finish_block(&mut clarity_tx, miner_payouts_opt.as_ref()) { @@ -1505,13 +1860,13 @@ impl NakamotoChainState { &parent_tenure_header.anchored_header.block_hash(), &parent_tenure_header.consensus_hash, &block_hash, + &block.header.consensus_hash, + next_block_height, block .get_coinbase_tx() .ok_or(ChainstateError::InvalidStacksBlock( "No coinbase transaction in tenure changing block".into(), ))?, - &block.header.consensus_hash, - next_block_height, parent_tenure_fees, burnchain_commit_burn, burnchain_sortition_burn, @@ -1523,9 +1878,7 @@ impl NakamotoChainState { None }; - let matured_rewards_info = miner_payouts_opt - .as_ref() - .map(|(_, _, _, info)| info.clone()); + let matured_rewards_info = miner_payouts_opt.as_ref().map(|(_, _, info)| info.clone()); let new_tip = Self::advance_tip( &mut chainstate_tx.tx, From 5c55d049c89e3d320f063595d91d0bc166b85533 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:40:26 -0400 Subject: [PATCH 007/122] chore: update Nakamoto block tests to reflect new structs --- stackslib/src/chainstate/nakamoto/tests.rs | 32 +++++++++------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests.rs b/stackslib/src/chainstate/nakamoto/tests.rs index ac3a86acb9..ad6575d648 100644 --- a/stackslib/src/chainstate/nakamoto/tests.rs +++ b/stackslib/src/chainstate/nakamoto/tests.rs @@ -134,8 +134,6 @@ pub fn nakamoto_advance_tip_simple() { version: 
100, chain_length: 1, burn_spent: 5, - parent: FIRST_STACKS_BLOCK_HASH, - burn_view: tip.burn_header_hash.clone(), tx_merkle_root: Sha512Trunc256Sum([0; 32]), state_index_root: TrieHash::from_hex( "9f283c59142dec747911897fc120f1d2af8c0384830a95e1847803ee31a70ab1", @@ -143,8 +141,8 @@ pub fn nakamoto_advance_tip_simple() { .unwrap(), stacker_signature: MessageSignature([0; 65]), miner_signature: MessageSignature([0; 65]), - consensus_hash: ConsensusHash([0; 20]), - parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH, + consensus_hash: tip.consensus_hash.clone(), + parent_block_id: parent_block_id.clone(), }, txs: vec![coinbase_tx, tenure_tx], }; @@ -289,14 +287,12 @@ pub fn staging_blocks() { version: 100, chain_length: 1, burn_spent: 10, - parent: BlockHeaderHash([1; 32]), - burn_view: BurnchainHeaderHash([1; 32]), tx_merkle_root: Sha512Trunc256Sum([0; 32]), state_index_root: TrieHash([0; 32]), stacker_signature: MessageSignature([0; 65]), miner_signature: MessageSignature([0; 65]), consensus_hash: ConsensusHash([2; 20]), - parent_consensus_hash: ConsensusHash([1; 20]), + parent_block_id: StacksBlockId([1; 32]), }, txs: vec![], }; @@ -318,20 +314,20 @@ pub fn staging_blocks() { let sortdb_conn = sort_db.index_handle_at_tip(); assert!( - NakamotoChainState::next_ready_block(&chainstate_tx) + NakamotoChainState::next_ready_nakamoto_block(&chainstate_tx) .unwrap() .is_none(), "No block should be ready yet", ); - let block_parent_id = - StacksBlockId::new(&block.header.parent_consensus_hash, &block.header.parent); + let block_parent_id = block.header.parent_block_id.clone(); NakamotoChainState::set_block_processed(&chainstate_tx, &block_parent_id).unwrap(); // block should be ready -- the burn view was processed before the block was inserted. 
- let ready_block = NakamotoChainState::next_ready_block(&chainstate_tx) + let ready_block = NakamotoChainState::next_ready_nakamoto_block(&chainstate_tx) .unwrap() - .unwrap(); + .unwrap() + .0; assert_eq!(ready_block.header.block_hash(), block.header.block_hash()); @@ -388,10 +384,10 @@ pub fn nakamoto_advance_tip_multiple() { let mut last_block: Option = None; let index_roots = [ "c76d48e971b2ea3c78c476486455090da37df260a41eef355d4e9330faf166c0", - "443403486d617e96e44aa6ff6056e575a7d29fd02a987452502e20c98929fe20", - "1c078414b996a42eabd7fc0b731d8ac49a74141313bdfbe4166349c3d1d27946", - "69cafb50ad1debcd0dee83d58b1a06060a5dd9597ec153e6129edd80c4368226", - "449f086937fda06db5859ce69c2c6bdd7d4d104bf4a6d2745bc81a17391daf36", + "20185974f1ab02d25c6920d755594ff9c104f70ae185aa8c112245eaef0078fd", + "a079309c45f5cb70be6f67cd442d50ba6c2154d77b819321a63e4ed077e46e59", + "1679af6d97e102a5762e88a876e74c0083ffb492f98bde249a36a6f53b81a2ad", + "5c989f8cbdfe054b3b8c1c2a4667e97d4f43b2eef6caffe569a61598e1492b04", ]; for i in 1..6 { @@ -483,14 +479,12 @@ pub fn nakamoto_advance_tip_multiple() { version: 100, chain_length: i.into(), burn_spent: 10, - parent, - burn_view: parent_snapshot.burn_header_hash.clone(), tx_merkle_root: Sha512Trunc256Sum([0; 32]), state_index_root: TrieHash::from_hex(&index_roots[usize::from(i) - 1]).unwrap(), stacker_signature: MessageSignature([0; 65]), miner_signature: MessageSignature([0; 65]), consensus_hash: new_ch, - parent_consensus_hash: parent_snapshot.consensus_hash.clone(), + parent_block_id: StacksBlockId::new(&parent_snapshot.consensus_hash, &parent), }, txs: vec![coinbase_tx, transacter_tx, tenure_tx], }; From 0eed15a1c2fdf7357b820c6228a3070de6870615 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:40:49 -0400 Subject: [PATCH 008/122] feat: is_first_index_block_hash --- stackslib/src/chainstate/stacks/block.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stackslib/src/chainstate/stacks/block.rs 
b/stackslib/src/chainstate/stacks/block.rs index a3b9324c5a..23346c9290 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -108,6 +108,15 @@ impl StacksBlockHeader { *to_check == FIRST_STACKS_BLOCK_HASH } + /// Is this the first-ever index block hash? + pub fn is_first_index_block_hash(to_check: &StacksBlockId) -> bool { + to_check + == &StacksBlockHeader::make_index_block_hash( + &FIRST_BURNCHAIN_CONSENSUS_HASH, + &FIRST_STACKS_BLOCK_HASH, + ) + } + /// Is this a first-mined block header? i.e. builds off of the boot code? pub fn is_first_mined(&self) -> bool { Self::is_first_block_hash(&self.parent_block) From 18de40061fe41f3100b0efb09962c67f7950244b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:41:06 -0400 Subject: [PATCH 009/122] feat: remove get_stacks_chain_tip() in favor of new Nakamoto-aware get_canonical_block_header() --- stackslib/src/chainstate/stacks/db/blocks.rs | 43 +++++++------------- 1 file changed, 14 insertions(+), 29 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index b2857231ae..106ce34c91 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -60,11 +60,13 @@ use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::BlockEventDispatcher; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::transactions::TransactionNonceMismatch; use crate::chainstate::stacks::db::*; +use crate::chainstate::stacks::events::StacksBlockEventData; use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::Error; use 
crate::chainstate::stacks::StacksBlockHeader; @@ -194,7 +196,7 @@ pub struct DummyEventDispatcher; impl BlockEventDispatcher for DummyEventDispatcher { fn announce_block( &self, - _block: &StacksBlock, + _block: StacksBlockEventData, _metadata: &StacksHeaderInfo, _receipts: &[StacksTransactionReceipt], _parent: &StacksBlockId, @@ -6528,7 +6530,7 @@ impl StacksChainState { &next_staging_block.parent_anchored_block_hash, ); dispatcher.announce_block( - &block, + block.into(), &epoch_receipt.header.clone(), &epoch_receipt.tx_receipts, &parent_id, @@ -6676,21 +6678,6 @@ impl StacksChainState { } } - /// Get the highest processed block on the canonical burn chain. - /// Break ties on lexigraphical ordering of the block hash - /// (i.e. arbitrarily). The staging block will be returned, but no block data will be filled - /// in. - pub fn get_stacks_chain_tip( - &self, - sortdb: &SortitionDB, - ) -> Result, Error> { - let (consensus_hash, block_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; - let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_bhh]; - query_row(&self.db(), sql, args).map_err(Error::DBError) - } - /// Get the parent block of `staging_block`. 
pub fn get_stacks_block_parent( &self, @@ -11414,12 +11401,11 @@ pub mod test { let sortdb = peer.sortdb.take().unwrap(); // definitely missing some blocks -- there are empty sortitions - let stacks_tip = peer - .chainstate() - .get_stacks_chain_tip(&sortdb) - .unwrap() - .unwrap(); - assert_eq!(stacks_tip.height, 8); + let stacks_tip = + NakamotoChainState::get_canonical_block_header(peer.chainstate().db(), &sortdb) + .unwrap() + .unwrap(); + assert_eq!(stacks_tip.anchored_header.height(), 8); // but we did process all burnchain operations let (consensus_hash, block_bhh) = @@ -12085,12 +12071,11 @@ pub mod test { let sortdb = peer.sortdb.take().unwrap(); // definitely missing some blocks -- there are empty sortitions - let stacks_tip = peer - .chainstate() - .get_stacks_chain_tip(&sortdb) - .unwrap() - .unwrap(); - assert_eq!(stacks_tip.height, 13); + let stacks_tip = + NakamotoChainState::get_canonical_block_header(peer.chainstate().db(), &sortdb) + .unwrap() + .unwrap(); + assert_eq!(stacks_tip.anchored_header.height(), 13); // but we did process all burnchain operations let (consensus_hash, block_bhh) = From defd3162bd1ae594243e7dcdc3fe84f0ada15d14 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:41:36 -0400 Subject: [PATCH 010/122] chore: document StacksHeaderInfo fields, and add accessor for nakamoto state --- stackslib/src/chainstate/stacks/db/mod.rs | 24 +++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 3a90a269e9..caeaafa336 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -188,14 +188,23 @@ impl From for StacksBlockHeaderTypes { #[derive(Debug, Clone, PartialEq)] pub struct StacksHeaderInfo { + /// Stacks block header pub anchored_header: StacksBlockHeaderTypes, + /// Last microblock header (Stacks 2.x only; this is None in Stacks 3.x) pub 
microblock_tail: Option, + /// Height of this Stacks block pub stacks_block_height: u64, + /// MARF root hash of the headers DB (not consensus critical) pub index_root: TrieHash, + /// consensus hash of the burnchain block in which this miner was selected to produce this block pub consensus_hash: ConsensusHash, + /// Hash of the burnchain block in which this miner was selected to produce this block pub burn_header_hash: BurnchainHeaderHash, + /// Height of the burnchain block pub burn_header_height: u32, + /// Timestamp of the burnchain block pub burn_header_timestamp: u64, + /// Size of the block corresponding to `anchored_header` in bytes pub anchored_block_size: u64, } @@ -264,13 +273,9 @@ impl StacksBlockHeaderTypes { } pub fn is_first_mined(&self) -> bool { - StacksBlockHeader::is_first_block_hash(self.parent()) - } - - pub fn parent(&self) -> &BlockHeaderHash { match self { - StacksBlockHeaderTypes::Epoch2(x) => &x.parent_block, - StacksBlockHeaderTypes::Nakamoto(x) => &x.parent, + StacksBlockHeaderTypes::Epoch2(x) => x.is_first_mined(), + StacksBlockHeaderTypes::Nakamoto(x) => x.is_first_mined(), } } @@ -287,6 +292,13 @@ impl StacksBlockHeaderTypes { _ => None, } } + + pub fn as_stacks_nakamoto(&self) -> Option<&NakamotoBlockHeader> { + match &self { + StacksBlockHeaderTypes::Nakamoto(ref x) => Some(x), + _ => None, + } + } } impl StacksHeaderInfo { From 489f385ccde1bf9fc23d75fc651a1f39056f69b3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:42:00 -0400 Subject: [PATCH 011/122] feat: capture event observer data in StacksBlockEventData, which is compatible with both StacksBlock and NakamotoBlock --- stackslib/src/chainstate/stacks/events.rs | 33 +++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/stackslib/src/chainstate/stacks/events.rs b/stackslib/src/chainstate/stacks/events.rs index 8b138c4b37..77358da4b2 100644 --- a/stackslib/src/chainstate/stacks/events.rs +++ b/stackslib/src/chainstate/stacks/events.rs @@ -1,5 
+1,7 @@ use crate::burnchains::Txid; use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::stacks::StacksBlock; use crate::chainstate::stacks::StacksMicroblockHeader; use crate::chainstate::stacks::StacksTransaction; use clarity::vm::analysis::ContractAnalysis; @@ -9,6 +11,7 @@ use clarity::vm::types::{ AssetIdentifier, PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, Value, }; use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::BlockHeaderHash; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::to_hex; @@ -55,3 +58,33 @@ pub struct StacksTransactionReceipt { /// This is really a string-formatted CheckError (which can't be clone()'ed) pub vm_error: Option, } + +#[derive(Clone)] +pub struct StacksBlockEventData { + pub block_hash: BlockHeaderHash, + pub parent_block_hash: BlockHeaderHash, + pub parent_microblock_hash: BlockHeaderHash, + pub parent_microblock_sequence: u16, +} + +impl From for StacksBlockEventData { + fn from(block: StacksBlock) -> StacksBlockEventData { + StacksBlockEventData { + block_hash: block.block_hash(), + parent_block_hash: block.header.parent_block, + parent_microblock_hash: block.header.parent_microblock, + parent_microblock_sequence: block.header.parent_microblock_sequence, + } + } +} + +impl From<(NakamotoBlock, BlockHeaderHash)> for StacksBlockEventData { + fn from(block: (NakamotoBlock, BlockHeaderHash)) -> StacksBlockEventData { + StacksBlockEventData { + block_hash: block.0.header.block_hash(), + parent_block_hash: block.1, + parent_microblock_hash: BlockHeaderHash([0u8; 32]), + parent_microblock_sequence: 0, + } + } +} From 28eedabf4fb0afe0bd1489040aaffb3106871901 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:42:29 -0400 Subject: [PATCH 012/122] chore: remove compiler warning --- stackslib/src/chainstate/stacks/index/storage.rs | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 78330b78f8..01da9b9804 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -873,7 +873,7 @@ impl TrieRAM { for j in 0..node_data.len() { let next_node = &mut self.data[node_data[j] as usize].0; if !next_node.is_leaf() { - let mut ptrs = next_node.ptrs_mut(); + let ptrs = next_node.ptrs_mut(); let num_children = ptrs.len(); for k in 0..num_children { if ptrs[k].id != TrieNodeID::Empty as u8 && !is_backptr(ptrs[k].id) { From 0337f1ee899b887c0e523afc3a5ca302218dbed7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:42:41 -0400 Subject: [PATCH 013/122] chore: remove nakamoto acceptance period --- stackslib/src/core/mod.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 102fedf7d4..59b184cd4b 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -63,10 +63,6 @@ pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; pub const PEER_VERSION_EPOCH_2_3: u8 = 0x08; pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; -/// How many tenures back from the chain tip does the node accept -/// blocks from a miner. -pub const NAKAMOTO_TENURE_BLOCK_ACCEPTANCE_PERIOD: usize = 3; - // this should be updated to the latest network epoch version supported by // this node. this will be checked by the `validate_epochs()` method. 
pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_4 as u32; From 1895e4557899dc30599a650dffda6dfd51b22c65 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:42:54 -0400 Subject: [PATCH 014/122] chore: remove `analyze-fees` directive (since it's only compatible with epoch2) and make the remaining directives compatible with epoch3 --- stackslib/src/main.rs | 236 ++++++++---------------------------------- 1 file changed, 46 insertions(+), 190 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index c7e7af0298..c56b70dec7 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -46,9 +46,11 @@ use blockstack_lib::burnchains::Burnchain; use blockstack_lib::burnchains::Txid; use blockstack_lib::burnchains::BLOCKSTACK_MAGIC_MAINNET; use blockstack_lib::chainstate::burn::ConsensusHash; +use blockstack_lib::chainstate::nakamoto::NakamotoChainState; use blockstack_lib::chainstate::stacks::db::blocks::DummyEventDispatcher; use blockstack_lib::chainstate::stacks::db::blocks::StagingBlock; use blockstack_lib::chainstate::stacks::db::ChainStateBootData; +use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::index::marf::MARFOpenOpts; use blockstack_lib::chainstate::stacks::index::marf::MarfConnection; use blockstack_lib::chainstate::stacks::index::marf::MARF; @@ -327,178 +329,6 @@ fn main() { process::exit(0); } - if argv[1] == "analyze-fees" { - if argv.len() < 4 { - eprintln!("Usage: {} analyze-fees CHAIN_STATE_DIR NUM_BLOCKS", argv[0]); - process::exit(1); - } - - let chain_state_path = format!("{}/mainnet/chainstate/", &argv[2]); - let sort_db_path = format!("{}/mainnet/burnchain/sortition", &argv[2]); - let (chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); - let sort_db = SortitionDB::open(&sort_db_path, false, PoxConstants::mainnet_default()) - .expect(&format!("Failed to open {}", &sort_db_path)); - - let 
num_blocks = argv[3].parse::().unwrap(); - - let mut block_info = chainstate - .get_stacks_chain_tip(&sort_db) - .unwrap() - .expect("FATAL: no chain tip"); - block_info.block_data = StacksChainState::load_block_bytes( - &chainstate.blocks_path, - &block_info.consensus_hash, - &block_info.anchored_block_hash, - ) - .unwrap() - .expect("No such block"); - - let mut tx_fees = HashMap::new(); - let mut tx_mined_heights = HashMap::new(); - let mut tx_mined_deltas: HashMap> = HashMap::new(); - - for _i in 0..num_blocks { - let block_hash = StacksBlockHeader::make_index_block_hash( - &block_info.consensus_hash, - &block_info.anchored_block_hash, - ); - debug!("Consider block {} ({} of {})", &block_hash, _i, num_blocks); - - let block = - StacksBlock::consensus_deserialize(&mut io::Cursor::new(&block_info.block_data)) - .map_err(|_e| { - eprintln!("Failed to decode block {}", &block_hash); - process::exit(1); - }) - .unwrap(); - - let microblocks = - StacksChainState::find_parent_microblock_stream(chainstate.db(), &block_info) - .unwrap() - .unwrap_or(vec![]); - - let mut txids_at_height = vec![]; - - for mblock in microblocks.iter() { - for tx in mblock.txs.iter() { - tx_fees.insert(tx.txid(), tx.get_tx_fee()); - txids_at_height.push(tx.txid()); - } - } - - for tx in block.txs.iter() { - if tx.get_tx_fee() > 0 { - // not a coinbase - tx_fees.insert(tx.txid(), tx.get_tx_fee()); - txids_at_height.push(tx.txid()); - } - } - - tx_mined_heights.insert(block_info.height, txids_at_height); - - // next block - block_info = match StacksChainState::load_staging_block_info( - chainstate.db(), - &StacksBlockHeader::make_index_block_hash( - &block_info.parent_consensus_hash, - &block_info.parent_anchored_block_hash, - ), - ) - .unwrap() - { - Some(blk) => blk, - None => { - break; - } - }; - block_info.block_data = StacksChainState::load_block_bytes( - &chainstate.blocks_path, - &block_info.consensus_hash, - &block_info.anchored_block_hash, - ) - .unwrap() - .expect("No such block"); 
- } - - let estimator = Box::new(UnitEstimator); - let metric = Box::new(UnitMetric); - let mempool_db = - MemPoolDB::open(true, CHAIN_ID_MAINNET, &chain_state_path, estimator, metric) - .expect("Failed to open mempool db"); - - let mut total_txs = 0; - for (_, txids) in tx_mined_heights.iter() { - total_txs += txids.len(); - } - - let mut tx_cnt = 0; - for (mined_height, txids) in tx_mined_heights.iter() { - for txid in txids.iter() { - tx_cnt += 1; - if tx_cnt % 100 == 0 { - debug!("Check tx {} of {}", tx_cnt, total_txs); - } - - if let Some(txinfo) = MemPoolDB::get_tx(&mempool_db.db, txid).unwrap() { - let delta = mined_height.saturating_sub(txinfo.metadata.block_height); - if let Some(txids_at_delta) = tx_mined_deltas.get_mut(&delta) { - txids_at_delta.push(txid.clone()); - } else { - tx_mined_deltas.insert(delta, vec![txid.clone()]); - } - } - } - } - - let mut deltas: Vec<_> = tx_mined_deltas.keys().collect(); - deltas.sort(); - - let mut reports = vec![]; - for delta in deltas { - let mut delta_tx_fees = vec![]; - let empty_txids = vec![]; - let txids = tx_mined_deltas.get(&delta).unwrap_or(&empty_txids); - if txids.len() == 0 { - continue; - } - for txid in txids.iter() { - delta_tx_fees.push(*tx_fees.get(txid).unwrap_or(&0)); - } - delta_tx_fees.sort(); - let total_tx_fees = delta_tx_fees.iter().fold(0, |acc, x| acc + x); - - let avg_tx_fee = if delta_tx_fees.len() > 0 { - total_tx_fees / (delta_tx_fees.len() as u64) - } else { - 0 - }; - let min_tx_fee = *delta_tx_fees.iter().min().unwrap_or(&0); - let median_tx_fee = delta_tx_fees[delta_tx_fees.len() / 2]; - let percent_90_tx_fee = delta_tx_fees[(delta_tx_fees.len() * 90) / 100]; - let percent_95_tx_fee = delta_tx_fees[(delta_tx_fees.len() * 95) / 100]; - let percent_99_tx_fee = delta_tx_fees[(delta_tx_fees.len() * 99) / 100]; - let max_tx_fee = *delta_tx_fees.iter().max().unwrap_or(&0); - - reports.push(json!({ - "delta": format!("{}", delta), - "tx_total": format!("{}", delta_tx_fees.len()), - 
"tx_fees": json!({ - "avg": format!("{}", avg_tx_fee), - "min": format!("{}", min_tx_fee), - "max": format!("{}", max_tx_fee), - "p50": format!("{}", median_tx_fee), - "p90": format!("{}", percent_90_tx_fee), - "p95": format!("{}", percent_95_tx_fee), - "p99": format!("{}", percent_99_tx_fee), - }), - })); - } - - println!("{}", serde_json::Value::Array(reports).to_string()); - process::exit(0); - } - if argv[1] == "get-block-inventory" { if argv.len() < 3 { eprintln!( @@ -757,11 +587,13 @@ simulating a miner. let mut mempool_db = MemPoolDB::open(true, chain_id, &chain_state_path, estimator, metric) .expect("Failed to open mempool db"); - let stacks_block = chain_state.get_stacks_chain_tip(&sort_db).unwrap().unwrap(); + let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap(); let parent_header = StacksChainState::get_anchored_block_header_info( chain_state.db(), - &stacks_block.consensus_hash, - &stacks_block.anchored_block_hash, + &header_tip.consensus_hash, + &header_tip.anchored_header.block_hash(), ) .expect("Failed to load chain tip header info") .expect("Failed to load chain tip header info"); @@ -1458,27 +1290,49 @@ simulating a miner. 
tx.commit().unwrap(); } - let stacks_chain_tip = chain_state.get_stacks_chain_tip(&sort_db).unwrap().unwrap(); + let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap(); // Find ancestor block - let mut stacks_block = stacks_chain_tip.to_owned(); + let mut stacks_header = header_tip.to_owned(); loop { - let stacks_parent_block = chain_state - .get_stacks_block_parent(&stacks_block) - .unwrap() - .unwrap(); - if stacks_parent_block.height < mine_tip_height { + let parent_block_id = match stacks_header.anchored_header { + StacksBlockHeaderTypes::Nakamoto(ref nakamoto_header) => { + nakamoto_header.parent_block_id.clone() + } + StacksBlockHeaderTypes::Epoch2(ref epoch2_header) => { + let block_info = StacksChainState::load_staging_block( + chain_state.db(), + &chain_state.blocks_path, + &stacks_header.consensus_hash, + &epoch2_header.block_hash(), + ) + .unwrap() + .unwrap(); + StacksBlockId::new( + &block_info.parent_consensus_hash, + &epoch2_header.parent_block, + ) + } + }; + + let stacks_parent_header = + NakamotoChainState::get_block_header(chain_state.db(), &parent_block_id) + .unwrap() + .unwrap(); + if stacks_parent_header.anchored_header.height() < mine_tip_height { break; } - stacks_block = stacks_parent_block; + stacks_header = stacks_parent_header; } info!( "Found stacks_chain_tip with height {}", - stacks_chain_tip.height + header_tip.anchored_header.height() ); info!( "Mining off parent block with height {}", - stacks_block.height + stacks_header.anchored_header.height() ); info!( @@ -1522,8 +1376,8 @@ simulating a miner. let result = mempool_db.submit( &mut chain_state, &sort_db, - &stacks_block.consensus_hash, - &stacks_block.anchored_block_hash, + &stacks_header.consensus_hash, + &stacks_header.anchored_header.block_hash(), &raw_tx, None, &ExecutionCost::max_value(), @@ -1549,10 +1403,12 @@ simulating a miner.
let start = get_epoch_time_ms(); - let parent_header = StacksChainState::get_anchored_block_header_info( + let parent_header = NakamotoChainState::get_block_header( chain_state.db(), - &stacks_block.consensus_hash, - &stacks_block.anchored_block_hash, + &StacksBlockId::new( + &stacks_header.consensus_hash, + &stacks_header.anchored_header.block_hash(), + ), ) .expect("Failed to load chain tip header info") .expect("Failed to load chain tip header info"); From 30331a7c71f7f423b7a4e886211dcad49025a915 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Mon, 23 Oct 2023 23:43:23 -0400 Subject: [PATCH 015/122] chore: use get_canonical_block_header --- stackslib/src/net/download.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/download.rs b/stackslib/src/net/download.rs index 32eaa1882f..32dec4e1d5 100644 --- a/stackslib/src/net/download.rs +++ b/stackslib/src/net/download.rs @@ -47,6 +47,7 @@ use crate::burnchains::Burnchain; use crate::burnchains::BurnchainView; use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB, SortitionDBConn}; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as chainstate_error; use crate::chainstate::stacks::StacksBlockHeader; @@ -3294,18 +3295,20 @@ pub mod test { >| { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let stacks_tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); let parent_tip = match stacks_tip_opt { None => { StacksChainState::get_genesis_header_info(chainstate.db()).unwrap() } - Some(staging_block) => { + Some(header) => { let ic = sortdb.index_conn(); let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, &tip.sortition_id, - 
&staging_block.anchored_block_hash, + &header.anchored_header.block_hash(), ) .unwrap() .unwrap(); // succeeds because we don't fork From e47ab7569f325d76c7b1ad176fead0b50a6348aa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Oct 2023 00:06:17 -0400 Subject: [PATCH 016/122] chore: use get_canonical_block_header() and document test methods --- stackslib/src/net/mod.rs | 62 +++++++++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 16 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index fe01ca1c75..051b8c722b 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -83,6 +83,7 @@ use crate::chainstate::burn::operations::PegOutFulfillOp; use crate::chainstate::burn::operations::PegOutRequestOp; use crate::chainstate::burn::{ConsensusHash, Opcodes}; use crate::chainstate::coordinator::Error as coordinator_error; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::blocks::MemPoolRejection; use crate::chainstate::stacks::index::Error as marf_error; use crate::chainstate::stacks::Error as chainstate_error; @@ -2432,7 +2433,9 @@ pub mod test { use crate::chainstate::stacks::tests::*; use crate::chainstate::stacks::StacksMicroblockHeader; use crate::chainstate::stacks::*; - use crate::chainstate::stacks::{db::accounts::MinerReward, events::StacksTransactionReceipt}; + use crate::chainstate::stacks::{ + db::accounts::MinerReward, events::StacksBlockEventData, events::StacksTransactionReceipt, + }; use crate::chainstate::*; use crate::core::StacksEpoch; use crate::core::StacksEpochExtension; @@ -2653,7 +2656,7 @@ pub mod test { #[derive(Clone)] pub struct TestEventObserverBlock { - pub block: StacksBlock, + pub block: StacksBlockEventData, pub metadata: StacksHeaderInfo, pub receipts: Vec, pub parent: StacksBlockId, @@ -2681,7 +2684,7 @@ pub mod test { impl BlockEventDispatcher for TestEventObserver { fn announce_block( &self, - block: &StacksBlock, + block: 
StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[events::StacksTransactionReceipt], parent: &StacksBlockId, @@ -3330,12 +3333,13 @@ pub mod test { let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; - let stacks_tip_height = stacks_node - .chainstate - .get_stacks_chain_tip(&sortdb) - .unwrap() - .map(|blkdat| blkdat.height) - .unwrap_or(0); + let stacks_tip_height = NakamotoChainState::get_canonical_block_header( + stacks_node.chainstate.db(), + &sortdb, + ) + .unwrap() + .map(|hdr| hdr.anchored_header.height()) + .unwrap_or(0); let ibd = TestPeer::infer_initial_burnchain_block_download( &self.config.burnchain, stacks_tip_height, @@ -3382,12 +3386,13 @@ pub mod test { let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; - let stacks_tip_height = stacks_node - .chainstate - .get_stacks_chain_tip(&sortdb) - .unwrap() - .map(|blkdat| blkdat.height) - .unwrap_or(0); + let stacks_tip_height = NakamotoChainState::get_canonical_block_header( + stacks_node.chainstate.db(), + &sortdb, + ) + .unwrap() + .map(|hdr| hdr.anchored_header.height()) + .unwrap_or(0); let ibd = TestPeer::infer_initial_burnchain_block_download( &self.config.burnchain, stacks_tip_height, @@ -3492,6 +3497,18 @@ pub mod test { } } + /// Generate and commit the next burnchain block with the given block operations. + /// * if `set_consensus_hash` is true, then each op's consensus_hash field will be set to + /// that of the resulting block snapshot. + /// * if `set_burn_hash` is true, then each op's burnchain header hash field will be set to + /// that of the resulting block snapshot. 
+ /// + /// Returns ( + /// burnchain tip block height, + /// burnchain tip block hash, + /// burnchain tip consensus hash, + /// Option + /// ) fn inner_next_burnchain_block( &mut self, mut blockstack_ops: Vec, @@ -3562,6 +3579,7 @@ pub mod test { ) .unwrap(); + // NOTE: this is harmless in the Nakamoto epoch, but it will never be read Burnchain::process_affirmation_maps( &self.config.burnchain, &mut burnchain_db, @@ -3600,6 +3618,8 @@ pub mod test { ) } + /// Pre-process an epoch 2.x Stacks block. + /// Validate it and store it to staging. pub fn preprocess_stacks_block(&mut self, block: &StacksBlock) -> Result { let sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); @@ -3664,6 +3684,8 @@ pub mod test { res } + /// Preprocess epoch 2.x microblocks. + /// Validate them and store them to staging. pub fn preprocess_stacks_microblocks( &mut self, microblocks: &Vec, @@ -3714,6 +3736,8 @@ pub mod test { res } + /// Store the given epoch 2.x Stacks block and microblock to staging, and then try and + /// process them. pub fn process_stacks_epoch_at_tip( &mut self, block: &StacksBlock, @@ -3747,6 +3771,8 @@ pub mod test { self.stacks_node = Some(node); } + /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, + /// using the given sortition DB as well, and then try and process them. fn inner_process_stacks_epoch_at_tip( &mut self, sortdb: &SortitionDB, @@ -3777,6 +3803,8 @@ pub mod test { Ok(()) } + /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, + /// and then try and process them. pub fn process_stacks_epoch_at_tip_checked( &mut self, block: &StacksBlock, @@ -3791,6 +3819,8 @@ pub mod test { res } + /// Accept a new Stacks block and microblocks via the relayer, and then try to process + /// them. pub fn process_stacks_epoch( &mut self, block: &StacksBlock, @@ -3951,7 +3981,7 @@ pub mod test { } /// Make a tenure with the given transactions. 
Creates a coinbase tx with the given nonce, and then increments - /// the provided reference. + /// the provided reference. pub fn tenure_with_txs( &mut self, txs: &[StacksTransaction], From 7e75e42be5840b10ca81b1889832496ead2b102e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Oct 2023 00:06:40 -0400 Subject: [PATCH 017/122] chore: use get_canonical_block_header --- stackslib/src/net/relay.rs | 76 +++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 26 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index def67166c7..05548a1882 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -43,6 +43,7 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::coordinator::comm::CoordinatorChannels; use crate::chainstate::coordinator::BlockEventDispatcher; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt, StacksHeaderInfo}; use crate::chainstate::stacks::events::StacksTransactionReceipt; @@ -1576,21 +1577,22 @@ impl Relayer { mempool: &mut MemPoolDB, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result, StacksTransaction)>, net_error> { - let chain_tip = match chainstate.get_stacks_chain_tip(sortdb)? { - Some(tip) => tip, - None => { - debug!( - "No Stacks chain tip; dropping {} transaction(s)", - network_result.pushed_transactions.len() - ); - return Ok(vec![]); - } - }; + let chain_tip = + match NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? { + Some(tip) => tip, + None => { + debug!( + "No Stacks chain tip; dropping {} transaction(s)", + network_result.pushed_transactions.len() + ); + return Ok(vec![]); + } + }; let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), network_result.burn_height)? 
.expect("FATAL: no epoch defined") .epoch_id; - let chain_height = chain_tip.height; + let chain_height = chain_tip.anchored_header.height(); Relayer::filter_problematic_transactions(network_result, chainstate.mainnet, epoch_id); if let Err(e) = PeerNetwork::store_transactions( @@ -4753,7 +4755,11 @@ pub mod test { let tip_opt = peers[1] .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let tip_opt = NakamotoChainState::get_canonical_block_header( + chainstate.db(), + sortdb, + ) + .unwrap(); Ok(tip_opt) }) .unwrap(); @@ -4883,7 +4889,11 @@ pub mod test { let tip_opt = peers[1] .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let tip_opt = NakamotoChainState::get_canonical_block_header( + chainstate.db(), + sortdb, + ) + .unwrap(); Ok(tip_opt) }) .unwrap(); @@ -4896,10 +4906,14 @@ pub mod test { if let Some(tip) = tip_opt { debug!( "Push at {}, need {}", - tip.height - peers[1].config.burnchain.first_block_height - 1, + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, *pushed_i ); - if tip.height - peers[1].config.burnchain.first_block_height - 1 + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 == *pushed_i as u64 { // next block @@ -4922,10 +4936,14 @@ pub mod test { } debug!( "Sortition at {}, need {}", - tip.height - peers[1].config.burnchain.first_block_height - 1, + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, *i ); - if tip.height - peers[1].config.burnchain.first_block_height - 1 + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 == *i as u64 { let event_id = { @@ -5548,15 +5566,17 @@ pub mod test { microblock_parent_opt: Option<&StacksMicroblockHeader>| { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let stacks_tip_opt = 
chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); let parent_tip = match stacks_tip_opt { None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(staging_block) => { + Some(header_tip) => { let ic = sortdb.index_conn(); let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, &tip.sortition_id, - &staging_block.anchored_block_hash, + &header_tip.anchored_header.block_hash(), ) .unwrap() .unwrap(); // succeeds because we don't fork @@ -5717,15 +5737,17 @@ pub mod test { microblock_parent_opt: Option<&StacksMicroblockHeader>| { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let stacks_tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); let parent_tip = match stacks_tip_opt { None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(staging_block) => { + Some(header_tip) => { let ic = sortdb.index_conn(); let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, &tip.sortition_id, - &staging_block.anchored_block_hash, + &header_tip.anchored_header.block_hash(), ) .unwrap() .unwrap(); // succeeds because we don't fork @@ -5896,15 +5918,17 @@ pub mod test { microblock_parent_opt: Option<&StacksMicroblockHeader>| { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let stacks_tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); let parent_tip = match stacks_tip_opt { None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(staging_block) => { + Some(header_tip) => { let ic = sortdb.index_conn(); let snapshot = 
SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, &tip.sortition_id, - &staging_block.anchored_block_hash, + &header_tip.anchored_header.block_hash(), ) .unwrap() .unwrap(); // succeeds because we don't fork From 144de2dda83ab692983fbdf9e939678224292d35 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Oct 2023 00:06:57 -0400 Subject: [PATCH 018/122] chore: use get_canonical_block_header --- stackslib/src/net/rpc.rs | 57 ++++++++++++++++++++++------------------ 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index f3f9b1f7df..301e742454 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -62,6 +62,7 @@ use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::burn::Opcodes; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; use crate::chainstate::stacks::db::blocks::CheckError; use crate::chainstate::stacks::db::{blocks::MINIMUM_TX_FEE_RATE_PER_BYTE, StacksChainState}; @@ -2047,10 +2048,10 @@ impl ConversationHttp { if let Some(unconfirmed_chain_tip) = unconfirmed_chain_tip_opt { Ok(Some(unconfirmed_chain_tip)) } else { - match chainstate.get_stacks_chain_tip(sortdb)? { + match NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? { Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( &tip.consensus_hash, - &tip.anchored_block_hash, + &tip.anchored_header.block_hash(), ))), None => { let response_metadata = HttpResponseMetadata::from_http_request_type( @@ -2068,24 +2069,26 @@ impl ConversationHttp { } } TipRequest::SpecificTip(tip) => Ok(Some(*tip).clone()), - TipRequest::UseLatestAnchoredTip => match chainstate.get_stacks_chain_tip(sortdb)? 
{ - Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( - &tip.consensus_hash, - &tip.anchored_block_hash, - ))), - None => { - let response_metadata = HttpResponseMetadata::from_http_request_type( - req, - Some(canonical_stacks_tip_height), - ); - warn!("Failed to load Stacks chain tip"); - let response = HttpResponseType::ServerError( - response_metadata, - format!("Failed to load Stacks chain tip"), - ); - response.send(http, fd).and_then(|_| Ok(None)) + TipRequest::UseLatestAnchoredTip => { + match NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? { + Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + ))), + None => { + let response_metadata = HttpResponseMetadata::from_http_request_type( + req, + Some(canonical_stacks_tip_height), + ); + warn!("Failed to load Stacks chain tip"); + let response = HttpResponseType::ServerError( + response_metadata, + format!("Failed to load Stacks chain tip"), + ); + response.send(http, fd).and_then(|_| Ok(None)) + } } - }, + } } } @@ -2532,9 +2535,8 @@ impl ConversationHttp { let response_metadata = HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); let response = HttpResponseType::MemPoolTxStream(response_metadata); - let height = chainstate - .get_stacks_chain_tip(sortdb)? - .map(|blk| blk.height) + let height = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? + .map(|hdr| hdr.anchored_header.height()) .unwrap_or(0); debug!( @@ -3103,7 +3105,7 @@ impl ConversationHttp { None } HttpRequestType::PostTransaction(ref _md, ref tx, ref attachment) => { - match chainstate.get_stacks_chain_tip(sortdb)? { + match NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? 
{ Some(tip) => { let accepted = ConversationHttp::handle_post_transaction( &mut self.connection.protocol, @@ -3112,7 +3114,7 @@ impl ConversationHttp { chainstate, sortdb, tip.consensus_hash, - tip.anchored_block_hash, + tip.anchored_header.block_hash(), mempool, tx.clone(), &mut network.atlasdb, @@ -4865,10 +4867,13 @@ mod test { let mut sortdb = peer_server.sortdb.as_mut().unwrap(); let chainstate = &mut peer_server.stacks_node.as_mut().unwrap().chainstate; let stacks_block_id = { - let tip = chainstate.get_stacks_chain_tip(sortdb).unwrap().unwrap(); + let tip = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap() + .unwrap(); StacksBlockHeader::make_index_block_hash( &tip.consensus_hash, - &tip.anchored_block_hash, + &tip.anchored_header.block_hash(), ) }; let pox_info = RPCPoxInfoData::from_db( From a7ab086c837e41c674c61c1195a061c006383cf5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Oct 2023 00:07:19 -0400 Subject: [PATCH 019/122] feat: use get_canonical_block_header --- stackslib/src/net/stackerdb/config.rs | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index a32f41bafa..ed5120dd37 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -53,6 +53,7 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Hash160; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as chainstate_error; use crate::clarity_vm::clarity::{ClarityReadOnlyConnection, Error as clarity_error}; @@ -445,9 +446,9 @@ impl StackerDBConfig { sortition_db: &SortitionDB, contract_id: &QualifiedContractIdentifier, ) -> Result { - let chain_tip = chainstate - .get_stacks_chain_tip(sortition_db)? 
- .ok_or(net_error::NoSuchStackerDB(contract_id.clone()))?; + let chain_tip = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortition_db)? + .ok_or(net_error::NoSuchStackerDB(contract_id.clone()))?; let burn_tip = SortitionDB::get_block_snapshot_consensus( sortition_db.conn(), @@ -455,8 +456,10 @@ impl StackerDBConfig { )? .expect("FATAL: missing snapshot for Stacks block"); - let chain_tip_hash = - StacksBlockId::new(&chain_tip.consensus_hash, &chain_tip.anchored_block_hash); + let chain_tip_hash = StacksBlockId::new( + &chain_tip.consensus_hash, + &chain_tip.anchored_header.block_hash(), + ); let cur_epoch = SortitionDB::get_stacks_epoch(sortition_db.conn(), burn_tip.block_height)? .expect("FATAL: no epoch defined"); From 7705961d68e7d648ed51e453c1cf6d4629b7b3bd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Oct 2023 00:07:40 -0400 Subject: [PATCH 020/122] chore: use StacksBlockEventData --- testnet/stacks-node/src/event_dispatcher.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 77fba0b4d0..6d3f216ea4 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -16,7 +16,7 @@ use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use stacks::chainstate::stacks::db::StacksHeaderInfo; use stacks::chainstate::stacks::events::{ - StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin, + StacksBlockEventData, StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin, }; use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::TransactionPayload; @@ -354,7 +354,7 @@ impl EventObserver { fn make_new_block_processed_payload( &self, filtered_events: Vec<(usize, &(bool, Txid, &StacksTransactionEvent))>, - block: &StacksBlock, + 
block: StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent_index_hash: &StacksBlockId, @@ -385,17 +385,17 @@ impl EventObserver { // Wrap events json!({ - "block_hash": format!("0x{}", block.block_hash()), + "block_hash": format!("0x{}", block.block_hash), "block_height": metadata.stacks_block_height, "burn_block_hash": format!("0x{}", metadata.burn_header_hash), "burn_block_height": metadata.burn_header_height, "miner_txid": format!("0x{}", winner_txid), "burn_block_time": metadata.burn_header_timestamp, "index_block_hash": format!("0x{}", metadata.index_block_hash()), - "parent_block_hash": format!("0x{}", block.header.parent_block), + "parent_block_hash": format!("0x{}", block.parent_block_hash), "parent_index_block_hash": format!("0x{}", parent_index_hash), - "parent_microblock": format!("0x{}", block.header.parent_microblock), - "parent_microblock_sequence": block.header.parent_microblock_sequence, + "parent_microblock": format!("0x{}", block.parent_microblock_hash), + "parent_microblock_sequence": block.parent_microblock_sequence, "matured_miner_rewards": mature_rewards.clone(), "events": serialized_events, "transactions": serialized_txs, @@ -481,7 +481,7 @@ impl StackerDBEventDispatcher for EventDispatcher { impl BlockEventDispatcher for EventDispatcher { fn announce_block( &self, - block: &StacksBlock, + block: StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent: &StacksBlockId, @@ -683,7 +683,7 @@ impl EventDispatcher { pub fn process_chain_tip( &self, - block: &StacksBlock, + block: StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent_index_hash: &StacksBlockId, @@ -733,7 +733,7 @@ impl EventDispatcher { let payload = self.registered_observers[observer_id] .make_new_block_processed_payload( filtered_events, - block, + block.clone(), metadata, receipts, parent_index_hash, @@ -1095,7 +1095,7 @@ mod test { let payload = 
observer.make_new_block_processed_payload( filtered_events, - &block, + block.into(), &metadata, &receipts, &parent_index_hash, From d4b68a31222967eb16f4bbf20732058ec270fab1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Oct 2023 00:07:53 -0400 Subject: [PATCH 021/122] chore: use get_canonical_block_header --- testnet/stacks-node/src/neon_node.rs | 31 +++++++++++++++------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 78e474ac70..aa0c31c9a1 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -165,6 +165,7 @@ use stacks::chainstate::burn::BlockSnapshot; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; +use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksHeaderInfo; @@ -1362,9 +1363,9 @@ impl BlockMinerThread { burn_db: &mut SortitionDB, chain_state: &mut StacksChainState, ) -> Option { - if let Some(stacks_tip) = chain_state - .get_stacks_chain_tip(burn_db) - .expect("FATAL: could not query chain tip") + if let Some(stacks_tip) = + NakamotoChainState::get_canonical_block_header(chain_state.db(), burn_db) + .expect("FATAL: could not query chain tip") { let miner_address = self .keychain @@ -1376,7 +1377,7 @@ impl BlockMinerThread { &self.burn_block, miner_address, &stacks_tip.consensus_hash, - &stacks_tip.anchored_block_hash, + &stacks_tip.anchored_header.block_hash(), ) { Ok(parent_info) => Some(parent_info), Err(Error::BurnchainTipChanged) => { @@ -1766,16 +1767,16 @@ impl BlockMinerThread { let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: could not query canonical 
sortition DB tip"); - if let Some(stacks_tip) = chainstate - .get_stacks_chain_tip(sortdb) - .expect("FATAL: could not query canonical Stacks chain tip") + if let Some(stacks_tip) = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .expect("FATAL: could not query canonical Stacks chain tip") { // if a block hasn't been processed within some deadline seconds of receipt, don't block // mining let process_deadline = get_epoch_time_secs() - unprocessed_block_deadline; let has_unprocessed = StacksChainState::has_higher_unprocessed_blocks( chainstate.db(), - stacks_tip.height, + stacks_tip.anchored_header.height(), process_deadline, ) .expect("FATAL: failed to query staging blocks"); @@ -1797,7 +1798,9 @@ impl BlockMinerThread { // NOTE: this could be None if it's not part of the canonical PoX fork any // longer if let Some(highest_unprocessed_block_sn) = highest_unprocessed_block_sn_opt { - if stacks_tip.height + (burnchain.pox_constants.prepare_length as u64) - 1 + if stacks_tip.anchored_header.height() + + (burnchain.pox_constants.prepare_length as u64) + - 1 >= highest_unprocessed.height && highest_unprocessed_block_sn.block_height + (burnchain.pox_constants.prepare_length as u64) @@ -2010,9 +2013,9 @@ impl BlockMinerThread { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if let Some(stacks_tip) = chain_state - .get_stacks_chain_tip(&burn_db) - .expect("FATAL: could not query chain tip") + if let Some(stacks_tip) = + NakamotoChainState::get_canonical_block_header(chain_state.db(), &burn_db) + .expect("FATAL: could not query chain tip") { let is_miner_blocked = self .globals @@ -2027,7 +2030,7 @@ impl BlockMinerThread { &chain_state, self.config.miner.unprocessed_block_deadline_secs, ); - if stacks_tip.anchored_block_hash != anchored_block.header.parent_block + if stacks_tip.anchored_header.block_hash() != 
anchored_block.header.parent_block || parent_block_info.parent_consensus_hash != stacks_tip.consensus_hash || cur_burn_chain_tip.burn_header_hash != self.burn_block.burn_header_hash || is_miner_blocked @@ -2046,7 +2049,7 @@ impl BlockMinerThread { "old_tip_burn_block_height" => self.burn_block.block_height, "old_tip_burn_block_sortition_id" => %self.burn_block.sortition_id, "attempt" => attempt, - "new_stacks_tip_block_hash" => %stacks_tip.anchored_block_hash, + "new_stacks_tip_block_hash" => %stacks_tip.anchored_header.block_hash(), "new_stacks_tip_consensus_hash" => %stacks_tip.consensus_hash, "new_tip_burn_block_height" => cur_burn_chain_tip.block_height, "new_tip_burn_block_sortition_id" => %cur_burn_chain_tip.sortition_id, From 11b2bb6483a124972e15554cf700b2b1395a80fc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Oct 2023 00:08:09 -0400 Subject: [PATCH 022/122] chore: use StacksBlockEventData --- testnet/stacks-node/src/run_loop/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 176dcf6922..d9987f0ac0 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -181,7 +181,7 @@ pub fn announce_boot_receipts( debug!("Push {} boot receipts", &boot_receipts.len()); event_dispatcher.announce_block( - &block_0, + block_0.into(), &block_header_0, boot_receipts, &StacksBlockId::sentinel(), From 85f925d42fc2947ff58b231d693e879210fb028b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 24 Oct 2023 00:08:27 -0400 Subject: [PATCH 023/122] feat: WIP initial coordinator implementation for Nakamoto --- .../chainstate/nakamoto/coordinator/mod.rs | 720 ++++++++++++++++++ .../chainstate/nakamoto/coordinator/tests.rs | 24 + 2 files changed, 744 insertions(+) create mode 100644 stackslib/src/chainstate/nakamoto/coordinator/mod.rs create mode 100644 stackslib/src/chainstate/nakamoto/coordinator/tests.rs diff --git 
a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs new file mode 100644 index 0000000000..fc04febdd3 --- /dev/null +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -0,0 +1,720 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::collections::VecDeque; +use std::sync::Arc; +use std::sync::Mutex; + +use clarity::vm::database::BurnStateDB; + +use crate::burnchains::db::BurnchainBlockData; +use crate::burnchains::db::BurnchainDB; +use crate::burnchains::db::BurnchainHeaderReader; +use crate::burnchains::Burnchain; +use crate::burnchains::BurnchainBlockHeader; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::comm::{ + CoordinatorChannels, CoordinatorCommunication, CoordinatorEvents, CoordinatorNotices, + CoordinatorReceivers, +}; + +use crate::chainstate::coordinator::{ + calculate_paid_rewards, dispatcher_announce_burn_ops, BlockEventDispatcher, ChainsCoordinator, + Error, OnChainRewardSetProvider, PaidRewards, PoxAnchorBlockStatus, RewardCycleInfo, + RewardSetProvider, +}; +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::miner::signal_mining_blocked; +use crate::chainstate::stacks::miner::signal_mining_ready; +use crate::chainstate::stacks::miner::MinerStatus; + +use crate::cost_estimates::CostEstimator; +use crate::cost_estimates::FeeEstimator; + +use crate::monitoring::increment_stx_blocks_processed_counter; + +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::chainstate::SortitionId; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::StacksEpoch; +use stacks_common::types::StacksEpochId; + +#[cfg(test)] +pub mod tests; + +impl OnChainRewardSetProvider { + pub fn get_reward_set_nakamoto( + &self, + // NOTE: this value is the first burnchain block in the prepare phase which has a Stacks + // block (unlike in Stacks 2.x, where this is the first block of the reward phase) + 
current_burn_height: u64, + chainstate: &mut StacksChainState, + burnchain: &Burnchain, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + ) -> Result { + let registered_addrs = + chainstate.get_reward_addresses(burnchain, sortdb, current_burn_height, block_id)?; + + let liquid_ustx = chainstate.get_liquid_ustx(block_id); + + let (threshold, participation) = StacksChainState::get_reward_threshold_and_participation( + &burnchain.pox_constants, + ®istered_addrs[..], + liquid_ustx, + ); + + let cur_epoch = + SortitionDB::get_stacks_epoch(sortdb.conn(), current_burn_height)?.expect(&format!( + "FATAL: no epoch defined for burn height {}", + current_burn_height + )); + + if participation == 0 { + // no one is stacking. This is a fatal error. + error!("No PoX participation. Aborting."); + panic!(); + } + + info!("PoX reward cycle threshold computed"; + "burn_height" => current_burn_height, + "threshold" => threshold, + "participation" => participation, + "liquid_ustx" => liquid_ustx, + "registered_addrs" => registered_addrs.len()); + + Ok(StacksChainState::make_reward_set( + threshold, + registered_addrs, + cur_epoch.epoch_id, + )) + } +} + +/// Find the ordered sequence of sortitions from a given burnchain block back to the start of +/// the burnchain block's reward cycle's prepare phase. If the burnchain block is not in a prepare +/// phase, then the returned list is empty. If the burnchain block is in a prepare phase, then all +/// consensus hashes back to the first block in the prepare phase are loaded and returned in +/// ascending height order. +fn find_prepare_phase_sortitions( + sort_db: &SortitionDB, + burnchain: &Burnchain, + sortition_tip: &SortitionId, +) -> Result, Error> { + let sn = SortitionDB::get_block_snapshot(sort_db.conn(), sortition_tip)? 
+ .ok_or(DBError::NotFoundError)?; + + let mut sns = vec![]; + let mut height = sn.block_height; + sns.push(sn); + + while burnchain.is_in_prepare_phase(height) && height > 0 { + let Some(sn) = SortitionDB::get_block_snapshot( + sort_db.conn(), + &sns.last() + .as_ref() + .expect("FATAL; unreachable: sns is never empty") + .sortition_id, + )? + else { + break; + }; + height = sn.block_height.saturating_sub(1); + sns.push(sn); + } + + sns.reverse(); + Ok(sns) +} + +/// Try to get the reward cycle information for a Nakamoto reward cycle. +/// In Nakamoto, the PoX anchor block for reward cycle _R_ is the last Stacks block mined in the +/// _R - 1_'s reward phase phase (i.e. which takes place toward the end of reward cycle). +/// +/// If this method returns None, the caller should try again when there are more Stacks blocks. In +/// Nakamoto, every reward cycle _must_ have a PoX anchor block; otherwise, the chain halts. +/// +/// Returns Ok(Some(reward-cycle-info)) if we found the first sortition in the prepare phase. It +/// will be `SelectedAndKnown`. +/// Returns Ok(None) if we're still waiting for the PoX anchor block sortition +/// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase +/// Returns Err(Error::RewardCycleAlreadyProcessed) if the reward set for this reward cycle has +/// already been processed. +pub fn get_nakamoto_reward_cycle_info( + burn_height: u64, + sortition_tip: &SortitionId, + burnchain: &Burnchain, + chain_state: &mut StacksChainState, + sort_db: &SortitionDB, + provider: &U, +) -> Result, Error> { + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? 
+ .expect(&format!( + "FATAL: no epoch defined for burn height {}", + burn_height + )) + .epoch_id; + + assert!( + epoch_at_height >= StacksEpochId::Epoch30, + "FATAL: called a nakamoto function outside of epoch 3" + ); + + if !burnchain.is_in_prepare_phase(burn_height) { + return Err(Error::NotInPreparePhase); + } + + // calculating the reward set for the _next_ reward cycle + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_height) + .expect("FATAL: no reward cycle for burn height") + + 1; + + // only proceed if we have not yet calculated the PoX reward info for this reward cycle. + let last_processed_reward_cycle = { + let ic = sort_db.index_handle(sortition_tip); + ic.get_last_processed_reward_cycle()? + }; + + if last_processed_reward_cycle >= reward_cycle { + return Err(Error::RewardSetAlreadyProcessed); + } + + debug!("Processing reward set for Nakamoto reward cycle"; + "burn_height" => burn_height, + "reward_cycle" => reward_cycle, + "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, + "prepare_phase_length" => burnchain.pox_constants.prepare_length); + + // find the last Stacks block processed in the preceeding prepare phase + // (i.e. the parent of the first Stacks block processed in the prepare phase). + // Note that we may not have processed it yet. But, if we do find it, then it's + // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block + // cannot change later). + let prepare_phase_sortitions = + find_prepare_phase_sortitions(sort_db, burnchain, sortition_tip)?; + for sn in prepare_phase_sortitions.into_iter() { + if !sn.sortition { + continue; + } + + // find the first Stacks block processed in the prepare phase + let Some(prepare_start_block_header) = + NakamotoChainState::get_nakamoto_tenure_start_block_header( + chain_state.db(), + &sn.consensus_hash, + )? 
+ else { + // no header for this snapshot (possibly invalid) + continue; + }; + + let parent_block_id = &prepare_start_block_header + .anchored_header + .as_stacks_nakamoto() + .expect("FATAL: queried non-Nakamoto tenure start header") + .parent_block_id; + + // find the parent of this Stacks block + let anchor_block_header = + NakamotoChainState::get_block_header(chain_state.db(), &parent_block_id)? + .expect("FATAL: no parent for processed Stacks block in prepare phase"); + + let anchor_block_sn = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &anchor_block_header.consensus_hash, + )? + .expect("FATAL: no snapshot for winning PoX anchor block"); + + let stacks_block_hash = anchor_block_header.anchored_header.block_hash(); + let txid = anchor_block_sn.winning_block_txid; + + info!( + "Anchor block selected for cycle {}: {}/{}", + reward_cycle, &anchor_block_header.consensus_hash, &stacks_block_hash + ); + + let block_id = StacksBlockId::new(&anchor_block_header.consensus_hash, &stacks_block_hash); + let reward_set = + provider.get_reward_set(burn_height, chain_state, burnchain, sort_db, &block_id)?; + + debug!( + "Stacks anchor block {}/{} cycle {} is processed", + &anchor_block_header.consensus_hash, &stacks_block_hash, reward_cycle + ); + let anchor_status = + PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set); + return Ok(Some(RewardCycleInfo { + reward_cycle, + anchor_status, + })); + } + + // no stacks block known yet + info!("No PoX anchor block known yet for cycle {}", reward_cycle); + return Ok(None); +} + +impl< + 'a, + T: BlockEventDispatcher, + N: CoordinatorNotices, + U: RewardSetProvider, + CE: CostEstimator + ?Sized, + FE: FeeEstimator + ?Sized, + B: BurnchainHeaderReader, + > ChainsCoordinator<'a, T, N, U, CE, FE, B> +{ + /// Check to see if we're in the last of the 2.x epochs, and we have the first PoX anchor block + /// for epoch 3. 
+ /// NOTE: the first block in epoch3 must be after the first block in the reward phase, so as + /// to ensure that the PoX stackers have been selected for this cycle. This means that we + /// don't proceed to process Nakamoto blocks until the reward cycle has begun. Also, the last + /// reward cycle of epoch2 _must_ be PoX so we have stackers who can sign. + /// + /// TODO: how do signers register their initial keys? Can we just deploy a pre-registration + /// contract? + pub fn can_process_nakamoto(&mut self) -> Result { + let canonical_sortition_tip = self + .canonical_sortition_tip + .clone() + .expect("FAIL: checking epoch status, but we don't have a canonical sortition tip"); + + let canonical_sn = + SortitionDB::get_block_snapshot(self.sortition_db.conn(), &canonical_sortition_tip)? + .expect("FATAL: canonical sortition tip has no sortition"); + + // what epoch are we in? + let cur_epoch = + SortitionDB::get_stacks_epoch(self.sortition_db.conn(), canonical_sn.block_height)? + .expect(&format!( + "BUG: no epoch defined at height {}", + canonical_sn.block_height + )); + + if cur_epoch.epoch_id < StacksEpochId::Epoch30 { + return Ok(false); + } + + // in epoch3 + let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn())?; + let epoch_3_idx = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) + .expect("FATAL: epoch3 not defined"); + + let epoch3 = &all_epochs[epoch_3_idx]; + let first_epoch3_reward_cycle = self + .burnchain + .block_height_to_reward_cycle(epoch3.start_height) + .expect("FATAL: epoch3 block height has no reward cycle"); + + // only proceed if we have processed the _anchor block_ for this reward cycle + let handle_conn = self.sortition_db.index_handle(&canonical_sortition_tip); + let last_processed_rc = handle_conn.get_last_processed_reward_cycle()?; + Ok(last_processed_rc >= first_epoch3_reward_cycle) + } + + /// This is the main loop body for the coordinator in epoch 3. 
+ /// Returns true if the coordinator is still running. + /// Returns false otherwise. + pub fn handle_comms_nakamoto( + &mut self, + comms: &CoordinatorReceivers, + miner_status: Arc>, + ) -> bool { + // timeout so that we handle Ctrl-C a little gracefully + let bits = comms.wait_on(); + if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received new Nakamoto stacks block notice"); + match self.handle_new_nakamoto_stacks_block() { + Ok(missing_block_opt) => { + if missing_block_opt.is_some() { + debug!( + "Missing affirmed anchor block: {:?}", + &missing_block_opt.as_ref().expect("unreachable") + ); + } + } + Err(e) => { + warn!("Error processing new stacks block: {:?}", e); + } + } + + signal_mining_ready(miner_status.clone()); + } + if (bits & (CoordinatorEvents::NEW_BURN_BLOCK as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received new burn block notice"); + match self.handle_new_nakamoto_burnchain_block() { + Ok(missing_block_opt) => { + if missing_block_opt.is_some() { + debug!( + "Missing canonical anchor block {}", + &missing_block_opt.clone().unwrap() + ); + } + } + Err(e) => { + warn!("Error processing new burn block: {:?}", e); + } + } + signal_mining_ready(miner_status.clone()); + } + if (bits & (CoordinatorEvents::STOP as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received stop notice"); + return false; + } + + return true; + } + + /// Handle one or more new Nakamoto Stacks blocks. + /// If we process a PoX anchor block, then return its block hash. This unblocks processing the + /// next reward cycle's burnchain blocks. 
+ pub fn handle_new_nakamoto_stacks_block(&mut self) -> Result, Error> { + let canonical_sortition_tip = self.canonical_sortition_tip.clone().expect( + "FAIL: processing a new Stacks block, but don't have a canonical sortition tip", + ); + + loop { + // process at most one block per loop pass + let sortdb_handle = self + .sortition_db + .tx_handle_begin(&canonical_sortition_tip)?; + + let mut processed_blocks = NakamotoChainState::process_nakamoto_blocks( + &mut self.chain_state_db, + sortdb_handle, + 1, + self.dispatcher, + )?; + + if processed_blocks.len() == 0 { + // out of blocks + break; + } + + let Some(Some(block_receipt)) = processed_blocks.pop() else { + // this block was invalid + debug!("Bump blocks processed (invalid)"); + self.notifier.notify_stacks_block_processed(); + increment_stx_blocks_processed_counter(); + continue; + }; + + // only bump the coordinator's state if the processed block + // is in our sortition fork + let block_hash = block_receipt.header.anchored_header.block_hash(); + let in_sortition_set = self + .sortition_db + .is_stacks_block_in_sortition_set(&canonical_sortition_tip, &block_hash)?; + + if !in_sortition_set { + continue; + } + + let ( + canonical_stacks_block_id, + canonical_stacks_block_height, + canonical_stacks_consensus_hash, + ) = { + let nakamoto_header = block_receipt + .header + .anchored_header + .as_stacks_nakamoto() + .expect("FATAL: unreachable: processed a non-Nakamoto block"); + + ( + nakamoto_header.block_id(), + nakamoto_header.chain_length, + nakamoto_header.consensus_hash.clone(), + ) + }; + + debug!("Bump blocks processed ({})", &canonical_stacks_block_id); + + self.notifier.notify_stacks_block_processed(); + increment_stx_blocks_processed_counter(); + + // process Atlas events + Self::process_atlas_attachment_events( + self.atlas_db.as_mut(), + &self.atlas_config, + &block_receipt, + canonical_stacks_block_height, + ); + + // update cost estimator + if let Some(ref mut estimator) = self.cost_estimator { + 
let stacks_epoch = self + .sortition_db + .index_conn() + .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) + .expect("Could not find a stacks epoch."); + estimator.notify_block( + &block_receipt.tx_receipts, + &stacks_epoch.block_limit, + &stacks_epoch.epoch_id, + ); + } + + // update fee estimator + if let Some(ref mut estimator) = self.fee_estimator { + let stacks_epoch = self + .sortition_db + .index_conn() + .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) + .expect("Could not find a stacks epoch."); + if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { + warn!("FeeEstimator failed to process block receipt"; + "stacks_block" => %block_hash, + "stacks_height" => %block_receipt.header.stacks_block_height, + "error" => %e); + } + } + + let stacks_sn = SortitionDB::get_block_snapshot_consensus( + &self.sortition_db.conn(), + &canonical_stacks_consensus_hash, + )? + .expect(&format!( + "FATAL: unreachable: consensus hash {} has no snapshot", + &canonical_stacks_consensus_hash + )); + + // are we in the prepare phase? + if !self.burnchain.is_in_prepare_phase(stacks_sn.block_height) { + // next ready stacks block + continue; + } + + // is the upcoming reward cycle processed yet? + let current_reward_cycle = self + .burnchain + .block_height_to_reward_cycle(stacks_sn.block_height) + .expect(&format!( + "FATAL: unreachable: burnchain block height has no reward cycle" + )); + + let last_processed_reward_cycle = { + let ic = self.sortition_db.index_handle(&canonical_sortition_tip); + ic.get_last_processed_reward_cycle()? + }; + + if last_processed_reward_cycle > current_reward_cycle { + // already processed upcoming reward cycle + continue; + } + + // This is the first Stacks block in the prepare phase for the next reward cycle. 
+ // Pause here and process the next sortitions + return Ok(Some(block_hash)); + } + + // no PoX anchor block found + Ok(None) + } + + /// Given a burnchain header, find the PoX reward cycle info + pub fn get_nakamoto_reward_cycle_info( + &mut self, + burn_header: &BurnchainBlockHeader, + ) -> Result, Error> { + let sortition_tip_id = self + .canonical_sortition_tip + .as_ref() + .expect("FATAL: Processing anchor block, but no known sortition tip"); + + get_nakamoto_reward_cycle_info( + burn_header.block_height, + sortition_tip_id, + &self.burnchain, + &mut self.chain_state_db, + &self.sortition_db, + &self.reward_set_provider, + ) + } + + /// Process the next-available burnchain block, if possible. + /// Burnchain blocks can only be processed for the last-known PoX reward set, which is to say, + /// burnchain block processing can be blocked on the unavailability of the next PoX anchor + /// block. If the next PoX anchor block is not available, then no burnchain block processing + /// happens, and the hash of the PoX anchor block is returned instead. + /// + /// Returns Ok(None) if all burnchain blocks are processed + /// Returns Ok(Some(hash)) if burnchain block processing is blocked on a missing PoX anchor + /// block + /// Returns Err(..) if an error occurred while processing (i.e. a DB error). 
+ pub fn handle_new_nakamoto_burnchain_block( + &mut self, + ) -> Result, Error> { + // highest burnchain block we've downloaded + let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; + + debug!("Handle new canonical burnchain tip"; + "height" => %canonical_burnchain_tip.block_height, + "block_hash" => %canonical_burnchain_tip.block_hash.to_string()); + + // Retrieve all the direct ancestors of this block with an unprocessed sortition + let mut cursor = canonical_burnchain_tip.block_hash.clone(); + let mut sortitions_to_process = VecDeque::new(); + + // We halt the ancestry research as soon as we find a processed parent + let mut last_processed_ancestor = loop { + if let Some(found_sortition) = self.sortition_db.is_sortition_processed(&cursor)? { + debug!( + "Ancestor sortition {} of block {} is processed", + &found_sortition, &cursor + ); + break found_sortition; + } + + let current_block = + BurnchainDB::get_burnchain_block(&self.burnchain_blocks_db.conn(), &cursor) + .map_err(|e| { + warn!( + "ChainsCoordinator: could not retrieve block burnhash={}", + &cursor + ); + Error::NonContiguousBurnchainBlock(e) + })?; + + debug!( + "Unprocessed block: ({}, {})", + ¤t_block.header.block_hash.to_string(), + current_block.header.block_height + ); + + let parent = current_block.header.parent_block_hash.clone(); + sortitions_to_process.push_front(current_block); + cursor = parent; + }; + + let dbg_burn_header_hashes: Vec<_> = sortitions_to_process + .iter() + .map(|block| { + format!( + "({}, {})", + &block.header.block_hash.to_string(), + block.header.block_height + ) + }) + .collect(); + + debug!( + "Unprocessed burn chain blocks [{}]", + dbg_burn_header_hashes.join(", ") + ); + + // Unlike in Stacks 2.x, there can be neither chain reorgs nor PoX reorgs unless Bitcoin itself + // reorgs. 
But if this happens, then we will have already found the set of + // (newly-canonical) burnchain blocks which lack sortitions -- they'll be in + // `sortitions_to_process`. So, we can proceed to process all outstanding sortitions until + // we come across a PoX anchor block that we don't have yet. + for unprocessed_block in sortitions_to_process.into_iter() { + let BurnchainBlockData { header, ops } = unprocessed_block; + let reward_cycle = self + .burnchain + .block_height_to_reward_cycle(header.block_height) + .unwrap_or(u64::MAX); + + debug!( + "Process burn block {} reward cycle {} in {}", + header.block_height, reward_cycle, &self.burnchain.working_dir, + ); + + // calculate paid rewards during this burnchain block if we announce + // to an events dispatcher + let paid_rewards = if self.dispatcher.is_some() { + calculate_paid_rewards(&ops) + } else { + PaidRewards { + pox: vec![], + burns: 0, + } + }; + + // is this the burnchain block that selected the PoX anchor block? + let reward_cycle_info = self.get_nakamoto_reward_cycle_info(&header)?; + if let Some(rc_info) = reward_cycle_info { + // in nakamoto, if we have any reward cycle info at all, it will be known. 
+ assert!( + rc_info.is_reward_info_known(), + "FATAL: unknown PoX anchor block in Nakamoto" + ); + return Ok(Some( + rc_info + .selected_anchor_block() + .expect("FATAL: Nakamoto always has a PoX anchor block") + .0 + .to_owned(), + )); + } + + // process next sortition + let dispatcher_ref = &self.dispatcher; + let (next_snapshot, _) = self + .sortition_db + .evaluate_sortition( + &header, + ops, + &self.burnchain, + &last_processed_ancestor, + reward_cycle_info, + |reward_set_info| { + if let Some(dispatcher) = dispatcher_ref { + dispatcher_announce_burn_ops( + *dispatcher, + &header, + paid_rewards, + reward_set_info, + ); + } + }, + ) + .map_err(|e| { + error!("ChainsCoordinator: unable to evaluate sortition: {:?}", e); + Error::FailedToProcessSortition(e) + })?; + + let sortition_id = next_snapshot.sortition_id; + + self.notifier.notify_sortition_processed(); + + debug!( + "Sortition processed"; + "sortition_id" => &sortition_id.to_string(), + "burn_header_hash" => &next_snapshot.burn_header_hash.to_string(), + "burn_height" => next_snapshot.block_height + ); + + // always bump canonical sortition tip: + // if this code path is invoked, the canonical burnchain tip + // has moved, so we should move our canonical sortition tip as well. 
+ self.canonical_sortition_tip = Some(sortition_id.clone()); + last_processed_ancestor = sortition_id; + } + + Ok(None) + } +} diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs new file mode 100644 index 0000000000..da0e9ab0a9 --- /dev/null +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -0,0 +1,24 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use crate::net::test::{TestPeer, TestPeerConfig}; + +/// Mine two reward cycles without any interruptions. 
+#[test] +fn test_simple_nakamoto_coordinator_bootup() { + let peer_config = TestPeerConfig::new(function_name!(), 0, 0); + let mut peer = TestPeer::new(peer_config); +} From b59e27b215b101f29bb65b65115fe2feaa8ef31d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:52:28 -0400 Subject: [PATCH 024/122] chore: run stacker dkg test as part of integration test battery --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index ac1dd45a26..5c2a341421 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -142,6 +142,7 @@ jobs: - tests::epoch_24::verify_auto_unlock_behavior - tests::stackerdb::test_stackerdb_load_store - tests::stackerdb::test_stackerdb_event_observer + - tests::signer::test_stackerdb_dkg steps: - name: Checkout the latest code id: git_checkout From e4d8beda60458852f918cbfbff5127cf1c7a8e5c Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:54:03 -0400 Subject: [PATCH 025/122] chore: add Epoch25 --- clarity/src/vm/analysis/mod.rs | 1 + clarity/src/vm/analysis/type_checker/mod.rs | 2 ++ 2 files changed, 3 insertions(+) diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 19e38af647..901cdfce69 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -141,6 +141,7 @@ pub fn run_analysis( | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db) } diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index dd2f145333..b3b8d6fb45 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -53,6 +53,7 @@ impl FunctionType { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | 
StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => self.check_args_2_1(accounting, args, clarity_version), StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), } @@ -73,6 +74,7 @@ impl FunctionType { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } From 97ed00dbf94f139d4da8861220c3a406ec5d6042 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:54:13 -0400 Subject: [PATCH 026/122] chore: Epoch25 --- clarity/src/vm/costs/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 31b22f956f..aa0c6710e9 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -724,6 +724,7 @@ impl LimitedCostTracker { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => COSTS_3_NAME.to_string(), } } From c55e21717ec665ff338af9e9e0fcfc5507f57011 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:54:30 -0400 Subject: [PATCH 027/122] feat: add pox-3 unlock and pox-4 activation --- clarity/src/vm/database/clarity_db.rs | 41 ++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 4 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 96013cde27..c7c058175c 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -112,7 +112,9 @@ pub trait HeadersDB { pub trait BurnStateDB { fn get_v1_unlock_height(&self) -> u32; fn get_v2_unlock_height(&self) -> u32; + fn get_v3_unlock_height(&self) -> u32; fn get_pox_3_activation_height(&self) -> u32; + fn get_pox_4_activation_height(&self) -> u32; /// Returns the *burnchain block height* for the `sortition_id` is associated with. 
fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option; @@ -200,10 +202,18 @@ impl BurnStateDB for &dyn BurnStateDB { fn get_v2_unlock_height(&self) -> u32 { (*self).get_v2_unlock_height() } + + fn get_v3_unlock_height(&self) -> u32 { + (*self).get_v3_unlock_height() + } fn get_pox_3_activation_height(&self) -> u32 { (*self).get_pox_3_activation_height() } + + fn get_pox_4_activation_height(&self) -> u32 { + (*self).get_pox_4_activation_height() + } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { (*self).get_burn_block_height(sortition_id) @@ -378,10 +388,18 @@ impl BurnStateDB for NullBurnStateDB { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } + + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } fn get_pox_prepare_length(&self) -> u32 { panic!("NullBurnStateDB should not return PoX info"); @@ -819,6 +837,11 @@ impl<'a> ClarityDatabase<'a> { pub fn get_pox_3_activation_height(&self) -> u32 { self.burn_state_db.get_pox_3_activation_height() } + + /// Return the height for PoX 4 activation from the burn state db + pub fn get_pox_4_activation_height(&self) -> u32 { + self.burn_state_db.get_pox_4_activation_height() + } /// Return the height for PoX v2 -> v3 auto unlocks /// from the burn state db @@ -829,6 +852,16 @@ impl<'a> ClarityDatabase<'a> { u32::MAX } } + + /// Return the height for PoX v3 -> v4 auto unlocks + /// from the burn state db + pub fn get_v3_unlock_height(&mut self) -> u32 { + if self.get_clarity_epoch_version() >= StacksEpochId::Epoch24 { + self.burn_state_db.get_v3_unlock_height() + } else { + u32::MAX + } + } /// Get the last-known burnchain block height. /// Note that this is _not_ the burnchain height in which this block was mined! 
@@ -1906,8 +1939,8 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height(), self.get_v3_unlock_height()), + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height(), self.get_v3_unlock_height())); STXBalanceSnapshot::new(principal, stx_balance, cur_burn_height, self) } @@ -1925,8 +1958,8 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height(), self.get_v3_unlock_height()), + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height(), self.get_v3_unlock_height())); STXBalanceSnapshot::new(principal, stx_balance, cur_burn_height, self) } From db59dcdbe416cd0e8aadba4cbcb1ff9a01566f2a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:54:51 -0400 Subject: [PATCH 028/122] feat: add pox-4 unlock --- clarity/src/vm/database/structures.rs | 247 +++++++++++++++++++++++++- 1 file changed, 242 insertions(+), 5 deletions(-) diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 89a635765e..35ed8b9469 100644 --- a/clarity/src/vm/database/structures.rs 
+++ b/clarity/src/vm/database/structures.rs @@ -146,6 +146,11 @@ pub enum STXBalance { amount_locked: u128, unlock_height: u64, }, + LockedPoxFour { + amount_unlocked: u128, + amount_locked: u128, + unlock_height: u64 + }, } /// Lifetime-limited handle to an uncommitted balance structure. @@ -225,6 +230,24 @@ impl ClaritySerializable for STXBalance { .write_all(&unlock_height.to_be_bytes()) .expect("STXBalance serialization: failed writing unlock_height."); } + STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + unlock_height, + } => { + buffer + .write_all(&[STXBalance::pox_4_version]) + .expect("STXBalance serialization: failed to write PoX version byte"); + buffer + .write_all(&amount_unlocked.to_be_bytes()) + .expect("STXBalance serialization: failed writing amount_unlocked."); + buffer + .write_all(&amount_locked.to_be_bytes()) + .expect("STXBalance serialization: failed writing amount_locked."); + buffer + .write_all(&unlock_height.to_be_bytes()) + .expect("STXBalance serialization: failed writing unlock_height."); + } } to_hex(buffer.as_slice()) } @@ -301,6 +324,12 @@ impl ClarityDeserializable for STXBalance { amount_locked, unlock_height, } + } else if version == &STXBalance::pox_4_version { + STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + unlock_height, + } } else { unreachable!("Version is checked for pox_3 or pox_2 version compliance above"); } @@ -358,38 +387,45 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { pub fn get_available_balance(&mut self) -> u128 { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + let v3_unlock_height = self.db_ref.get_v3_unlock_height(); self.balance.get_available_balance_at_burn_block( self.burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height ) } pub fn canonical_balance_repr(&mut self) -> STXBalance { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = 
self.db_ref.get_v2_unlock_height(); + let v3_unlock_height = self.db_ref.get_v3_unlock_height(); self.balance - .canonical_repr_at_block(self.burn_block_height, v1_unlock_height, v2_unlock_height) + .canonical_repr_at_block(self.burn_block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height) .0 } pub fn has_locked_tokens(&mut self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + let v3_unlock_height = self.db_ref.get_v3_unlock_height(); self.balance.has_locked_tokens_at_burn_block( self.burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height ) } pub fn has_unlockable_tokens(&mut self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + let v3_unlock_height = self.db_ref.get_v3_unlock_height(); self.balance.has_unlockable_tokens_at_burn_block( self.burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) } @@ -683,6 +719,120 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { STXBalance::LockedPoxThree { .. } ) } + + //////////////// Pox-4 ////////////////// + + /// Lock `amount_to_lock` tokens on this account until `unlock_burn_height`. 
+ /// After calling, this method will set the balance to a "LockedPoxFour" balance, + /// because this method is only invoked as a result of PoX4 interactions + pub fn lock_tokens_v4(&mut self, amount_to_lock: u128, unlock_burn_height: u64) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after account-token-lock"); + } + + // caller needs to have checked this + assert!(amount_to_lock > 0, "BUG: cannot lock 0 tokens"); + + if unlock_burn_height <= self.burn_block_height { + // caller needs to have checked this + panic!("FATAL: cannot set a lock with expired unlock burn height"); + } + + if self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account already has locked tokens"); + } + + // from `unlock_available_tokens_if_any` call above, `self.balance` should + // be canonicalized already + + let new_amount_unlocked = self + .balance + .get_total_balance() + .checked_sub(amount_to_lock) + .expect("FATAL: account locks more STX than balance possessed"); + + self.balance = STXBalance::LockedPoxFour { + amount_unlocked: new_amount_unlocked, + amount_locked: amount_to_lock, + unlock_height: unlock_burn_height, + }; + } + + /// Extend this account's current lock to `unlock_burn_height`. 
+ /// After calling, this method will set the balance to a "LockedPoxFour" balance, + /// because this method is only invoked as a result of PoX3 interactions + pub fn extend_lock_v4(&mut self, unlock_burn_height: u64) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after extend-token-lock"); + } + + if !self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account does not have locked tokens"); + } + + if unlock_burn_height <= self.burn_block_height { + // caller needs to have checked this + panic!("FATAL: cannot set a lock with expired unlock burn height"); + } + + self.balance = STXBalance::LockedPoxFour { + amount_unlocked: self.balance.amount_unlocked(), + amount_locked: self.balance.amount_locked(), + unlock_height: unlock_burn_height, + }; + } + + /// Increase the account's current lock to `new_total_locked`. + /// Panics if `self` was not locked by V3 PoX. + pub fn increase_lock_v4(&mut self, new_total_locked: u128) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after extend-token-lock"); + } + + if !self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account does not have locked tokens"); + } + + if !self.is_v4_locked() { + // caller needs to have checked this + panic!("FATAL: account must be locked by pox-3"); + } + + assert!( + self.balance.amount_locked() <= new_total_locked, + "FATAL: account must lock more after `increase_lock_v3`" + ); + + let total_amount = self + .balance + .amount_unlocked() + .checked_add(self.balance.amount_locked()) + .expect("STX balance overflowed u128"); + let amount_unlocked = total_amount + .checked_sub(new_total_locked) + .expect("STX underflow: more is locked than total balance"); + + self.balance = STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked: new_total_locked, + unlock_height: self.balance.unlock_height(), + }; + } + + /// Return true 
iff `self` represents a snapshot that has a lock + /// created by PoX v3. + pub fn is_v4_locked(&mut self) -> bool { + matches!( + self.canonical_balance_repr(), + STXBalance::LockedPoxFour { .. } + ) + } /////////////// GENERAL ////////////////////// @@ -718,6 +868,15 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { amount_locked, unlock_height: new_unlock_height, }, + STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + .. + } => STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + unlock_height: new_unlock_height, + }, }; } @@ -728,6 +887,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { self.burn_block_height, self.db_ref.get_v1_unlock_height(), self.db_ref.get_v2_unlock_height(), + self.db_ref.get_v3_unlock_height(), ); self.balance = new_balance; unlocked @@ -740,6 +900,7 @@ impl STXBalance { pub const v2_and_v3_size: usize = 41; pub const pox_2_version: u8 = 0; pub const pox_3_version: u8 = 1; + pub const pox_4_version: u8 = 2; pub fn zero() -> STXBalance { STXBalance::Unlocked { amount: 0 } @@ -756,7 +917,8 @@ impl STXBalance { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { unlock_height, .. } | STXBalance::LockedPoxTwo { unlock_height, .. } - | STXBalance::LockedPoxThree { unlock_height, .. } => *unlock_height, + | STXBalance::LockedPoxThree { unlock_height, .. } + | STXBalance::LockedPoxFour { unlock_height, .. } => *unlock_height, } } @@ -764,7 +926,7 @@ impl STXBalance { /// *while* factoring in the PoX 2 early unlock for PoX 1 and PoX 3 early unlock for PoX 2. /// This value is still lazy: this unlock height may be less than the current /// burn block height, if so it will be updated in a canonicalized view. - pub fn effective_unlock_height(&self, v1_unlock_height: u32, v2_unlock_height: u32) -> u64 { + pub fn effective_unlock_height(&self, v1_unlock_height: u32, v2_unlock_height: u32, v3_unlock_height: u32) -> u64 { match self { STXBalance::Unlocked { .. 
} => 0, STXBalance::LockedPoxOne { unlock_height, .. } => { @@ -781,7 +943,14 @@ impl STXBalance { *unlock_height } } - STXBalance::LockedPoxThree { unlock_height, .. } => *unlock_height, + STXBalance::LockedPoxThree { unlock_height, .. } => { + if *unlock_height >= (v3_unlock_height as u64) { + v3_unlock_height as u64 + } else { + *unlock_height + } + } + STXBalance::LockedPoxFour { unlock_height, .. } => *unlock_height, } } @@ -792,7 +961,8 @@ impl STXBalance { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { amount_locked, .. } | STXBalance::LockedPoxTwo { amount_locked, .. } - | STXBalance::LockedPoxThree { amount_locked, .. } => *amount_locked, + | STXBalance::LockedPoxThree { amount_locked, .. } + | STXBalance::LockedPoxFour { amount_locked, .. } => *amount_locked, } } @@ -811,6 +981,9 @@ impl STXBalance { } | STXBalance::LockedPoxThree { amount_unlocked, .. + } + | STXBalance::LockedPoxFour { + amount_unlocked, .. } => *amount_unlocked, } } @@ -828,6 +1001,9 @@ impl STXBalance { } | STXBalance::LockedPoxThree { amount_unlocked, .. + } + | STXBalance::LockedPoxFour { + amount_unlocked, .. } => { *amount_unlocked = amount_unlocked.checked_sub(delta).expect("STX underflow"); } @@ -847,6 +1023,9 @@ impl STXBalance { } | STXBalance::LockedPoxThree { amount_unlocked, .. + } + | STXBalance::LockedPoxFour { + amount_unlocked, .. 
} => { if let Some(new_amount) = amount_unlocked.checked_add(delta) { *amount_unlocked = new_amount; @@ -867,11 +1046,13 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> (STXBalance, u128) { if self.has_unlockable_tokens_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { ( STXBalance::Unlocked { @@ -889,11 +1070,13 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> u128 { if self.has_unlockable_tokens_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { self.get_total_balance() } else { @@ -908,6 +1091,9 @@ impl STXBalance { STXBalance::LockedPoxThree { amount_unlocked, .. } => *amount_unlocked, + STXBalance::LockedPoxFour { + amount_unlocked, .. + } => *amount_unlocked, } } } @@ -917,11 +1103,13 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> (u128, u64) { if self.has_unlockable_tokens_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { (0, 0) } else { @@ -942,6 +1130,11 @@ impl STXBalance { unlock_height, .. } => (*amount_locked, *unlock_height), + STXBalance::LockedPoxFour { + amount_locked, + unlock_height, + .. + } => (*amount_locked, *unlock_height), } } } @@ -964,6 +1157,11 @@ impl STXBalance { amount_locked, .. } => (*amount_unlocked, *amount_locked), + STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + .. + } => (*amount_unlocked, *amount_locked), }; unlocked.checked_add(locked).expect("STX overflow") } @@ -985,6 +1183,7 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> bool { match self { STXBalance::Unlocked { .. } => false, @@ -1027,6 +1226,23 @@ impl STXBalance { amount_locked, unlock_height, .. 
+ } => { + if *amount_locked == 0 { + return false; + } + if *unlock_height <= burn_block_height { + return false; + } + // if unlockable due to Stacks 2.5 early unlock + if v3_unlock_height as u64 <= burn_block_height { + return false; + } + true + } + STXBalance::LockedPoxFour { + amount_locked, + unlock_height, + .. } => { if *amount_locked == 0 { return false; @@ -1044,6 +1260,7 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> bool { match self { STXBalance::Unlocked { .. } => false, @@ -1087,6 +1304,24 @@ impl STXBalance { amount_locked, unlock_height, .. + } => { + if *amount_locked == 0 { + return false; + } + // if normally unlockable, return true + if *unlock_height <= burn_block_height { + return true; + } + // if unlockable due to Stacks 2.5 early unlock + if v3_unlock_height as u64 <= burn_block_height { + return true; + } + false + } + STXBalance::LockedPoxFour { + amount_locked, + unlock_height, + .. } => { if *amount_locked == 0 { return false; @@ -1106,11 +1341,13 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> bool { self.get_available_balance_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) >= amount } } From f10b817b2c81c0a50d1565fd6632459d098dd5fd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:55:20 -0400 Subject: [PATCH 029/122] chore: add v3 unlock and pox-4 activation --- clarity/src/vm/docs/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index cf0d142f02..8f1dafc660 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2772,10 +2772,18 @@ mod test { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } + + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + + fn 
get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } fn get_pox_prepare_length(&self) -> u32 { panic!("Docs db should not return PoX info") From d04fa764cd2b3b775fd24524f9079df9a4f478e9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:55:32 -0400 Subject: [PATCH 030/122] chore: add Epoch25, pox-3 unlock, and pox-4 activation --- clarity/src/vm/functions/assets.rs | 3 ++- clarity/src/vm/functions/mod.rs | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index c0c4fde945..5653d7cbb3 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -236,6 +236,7 @@ pub fn special_stx_account( .canonical_balance_repr(); let v1_unlock_ht = env.global_context.database.get_v1_unlock_height(); let v2_unlock_ht = env.global_context.database.get_v2_unlock_height(); + let v3_unlock_ht = env.global_context.database.get_v3_unlock_height(); TupleData::from_data(vec![ ( @@ -248,7 +249,7 @@ pub fn special_stx_account( ), ( "unlock-height".try_into().unwrap(), - Value::UInt(stx_balance.effective_unlock_height(v1_unlock_ht, v2_unlock_ht) as u128), + Value::UInt(stx_balance.effective_unlock_height(v1_unlock_ht, v2_unlock_ht, v3_unlock_ht) as u128), ), ]) .map(Value::Tuple) diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 7668305f72..ef10d351a1 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -62,6 +62,8 @@ macro_rules! switch_on_global_epoch { StacksEpochId::Epoch23 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 2.4. StacksEpochId::Epoch24 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 2.5. + StacksEpochId::Epoch25 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 3.0. 
StacksEpochId::Epoch30 => $Epoch205Version(args, env, context), } From e98fe5139bcd6856605cd6c3de817d88b30ef779 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:56:02 -0400 Subject: [PATCH 031/122] chore: add v3 unlock and pox-4 activation --- clarity/src/vm/test_util/mod.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index aec31e1e41..ffe9db8de0 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -232,10 +232,18 @@ impl BurnStateDB for UnitTestBurnStateDB { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } + + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } fn get_pox_prepare_length(&self) -> u32 { 1 From b25e422f381d25944da4310af7ee777cd0ed1e56 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:56:17 -0400 Subject: [PATCH 032/122] chore: add Epoch25 --- clarity/src/vm/tests/mod.rs | 3 +++ clarity/src/vm/types/signatures.rs | 8 +++++++- clarity/src/vm/version.rs | 1 + 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index f179dd0e78..c73db0cdad 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -102,6 +102,7 @@ epochs_template! { Epoch22, Epoch23, Epoch24, + Epoch25, } clarity_template! { @@ -115,6 +116,8 @@ clarity_template! 
{ (Epoch23, Clarity2), (Epoch24, Clarity1), (Epoch24, Clarity2), + (Epoch25, Clarity1), + (Epoch25, Clarity2), } #[cfg(test)] diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 1d25438cb5..6fc8d5f014 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -529,6 +529,7 @@ impl TypeSignature { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => self.admits_type_v2_1(other), StacksEpochId::Epoch10 => unreachable!("epoch 1.0 not supported"), } @@ -728,7 +729,11 @@ impl TypeSignature { // Epoch-2.2 had a regression in canonicalization, so it must be preserved here. | StacksEpochId::Epoch22 => self.clone(), // Note for future epochs: Epochs >= 2.3 should use the canonicalize_v2_1() routine - StacksEpochId::Epoch21 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch30 => self.canonicalize_v2_1(), + StacksEpochId::Epoch21 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => self.canonicalize_v2_1(), } } @@ -1060,6 +1065,7 @@ impl TypeSignature { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => Self::least_supertype_v2_1(a, b), StacksEpochId::Epoch10 => unreachable!("Clarity 1.0 is not supported"), } diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 6da73f7dc6..f64d4ee878 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -36,6 +36,7 @@ impl ClarityVersion { StacksEpochId::Epoch22 => ClarityVersion::Clarity2, StacksEpochId::Epoch23 => ClarityVersion::Clarity2, StacksEpochId::Epoch24 => ClarityVersion::Clarity2, + StacksEpochId::Epoch25 => ClarityVersion::Clarity2, StacksEpochId::Epoch30 => ClarityVersion::Clarity2, } } From 29954510309664ab3b630c486fb8a5e38dca0f35 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 
Nov 2023 13:56:27 -0400 Subject: [PATCH 033/122] chore: Epoch25 --- stacks-common/src/types/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index f60a70efd6..ee37690877 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -74,6 +74,7 @@ pub enum StacksEpochId { Epoch22 = 0x0200f, Epoch23 = 0x02014, Epoch24 = 0x02019, + Epoch25 = 0x0201a, Epoch30 = 0x03000, } @@ -92,7 +93,9 @@ impl StacksEpochId { | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => false, - StacksEpochId::Epoch24 | StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => true, } } } @@ -107,6 +110,7 @@ impl std::fmt::Display for StacksEpochId { StacksEpochId::Epoch22 => write!(f, "2.2"), StacksEpochId::Epoch23 => write!(f, "2.3"), StacksEpochId::Epoch24 => write!(f, "2.4"), + StacksEpochId::Epoch25 => write!(f, "2.5"), StacksEpochId::Epoch30 => write!(f, "3.0"), } } @@ -124,6 +128,7 @@ impl TryFrom for StacksEpochId { x if x == StacksEpochId::Epoch22 as u32 => Ok(StacksEpochId::Epoch22), x if x == StacksEpochId::Epoch23 as u32 => Ok(StacksEpochId::Epoch23), x if x == StacksEpochId::Epoch24 as u32 => Ok(StacksEpochId::Epoch24), + x if x == StacksEpochId::Epoch25 as u32 => Ok(StacksEpochId::Epoch25), x if x == StacksEpochId::Epoch30 as u32 => Ok(StacksEpochId::Epoch30), _ => Err("Invalid epoch"), } From 467e8934cfe3488d21933353febcbf3416957590 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:56:41 -0400 Subject: [PATCH 034/122] chore: add pox-3 unlock and pox-4 activation height to pox constants --- stackslib/src/burnchains/mod.rs | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index c332437c6c..2d5df93a57 100644 --- a/stackslib/src/burnchains/mod.rs +++ 
b/stackslib/src/burnchains/mod.rs @@ -50,7 +50,7 @@ use crate::chainstate::burn::operations::BlockstackOperationType; use crate::chainstate::burn::operations::Error as op_error; use crate::chainstate::burn::operations::LeaderKeyRegisterOp; use crate::chainstate::stacks::address::PoxAddress; -use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME}; use crate::chainstate::stacks::StacksPublicKey; use crate::core::*; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; @@ -311,8 +311,12 @@ pub struct PoxConstants { pub v1_unlock_height: u32, /// The auto unlock height for PoX v2 lockups during Epoch 2.2 pub v2_unlock_height: u32, + /// The auto unlock height for PoX v3 lockups during Epoch 2.5 + pub v3_unlock_height: u32, /// After this burn height, reward cycles use pox-3 for reward set data pub pox_3_activation_height: u32, + /// After this burn height, reward cycles use pox-4 for reward set data + pub pox_4_activation_height: u32, _shadow: PhantomData<()>, } @@ -327,13 +331,17 @@ impl PoxConstants { sunset_end: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, pox_3_activation_height: u32, + pox_4_activation_height: u32, ) -> PoxConstants { assert!(anchor_threshold > (prepare_length / 2)); assert!(prepare_length < reward_cycle_length); assert!(sunset_start <= sunset_end); assert!(v2_unlock_height >= v1_unlock_height); + assert!(v3_unlock_height >= v2_unlock_height); assert!(pox_3_activation_height >= v2_unlock_height); + assert!(pox_4_activation_height >= v3_unlock_height); PoxConstants { reward_cycle_length, @@ -345,23 +353,28 @@ impl PoxConstants { sunset_end, v1_unlock_height, v2_unlock_height, + v3_unlock_height, pox_3_activation_height, + pox_4_activation_height, _shadow: PhantomData, } } #[cfg(test)] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots - PoxConstants::new(10, 5, 3, 25, 5, 5000, 
10000, u32::MAX, u32::MAX, u32::MAX) + PoxConstants::new(10, 5, 3, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX, u32::MAX, u32::MAX) } /// Returns the PoX contract that is "active" at the given burn block height pub fn static_active_pox_contract( v1_unlock_height: u64, pox_3_activation_height: u64, + pox_4_activation_height: u64, burn_height: u64, ) -> &'static str { - if burn_height > pox_3_activation_height { + if burn_height > pox_4_activation_height { + POX_4_NAME + } else if burn_height > pox_3_activation_height { POX_3_NAME } else if burn_height > v1_unlock_height { POX_2_NAME @@ -375,6 +388,7 @@ impl PoxConstants { Self::static_active_pox_contract( self.v1_unlock_height as u64, self.pox_3_activation_height as u64, + self.pox_4_activation_height as u64, burn_height, ) } @@ -404,9 +418,13 @@ impl PoxConstants { BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT, POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT, + POX_V3_MAINNET_EARLY_UNLOCK_HEIGHT, BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT .try_into() .expect("Epoch transition height must be <= u32::MAX"), + BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT + .try_into() + .expect("Epoch transition height must be <= u32::MAX"), ) } @@ -421,9 +439,13 @@ impl PoxConstants { BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT, POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT, + POX_V3_TESTNET_EARLY_UNLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT .try_into() .expect("Epoch transition height must be <= u32::MAX"), + BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT + .try_into() + .expect("Epoch transition height must be <= u32::MAX"), ) // total liquid supply is 40000000000000000 µSTX } @@ -439,6 +461,8 @@ impl PoxConstants { 1_000_000, 2_000_000, 3_000_000, + 4_000_000, + 5_000_000, ) } From e2e94136ba24c3f99c177b4a3253fde2e9184b22 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:57:20 -0400 Subject: [PATCH 035/122] chore: mock pox-3 unlock and pox-4 activation --- 
stackslib/src/burnchains/tests/affirmation.rs | 24 +++++++++++++++++++ stackslib/src/burnchains/tests/db.rs | 8 +++++++ 2 files changed, 32 insertions(+) diff --git a/stackslib/src/burnchains/tests/affirmation.rs b/stackslib/src/burnchains/tests/affirmation.rs index ef4f8c5ba3..49097a1938 100644 --- a/stackslib/src/burnchains/tests/affirmation.rs +++ b/stackslib/src/burnchains/tests/affirmation.rs @@ -497,6 +497,8 @@ fn test_read_prepare_phase_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -574,6 +576,8 @@ fn test_parent_block_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -676,6 +680,8 @@ fn test_filter_orphan_block_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -747,6 +753,8 @@ fn test_filter_missed_block_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -818,6 +826,8 @@ fn test_find_heaviest_block_commit() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1041,6 +1051,8 @@ fn test_find_heaviest_parent_commit_many_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1304,6 +1316,8 @@ fn test_update_pox_affirmation_maps_3_forks() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1564,6 +1578,8 @@ fn test_update_pox_affirmation_maps_unique_anchor_block() { u32::MAX, u32::MAX, u32::MAX, 
+ u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1767,6 +1783,8 @@ fn test_update_pox_affirmation_maps_absent() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -2240,6 +2258,8 @@ fn test_update_pox_affirmation_maps_nothing() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -2517,6 +2537,8 @@ fn test_update_pox_affirmation_fork_2_cycles() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -2819,6 +2841,8 @@ fn test_update_pox_affirmation_fork_duel() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index c0bd183b92..7312f406d0 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -523,6 +523,8 @@ fn test_get_commit_at() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -648,6 +650,8 @@ fn test_get_set_check_anchor_block() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -743,6 +747,8 @@ fn test_update_block_descendancy() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -872,6 +878,8 @@ fn test_update_block_descendancy_with_fork() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; 
burnchain.first_block_hash = first_bhh.clone(); From d8074edee54a1a80a6b3a108be9237ae81551904 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:57:46 -0400 Subject: [PATCH 036/122] feat: add nakamoto miner hash160 to leader block commits and refactor epoch 2.x block-commit from nakamoto block-commit --- stackslib/src/burnchains/tests/mod.rs | 29 +++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 2658a9305f..32c4e444b4 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -38,7 +38,8 @@ use crate::chainstate::burn::*; use crate::chainstate::coordinator::comm::*; use crate::chainstate::coordinator::*; use crate::chainstate::stacks::*; -use crate::core::STACKS_EPOCH_2_1_MARKER; +use crate::core::STACKS_EPOCH_2_4_MARKER; +use crate::core::STACKS_EPOCH_3_0_MARKER; use crate::cost_estimates::{CostEstimator, FeeEstimator}; use crate::stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; use crate::types::chainstate::{BlockHeaderHash, SortitionId, VRFSeed}; @@ -377,13 +378,16 @@ impl TestBurnchainBlock { Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0); txop.consensus_hash = self.parent_snapshot.consensus_hash.clone(); + let miner_pubkey_hash160 = miner.nakamoto_miner_hash160(); + txop.set_nakamoto_signing_key(&miner_pubkey_hash160); + self.txs .push(BlockstackOperationType::LeaderKeyRegister(txop.clone())); txop } - pub fn add_leader_block_commit( + pub(crate) fn inner_add_block_commit( &mut self, ic: &SortitionDBConn, miner: &mut TestMiner, @@ -392,6 +396,7 @@ impl TestBurnchainBlock { leader_key: &LeaderKeyRegisterOp, fork_snapshot: Option<&BlockSnapshot>, parent_block_snapshot: Option<&BlockSnapshot>, + epoch_marker: u8 ) -> LeaderBlockCommitOp { let input = (Txid([0; 32]), 0); let pubks = miner @@ -468,16 +473,28 @@ impl 
TestBurnchainBlock { txop.txid = Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0); - txop.memo = vec![STACKS_EPOCH_2_1_MARKER << 3]; + txop.memo = vec![epoch_marker << 3]; self.txs .push(BlockstackOperationType::LeaderBlockCommit(txop.clone())); miner.block_commits.push(txop.clone()); txop } - - // TODO: user burn support - + + /// Add an epoch 2.x block-commit + pub fn add_leader_block_commit( + &mut self, + ic: &SortitionDBConn, + miner: &mut TestMiner, + block_hash: &BlockHeaderHash, + burn_fee: u64, + leader_key: &LeaderKeyRegisterOp, + fork_snapshot: Option<&BlockSnapshot>, + parent_block_snapshot: Option<&BlockSnapshot>, + ) -> LeaderBlockCommitOp { + self.inner_add_block_commit(ic, miner, block_hash, burn_fee, leader_key, fork_snapshot, parent_block_snapshot, STACKS_EPOCH_2_4_MARKER) + } + pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) -> () { assert_eq!(parent_snapshot.block_height + 1, self.block_height); From afc54fecfc58b23a38fa2b4ca21b01af2afc562d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:58:22 -0400 Subject: [PATCH 037/122] feat: store the serialized PoX reward set information as JSON, paired with the first sortition ID of the prepare phase. 
This enables Nakamoto to process reward sets early and eagerly, before they must be written to the sortition DB MARF (it also will enable fetching the reward set info via RPC) --- stackslib/src/chainstate/burn/db/sortdb.rs | 73 +++++++++++++++++++--- 1 file changed, 65 insertions(+), 8 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index d44aa56879..54eb5aaf7b 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -875,8 +875,15 @@ const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ );"#, ]; -const SORTITION_DB_SCHEMA_9: &'static [&'static str] = - &[r#"ALTER TABLE snapshots ADD miner_pk_hash TEXT DEFAULT NULL"#]; +const SORTITION_DB_SCHEMA_9: &'static [&'static str] = &[ + r#"ALTER TABLE snapshots ADD miner_pk_hash TEXT DEFAULT NULL"#, + r#" + -- eagerly-processed reward sets, before they're applied to the start of the next reward cycle + CREATE TABLE preprocessed_reward_sets ( + sortition_id TEXT PRIMARY KEY, + reward_set TEXT NOT NULL + );"# +]; const SORTITION_DB_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);", @@ -1823,9 +1830,10 @@ impl<'a> SortitionHandleTx<'a> { burn_tip.block_height ); - // NOTE: in Nakamoto, this only works if this is a tenure-start block - let num_rows = self.execute("UPDATE snapshots SET stacks_block_accepted = 1, stacks_block_height = ?1, arrival_index = ?2 WHERE consensus_hash = ?3 AND winning_stacks_block_hash = ?4", args)?; - assert!(num_rows > 0); + // NOTE: in Nakamoto, this may return zero rows since blocks are no longer coupled to + // snapshots. However, it will update at least one row if the block is a tenure-start + // block. 
+ self.execute("UPDATE snapshots SET stacks_block_accepted = 1, stacks_block_height = ?1, arrival_index = ?2 WHERE consensus_hash = ?3 AND winning_stacks_block_hash = ?4", args)?; // update arrival data across all Stacks forks let (best_ch, best_bhh, best_height) = self.find_new_block_arrivals(burn_tip)?; @@ -3173,6 +3181,15 @@ impl SortitionDB { // TODO: This should move to Epoch 30 once it is added || version == "8" } + StacksEpochId::Epoch25 => { + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" + // TODO: This should move to Epoch 30 once it is added + || version == "8" + } StacksEpochId::Epoch30 => { version == "3" || version == "4" @@ -3480,6 +3497,34 @@ impl SortitionDB { return Ok(last_rules); } + + /// Store a pre-processed reward set. + /// `sortition_id` is the first sortition ID of the prepare phase + pub fn store_preprocessed_reward_set(sort_tx: &mut DBTx, sortition_id: &SortitionId, rc_info: &RewardCycleInfo) -> Result<(), db_error> { + let sql = "INSERT INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; + let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; + let args: &[&dyn ToSql] = &[sortition_id, &rc_json]; + sort_tx.execute(sql, args)?; + Ok(()) + } + + /// Get a pre-processed reawrd set. + /// `sortition_id` is the first sortition ID of the prepare phase. 
+ pub fn get_preprocessed_reward_set(sortdb: &DBConn, sortition_id: &SortitionId) -> Result, db_error> { + let sql = "SELECT reward_set FROM preprocessed_reward_sets WHERE sortition_id = ?1"; + let args: &[&dyn ToSql] = &[sortition_id]; + let reward_set_opt : Option = sortdb.query_row(sql, args, |row| row.get(0)) + .optional() + .map_err(db_error::from)?; + + if let Some(reward_set_str) = reward_set_opt { + let rc_info : RewardCycleInfo = serde_json::from_str(&reward_set_str).map_err(|_| db_error::ParseError)?; + Ok(Some(rc_info)) + } + else { + Ok(None) + } + } } impl<'a> SortitionDBTx<'a> { @@ -4825,7 +4870,9 @@ impl SortitionDB { }) } - /// Get a block commit by its committed block + /// Get a block commit by its committed block. + /// For Nakamoto, `consensus_hash` and `block_hash` are the hashes that combine to form the + /// `last_tenure_id` (i.e. the index block hash of the first block in the last tenure) pub fn get_block_commit_for_stacks_block( conn: &Connection, consensus_hash: &ConsensusHash, @@ -4869,6 +4916,16 @@ impl SortitionDB { None => Ok(None), } } + + /// Get a block snapshot for a winning Nakamoto tenure in a given burn chain fork. 
+ pub fn get_block_snapshot_for_winning_nakamoto_tenure( + ic: &SortitionDBConn, + tip: &SortitionId, + last_tenure_id: &StacksBlockId, + ) -> Result, db_error> { + let block_hash = BlockHeaderHash(last_tenure_id.0.clone()); + Self::get_block_snapshot_for_winning_stacks_block(ic, tip, &block_hash) + } /// Merge the result of get_stacks_header_hashes() into a BlockHeaderCache pub fn merge_block_header_cache( @@ -5096,8 +5153,6 @@ impl<'a> SortitionHandleTx<'a> { let mut sn = snapshot.clone(); sn.index_root = root_hash.clone(); - // TODO: update canonical Stacks tip across burnchain forks - // preserve memoized stacks chain tip from this burn chain fork sn.canonical_stacks_tip_height = parent_sn.canonical_stacks_tip_height; sn.canonical_stacks_tip_hash = parent_sn.canonical_stacks_tip_hash; @@ -10290,6 +10345,8 @@ pub mod tests { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut burnchain = Burnchain::regtest(path_root); From b7000b099a5cb87303f36e97f6e24d84fc51454f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 13:59:28 -0400 Subject: [PATCH 038/122] feat: add epoch 2.5 marker and add converstion for block header hash to index block hash in nakamoto --- .../burn/operations/leader_block_commit.rs | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index cf241c0017..fbb593e663 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -37,17 +37,20 @@ use crate::chainstate::burn::SortitionId; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::index::storage::TrieFileStorage; use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey}; +use crate::core::STACKS_EPOCH_2_05_MARKER; +use crate::core::STACKS_EPOCH_2_1_MARKER; use 
crate::core::STACKS_EPOCH_2_2_MARKER; use crate::core::STACKS_EPOCH_2_3_MARKER; use crate::core::STACKS_EPOCH_2_4_MARKER; +use crate::core::STACKS_EPOCH_2_5_MARKER; +use crate::core::STACKS_EPOCH_3_0_MARKER; use crate::core::{StacksEpoch, StacksEpochId}; -use crate::core::{STACKS_EPOCH_2_05_MARKER, STACKS_EPOCH_2_1_MARKER, STACKS_EPOCH_3_0_MARKER}; use crate::net::Error as net_error; use stacks_common::address::AddressHashMode; use stacks_common::codec::{write_next, Error as codec_error, StacksMessageCodec}; use stacks_common::types::chainstate::TrieHash; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksAddress, VRFSeed, + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, VRFSeed, }; use stacks_common::util::hash::to_hex; use stacks_common::util::log; @@ -165,6 +168,12 @@ impl LeaderBlockCommitOp { self.burn_parent_modulus as u64 % BURN_BLOCK_MINED_AT_MODULUS } + /// In Nakamoto, the block header hash is actually the index block hash of the first Nakamoto + /// block of the last tenure (the "tenure id"). This helper obtains it. 
+ pub fn last_tenure_id(&self) -> StacksBlockId { + StacksBlockId(self.block_header_hash.0.clone()) + } + fn parse_data(data: &Vec) -> Option { /* Wire format: @@ -761,6 +770,7 @@ impl LeaderBlockCommitOp { StacksEpochId::Epoch22 => self.check_epoch_commit_marker(STACKS_EPOCH_2_2_MARKER), StacksEpochId::Epoch23 => self.check_epoch_commit_marker(STACKS_EPOCH_2_3_MARKER), StacksEpochId::Epoch24 => self.check_epoch_commit_marker(STACKS_EPOCH_2_4_MARKER), + StacksEpochId::Epoch25 => self.check_epoch_commit_marker(STACKS_EPOCH_2_5_MARKER), StacksEpochId::Epoch30 => self.check_epoch_commit_marker(STACKS_EPOCH_3_0_MARKER), } } @@ -780,6 +790,7 @@ impl LeaderBlockCommitOp { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { // correct behavior -- uses *sortition height* to find the intended sortition ID let sortition_height = self @@ -1789,6 +1800,8 @@ mod tests { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ), peer_version: 0x012345678, network_id: 0x9abcdef0, @@ -2334,6 +2347,8 @@ mod tests { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ), peer_version: 0x012345678, network_id: 0x9abcdef0, @@ -3035,6 +3050,8 @@ mod tests { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ), peer_version: 0x012345678, network_id: 0x9abcdef0, From d7057b7d4295df4cfbefe1ff6d679dc5f6f2bc36 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:00:06 -0400 Subject: [PATCH 039/122] feat: add setter for nakamoto miner key hash160, and document where it goes in the protocol wire format --- .../burn/operations/leader_key_register.rs | 32 +++++++++++++------ 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 953a6ce7da..03bf35afe1 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ 
b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -83,16 +83,27 @@ impl LeaderKeyRegisterOp { self.memo.get(0..20).map(Hash160::from_bytes).flatten() } + /// Set the miner public key hash160 for block-signing + pub fn set_nakamoto_signing_key(&mut self, pubkey_hash160: &Hash160) { + if self.memo.len() < 20 { + let mut new_memo = vec![0; 20]; + new_memo[0..self.memo.len()].copy_from_slice(&self.memo); + self.memo = new_memo; + } + self.memo[0..20].copy_from_slice(&pubkey_hash160.0); + } + fn parse_data(data: &Vec) -> Option { /* Wire format: - 0 2 3 23 55 80 - |------|--|---------------|-----------------------|---------------------------| - magic op consensus hash proving public key memo - (ignored) (ignored) + 0 2 3 23 55 75 80 + |------|--|---------------|-----------------------|-----------------------|---------| + magic op consensus hash proving public key block-signing hash160 memo + (ignored) (ignored) - Note that `data` is missing the first 3 bytes -- the magic and op have been stripped + Note that `data` is missing the first 3 bytes -- the magic and op have been stripped. + `block-signing hash160` is new to Nakamoto. */ // memo can be empty, and magic + op are omitted if data.len() < 52 { @@ -180,10 +191,13 @@ impl StacksMessageCodec for LeaderKeyRegisterOp { /* Wire format: - 0 2 3 23 55 80 - |------|--|---------------|-----------------------|---------------------------| - magic op consensus hash proving public key memo - (ignored) (ignored) + 0 2 3 23 55 75 80 + |------|--|---------------|-----------------------|-----------------------|---------| + magic op consensus hash proving public key block-signing hash160 memo + (ignored) (ignored) + + Note that `data` is missing the first 3 bytes -- the magic and op have been stripped. 
+ `block-signing hash160` is new to Nakamoto, and is contained within the first 20 bytes of `memo` */ fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &(Opcodes::LeaderKeyRegister as u8))?; From 2fe7e8bf140926bdf47a2ab95494c033c1f6863e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:02:08 -0400 Subject: [PATCH 040/122] feat: store reward set info by first prepare-phase sortition so that nakamoto can load epoch 2.5's reward set when it boots up --- stackslib/src/chainstate/coordinator/mod.rs | 88 ++++++++++++++++++--- 1 file changed, 79 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index c1e3ece273..ba1666bfca 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -58,6 +58,7 @@ use crate::chainstate::coordinator::comm::{ }; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::POX_3_NAME; +use crate::chainstate::stacks::boot::POX_4_NAME; use crate::chainstate::stacks::index::marf::MARFOpenOpts; use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::{ @@ -89,14 +90,14 @@ pub mod tests; /// The 3 different states for the current /// reward cycle's relationship to its PoX anchor -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum PoxAnchorBlockStatus { SelectedAndKnown(BlockHeaderHash, Txid, RewardSet), SelectedAndUnknown(BlockHeaderHash, Txid), NotSelected, } -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct RewardCycleInfo { pub reward_cycle: u64, pub anchor_status: PoxAnchorBlockStatus, @@ -316,7 +317,7 @@ impl OnChainRewardSetProvider { info!("PoX reward cycle defaulting to burn in Epochs 2.2 and 2.3"); return Ok(RewardSet::empty()); } - StacksEpochId::Epoch24 | StacksEpochId::Epoch30 => { + StacksEpochId::Epoch24 
=> { // Epoch 2.4 computes reward sets, but *only* if PoX-3 is active if burnchain .pox_constants @@ -331,6 +332,21 @@ impl OnChainRewardSetProvider { return Ok(RewardSet::empty()); } } + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + // Epoch 2.5 and 3.0 compute reward sets, but *only* if PoX-4 is active + if burnchain + .pox_constants + .active_pox_contract(current_burn_height) + != POX_4_NAME + { + // Note: this should not happen in mainnet or testnet, because the no reward cycle start height + // exists between Epoch 2.5's instantiation height and the pox-4 activation height. + // However, this *will* happen in testing if Epoch 2.5's instantiation height is set == a reward cycle + // start height + info!("PoX reward cycle defaulting to burn in Epoch 2.5 because cycle start is before PoX-4 activation"); + return Ok(RewardSet::empty()); + } + } }; let registered_addrs = @@ -629,7 +645,7 @@ pub fn get_reward_cycle_info( burnchain: &Burnchain, burnchain_db: &BurnchainDB, chain_state: &mut StacksChainState, - sort_db: &SortitionDB, + sort_db: &mut SortitionDB, provider: &U, always_use_affirmation_maps: bool, ) -> Result, Error> { @@ -637,7 +653,7 @@ pub fn get_reward_cycle_info( &format!("FATAL: no epoch defined for burn height {}", burn_height), ); - if burnchain.is_reward_cycle_start(burn_height) { + let reward_cycle_info = if burnchain.is_reward_cycle_start(burn_height) { let reward_cycle = burnchain .block_height_to_reward_cycle(burn_height) .expect("FATAL: no reward cycle for burn height"); @@ -723,7 +739,30 @@ pub fn get_reward_cycle_info( } } else { Ok(None) + }; + + if let Ok(Some(reward_cycle_info)) = reward_cycle_info.as_ref() { + // cache the reward cycle info as of the first sortition in the prepare phase, so that + // the Nakamoto epoch can go find it later + let ic = sort_db.index_handle(sortition_tip); + let prev_reward_cycle = burnchain + .block_height_to_reward_cycle(burn_height) + .expect("FATAL: no reward cycle for burn height"); + + if 
prev_reward_cycle > 1 { + let prev_reward_cycle_start = burnchain.reward_cycle_to_block_height(prev_reward_cycle - 1); + let prepare_phase_start = prev_reward_cycle_start + u64::from(burnchain.pox_constants.reward_cycle_length) - u64::from(burnchain.pox_constants.prepare_length); + let first_prepare_sn = SortitionDB::get_ancestor_snapshot(&ic, prepare_phase_start, sortition_tip)? + .expect("FATAL: no start-of-prepare-phase sortition"); + + let mut tx = sort_db.tx_begin()?; + if SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)?.is_none() { + SortitionDB::store_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id, &reward_cycle_info)?; + } + tx.commit()?; + } } + reward_cycle_info } /// PoX payout event to be sent to connected event observers @@ -2212,9 +2251,40 @@ impl< } /// Outermost call to process a burnchain block. + /// Will call the Stacks 2.x or Nakamoto handler, depending on whether or not /// Not called internally. + /// NOTE: the 2.x and Nakamoto handlers return `Some(..)` in _different_ circumstances. If + /// that matters to you, then you should call them directly. pub fn handle_new_burnchain_block(&mut self) -> Result, Error> { - self.inner_handle_new_burnchain_block(&mut HashSet::new()) + let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; + let epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn())?; + let target_epoch_index = StacksEpoch::find_epoch(&epochs, canonical_burnchain_tip.block_height).expect("FATAL: epoch not defined for burnchain height"); + let target_epoch = epochs.get(target_epoch_index).expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); + if target_epoch.epoch_id < StacksEpochId::Epoch30 { + // burnchain has not yet advanced to epoch 3.0 + self.handle_new_epoch2_burnchain_block(&mut HashSet::new()) + } + else { + // burnchain has advanced to epoch 3.0, but has our sortition DB? 
+ let canonical_snapshot = match self.canonical_sortition_tip.as_ref() { + Some(sn_tip) => SortitionDB::get_block_snapshot(self.sortition_db.conn(), sn_tip)? + .expect(&format!( + "FATAL: do not have previously-calculated highest valid sortition tip {}", + sn_tip + )), + None => SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())?, + }; + let target_epoch_index = StacksEpoch::find_epoch(&epochs, canonical_snapshot.block_height).expect("FATAL: epoch not defined for BlockSnapshot height"); + let target_epoch = epochs.get(target_epoch_index).expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); + + if target_epoch.epoch_id < StacksEpochId::Epoch30 { + // need to catch the sortition DB up + self.handle_new_epoch2_burnchain_block(&mut HashSet::new())?; + } + + // proceed to process sortitions in epoch 3.0 + self.handle_new_nakamoto_burnchain_block() + } } /// Are affirmation maps active during the epoch? @@ -2232,7 +2302,7 @@ impl< /// this happens, *and* if re-processing the new affirmed history is *blocked on* the /// unavailability of a PoX anchor block that *must now* exist, then return the hash of this /// anchor block. 
- fn inner_handle_new_burnchain_block( + pub fn handle_new_epoch2_burnchain_block( &mut self, already_processed_burn_blocks: &mut HashSet, ) -> Result, Error> { @@ -2683,7 +2753,7 @@ impl< &self.burnchain, &self.burnchain_blocks_db, &mut self.chain_state_db, - &self.sortition_db, + &mut self.sortition_db, &self.reward_set_provider, self.config.always_use_affirmation_maps, ) @@ -3195,7 +3265,7 @@ impl< self.canonical_sortition_tip = Some(prep_end.sortition_id); // Start processing from the beginning of the new PoX reward set - self.inner_handle_new_burnchain_block(already_processed_burn_blocks) + self.handle_new_epoch2_burnchain_block(already_processed_burn_blocks) } } From 1a95fd1227dde22e599c5c9e7555e1f2157ee706 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:02:40 -0400 Subject: [PATCH 041/122] chore: fill in mocked pox-3 unlock and pox-4 activation --- stackslib/src/chainstate/coordinator/tests.rs | 60 ++++++++++++++++--- 1 file changed, 51 insertions(+), 9 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 8374f6d399..eac7ed36a9 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -73,6 +73,7 @@ use crate::chainstate::stacks::db::{ accounts::MinerReward, ClarityTx, StacksChainState, StacksHeaderInfo, }; use crate::chainstate::stacks::events::StacksBlockEventData; +use crate::chainstate::stacks::miner::BlockBuilder; use crate::chainstate::stacks::*; use crate::clarity_vm::clarity::ClarityConnection; use crate::core; @@ -545,6 +546,8 @@ pub fn get_burnchain(path: &str, pox_consts: Option) -> Burnchain u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ) }); b @@ -623,7 +626,7 @@ fn make_genesis_block_with_recipients( let mut tx = StacksTransaction::new( TransactionVersion::Testnet, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 
32]), None, None), ); tx.chain_id = 0x80000000; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -855,7 +858,7 @@ fn make_stacks_block_with_input( let mut tx = StacksTransaction::new( TransactionVersion::Testnet, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx.chain_id = 0x80000000; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -992,6 +995,8 @@ fn missed_block_commits_2_05() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1312,6 +1317,8 @@ fn missed_block_commits_2_1() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1656,6 +1663,8 @@ fn late_block_commits_2_1() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2713,6 +2722,7 @@ fn test_pox_btc_ops() { let sunset_ht = 8000; let pox_v1_unlock_ht = u32::MAX; let pox_v2_unlock_ht = u32::MAX; + let pox_v3_unlock_ht = u32::MAX; let pox_consts = Some(PoxConstants::new( 5, 3, @@ -2723,6 +2733,8 @@ fn test_pox_btc_ops() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + pox_v3_unlock_ht, + u32::MAX, u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2904,7 +2916,8 @@ fn test_pox_btc_ops() { stacker_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), balance as u128, "No lock should be active" @@ -2995,6 +3008,7 @@ fn test_stx_transfer_btc_ops() { let pox_v1_unlock_ht = u32::MAX; let pox_v2_unlock_ht = u32::MAX; + let pox_v3_unlock_ht = u32::MAX; let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 5, @@ -3006,6 +3020,8 @@ fn test_stx_transfer_btc_ops() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + pox_v3_unlock_ht, + u32::MAX, u32::MAX, )); 
let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3209,7 +3225,8 @@ fn test_stx_transfer_btc_ops() { sender_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), (balance as u128) - transfer_amt, "Transfer should have decremented balance" @@ -3218,7 +3235,8 @@ fn test_stx_transfer_btc_ops() { recipient_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), transfer_amt, "Recipient should have incremented balance" @@ -3228,7 +3246,8 @@ fn test_stx_transfer_btc_ops() { sender_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), balance as u128, ); @@ -3236,7 +3255,8 @@ fn test_stx_transfer_btc_ops() { recipient_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), 0, ); @@ -3326,7 +3346,9 @@ fn test_sbtc_ops() { let pox_v1_unlock_ht = 12; let pox_v2_unlock_ht = 14; - let pox_3_activation_ht = 16; + let pox_v3_unlock_ht = 16; + let pox_3_activation_ht = 15; + let pox_4_activation_ht = 16; let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 100, @@ -3338,7 +3360,9 @@ fn test_sbtc_ops() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + pox_v3_unlock_ht, pox_3_activation_ht, + pox_4_activation_ht, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3684,6 +3708,8 @@ fn test_delegate_stx_btc_ops() { pox_v1_unlock_ht, pox_v2_unlock_ht, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3989,6 +4015,8 @@ fn test_initial_coinbase_reward_distributions() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4228,6 +4256,8 @@ fn 
test_epoch_switch_cost_contract_instantiation() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4429,6 +4459,8 @@ fn test_epoch_switch_pox_2_contract_instantiation() { 10, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4622,7 +4654,7 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let _r = std::fs::remove_dir_all(path); let sunset_ht = 8000; - let pox_consts = Some(PoxConstants::new(6, 3, 3, 25, 5, 10, sunset_ht, 10, 14, 16)); + let pox_consts = Some(PoxConstants::new(6, 3, 3, 25, 5, 10, sunset_ht, 10, 14, u32::MAX, 16, u32::MAX)); let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..25).map(|_| VRFPrivateKey::new()).collect(); @@ -4826,6 +4858,8 @@ fn atlas_stop_start() { 10, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5135,6 +5169,8 @@ fn test_epoch_verify_active_pox_contract() { pox_v1_unlock_ht, pox_v2_unlock_ht, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5426,6 +5462,8 @@ fn test_sortition_with_sunset() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5735,6 +5773,8 @@ fn test_sortition_with_sunset_and_epoch_switch() { v1_unlock_ht, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -6084,6 +6124,8 @@ fn test_pox_processable_block_in_different_pox_forks() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let b = get_burnchain(path, pox_consts.clone()); let b_blind = get_burnchain(path_blinded, pox_consts.clone()); From abb66ba4aa859c84fa4ca97841584d8fa9ce1bcc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:03:44 -0400 Subject: [PATCH 042/122] feat: store preprocessed reward set 
info from start of prepare phase, so the nakamoto block handlers can know to block burnchain block processing until the reward addresses are known --- .../chainstate/nakamoto/coordinator/mod.rs | 164 +++++++++++++----- 1 file changed, 119 insertions(+), 45 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index fc04febdd3..50bc7b4c6f 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -38,6 +38,7 @@ use crate::chainstate::coordinator::{ RewardSetProvider, }; use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::Error as ChainstateError; use crate::chainstate::stacks::boot::RewardSet; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::miner::signal_mining_blocked; @@ -49,6 +50,8 @@ use crate::cost_estimates::FeeEstimator; use crate::monitoring::increment_stx_blocks_processed_counter; +use crate::net::Error as NetError; + use crate::util_lib::db::Error as DBError; use stacks_common::types::chainstate::BlockHeaderHash; @@ -77,6 +80,8 @@ impl OnChainRewardSetProvider { let liquid_ustx = chainstate.get_liquid_ustx(block_id); + debug!("PoX addrs at {} ({}): {:?}", block_id, registered_addrs.len(), ®istered_addrs); + let (threshold, participation) = StacksChainState::get_reward_threshold_and_participation( &burnchain.pox_constants, ®istered_addrs[..], @@ -89,7 +94,7 @@ impl OnChainRewardSetProvider { current_burn_height )); - if participation == 0 { + if cur_epoch.epoch_id >= StacksEpochId::Epoch30 && participation == 0 { // no one is stacking. This is a fatal error. error!("No PoX participation. Aborting."); panic!(); @@ -133,7 +138,7 @@ fn find_prepare_phase_sortitions( &sns.last() .as_ref() .expect("FATAL; unreachable: sns is never empty") - .sortition_id, + .parent_sortition_id, )? 
else { break; @@ -153,8 +158,7 @@ fn find_prepare_phase_sortitions( /// If this method returns None, the caller should try again when there are more Stacks blocks. In /// Nakamoto, every reward cycle _must_ have a PoX anchor block; otherwise, the chain halts. /// -/// Returns Ok(Some(reward-cycle-info)) if we found the first sortition in the prepare phase. It -/// will be `SelectedAndKnown`. +/// Returns Ok(Some(reward-cycle-info)) if we found the first sortition in the prepare phase. /// Returns Ok(None) if we're still waiting for the PoX anchor block sortition /// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase /// Returns Err(Error::RewardCycleAlreadyProcessed) if the reward set for this reward cycle has @@ -164,7 +168,7 @@ pub fn get_nakamoto_reward_cycle_info( sortition_tip: &SortitionId, burnchain: &Burnchain, chain_state: &mut StacksChainState, - sort_db: &SortitionDB, + sort_db: &mut SortitionDB, provider: &U, ) -> Result, Error> { let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? @@ -212,6 +216,19 @@ pub fn get_nakamoto_reward_cycle_info( // cannot change later). let prepare_phase_sortitions = find_prepare_phase_sortitions(sort_db, burnchain, sortition_tip)?; + + // did we already calculate the reward cycle info? If so, then return it. + let first_sortition_id = if let Some(first_sn) = prepare_phase_sortitions.first() { + if let Some(persisted_reward_cycle_info) = SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? 
{ + return Ok(Some(persisted_reward_cycle_info)); + } + first_sn.sortition_id.clone() + } + else { + // can't do anything + return Ok(None); + }; + for sn in prepare_phase_sortitions.into_iter() { if !sn.sortition { continue; @@ -263,10 +280,18 @@ pub fn get_nakamoto_reward_cycle_info( ); let anchor_status = PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set); - return Ok(Some(RewardCycleInfo { + + let rc_info = RewardCycleInfo { reward_cycle, anchor_status, - })); + }; + + // persist this + let mut tx = sort_db.tx_begin()?; + SortitionDB::store_preprocessed_reward_set(&mut tx, &first_sortition_id, &rc_info)?; + tx.commit()?; + + return Ok(Some(rc_info)); } // no stacks block known yet @@ -346,11 +371,11 @@ impl< signal_mining_blocked(miner_status.clone()); debug!("Received new Nakamoto stacks block notice"); match self.handle_new_nakamoto_stacks_block() { - Ok(missing_block_opt) => { - if missing_block_opt.is_some() { + Ok(new_anchor_block_opt) => { + if let Some(bhh) = new_anchor_block_opt { debug!( - "Missing affirmed anchor block: {:?}", - &missing_block_opt.as_ref().expect("unreachable") + "Found next PoX anchor block, waiting for reward cycle processing"; + "pox_anchor_block_hash" => %bhh ); } } @@ -390,7 +415,9 @@ impl< /// Handle one or more new Nakamoto Stacks blocks. /// If we process a PoX anchor block, then return its block hash. This unblocks processing the - /// next reward cycle's burnchain blocks. + /// next reward cycle's burnchain blocks. Subsequent calls to this function will terminate + /// with Some(pox-anchor-block-hash) until the reward cycle info is processed in the sortition + /// DB. 
pub fn handle_new_nakamoto_stacks_block(&mut self) -> Result, Error> { let canonical_sortition_tip = self.canonical_sortition_tip.clone().expect( "FAIL: processing a new Stacks block, but don't have a canonical sortition tip", @@ -398,28 +425,44 @@ impl< loop { // process at most one block per loop pass - let sortdb_handle = self + let mut sortdb_handle = self .sortition_db .tx_handle_begin(&canonical_sortition_tip)?; - let mut processed_blocks = NakamotoChainState::process_nakamoto_blocks( + let mut processed_block_receipt = match NakamotoChainState::process_next_nakamoto_block( &mut self.chain_state_db, - sortdb_handle, - 1, + &mut sortdb_handle, self.dispatcher, - )?; + ) { + Ok(receipt_opt) => receipt_opt, + Err(ChainstateError::InvalidStacksBlock(msg)) => { + warn!("Encountered invalid block: {}", &msg); + + // try again + self.notifier.notify_stacks_block_processed(); + increment_stx_blocks_processed_counter(); + continue; + } + Err(ChainstateError::NetError(NetError::DeserializeError(msg))) => { + // happens if we load a zero-sized block (i.e. 
an invalid block) + warn!("Encountered invalid block (codec error): {}", &msg); + + // try again + self.notifier.notify_stacks_block_processed(); + increment_stx_blocks_processed_counter(); + continue; + } + Err(e) => { + // something else happened + return Err(e.into()); + } + }; - if processed_blocks.len() == 0 { + sortdb_handle.commit()?; + + let Some(block_receipt) = processed_block_receipt.take() else { // out of blocks break; - } - - let Some(Some(block_receipt)) = processed_blocks.pop() else { - // this block was invalid - debug!("Bump blocks processed (invalid)"); - self.notifier.notify_stacks_block_processed(); - increment_stx_blocks_processed_counter(); - continue; }; // only bump the coordinator's state if the processed block @@ -550,7 +593,7 @@ impl< sortition_tip_id, &self.burnchain, &mut self.chain_state_db, - &self.sortition_db, + &mut self.sortition_db, &self.reward_set_provider, ) } @@ -561,9 +604,6 @@ impl< /// block. If the next PoX anchor block is not available, then no burnchain block processing /// happens, and the hash of the PoX anchor block is returned instead. /// - /// Returns Ok(None) if all burnchain blocks are processed - /// Returns Ok(Some(hash)) if burnchain block processing is blocked on a missing PoX anchor - /// block /// Returns Err(..) if an error occurred while processing (i.e. a DB error). pub fn handle_new_nakamoto_burnchain_block( &mut self, @@ -654,22 +694,56 @@ impl< } }; - // is this the burnchain block that selected the PoX anchor block? - let reward_cycle_info = self.get_nakamoto_reward_cycle_info(&header)?; - if let Some(rc_info) = reward_cycle_info { - // in nakamoto, if we have any reward cycle info at all, it will be known. 
- assert!( - rc_info.is_reward_info_known(), - "FATAL: unknown PoX anchor block in Nakamoto" - ); - return Ok(Some( - rc_info - .selected_anchor_block() - .expect("FATAL: Nakamoto always has a PoX anchor block") - .0 - .to_owned(), - )); + if self.burnchain.is_in_prepare_phase(header.block_height) { + // try to eagerly load up the reward cycle information, so we can persist it and + // make it available to signers. If we're at the _end_ of the prepare phase, then + // we have no choice but to block. + let reward_cycle_info = self.get_nakamoto_reward_cycle_info(&header)?; + if let Some(rc_info) = reward_cycle_info { + // in nakamoto, if we have any reward cycle info at all, it will be known. + assert!( + rc_info.known_selected_anchor_block().is_some(), + "FATAL: unknown PoX anchor block in Nakamoto" + ); + } + } + + let reward_cycle_info = if self.burnchain.is_reward_cycle_start(header.block_height) { + // we're at the end of the prepare phase, so we'd better have obtained the reward + // cycle info of we must block. + let prepare_phase_sortitions = + find_prepare_phase_sortitions(&self.sortition_db, &self.burnchain, &last_processed_ancestor)?; + + if let Some(first_sn) = prepare_phase_sortitions.first() { + let reward_cycle_info = SortitionDB::get_preprocessed_reward_set(&self.sortition_db.conn(), &first_sn.sortition_id)?; + if let Some(rc_info) = reward_cycle_info.as_ref() { + // we must have an anchor block + assert!(rc_info.known_selected_anchor_block().is_some(), "FATAL: do not know prior reward cycle anchor block"); + } + else { + // have to block -- we don't have the reward cycle information + debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; + "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), + "sortition_id" => %first_sn.sortition_id + ); + return Ok(None); + } + reward_cycle_info + } + else { + // have to block -- we don't have any sortitions in the preceding prepare + // phase. 
+ // this is really unreachable, but don't panic just yet. + debug!("Do not yet have PoX anchor block for next reward cycle -- no prepare-phase sortitions"; + "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height) + ); + return Ok(None); + } } + else { + // not starting a reward cycle anyway + None + }; // process next sortition let dispatcher_ref = &self.dispatcher; From 66249f472ba65e66365df65d116b83578732024e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:04:53 -0400 Subject: [PATCH 043/122] feat: MVP TestPeer test to create a single non-empty Nakamoto tenure, which boots through all epochs from 2.0 to 3.0 --- .../chainstate/nakamoto/coordinator/tests.rs | 91 ++++++++++++++++++- 1 file changed, 88 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index da0e9ab0a9..6fc00d3320 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -16,9 +16,94 @@ use crate::net::test::{TestPeer, TestPeerConfig}; -/// Mine two reward cycles without any interruptions. 
+use clarity::vm::types::PrincipalData; + +use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::test::make_pox_4_lockup; +use crate::chainstate::stacks::CoinbasePayload; +use crate::chainstate::stacks::StacksTransaction; +use crate::chainstate::stacks::StacksTransactionSigner; +use crate::chainstate::stacks::TenureChangeCause; +use crate::chainstate::stacks::TransactionAnchorMode; +use crate::chainstate::stacks::TransactionAuth; +use crate::chainstate::stacks::TransactionPayload; +use crate::chainstate::stacks::TransactionVersion; + +use crate::clarity::vm::types::StacksAddressExtensions; + +use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; +use stacks_common::address::AddressHashMode; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::StacksEpoch; +use stacks_common::util::vrf::VRFProof; + +use crate::core::StacksEpochExtension; + +/// Make a peer and transition it into the Nakamoto epoch. +/// The node needs to be stacking; otherwise, Nakamoto won't activate. 
+fn boot_nakamoto(test_name: &str, mut initial_balances: Vec<(PrincipalData, u64)>) -> TestPeer { + let mut peer_config = TestPeerConfig::new(test_name, 0, 0); + let private_key = peer_config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + // reward cycles are 5 blocks long + // first 25 blocks are boot-up + // reward cycle 6 instantiates pox-3 + // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation + peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(36)); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.initial_balances.append(&mut initial_balances); + peer_config.burnchain.pox_constants.v2_unlock_height = 21; + peer_config.burnchain.pox_constants.pox_3_activation_height = 26; + peer_config.burnchain.pox_constants.v3_unlock_height = 27; + peer_config.burnchain.pox_constants.pox_4_activation_height = 31; + + let mut peer = TestPeer::new(peer_config); + let mut peer_nonce = 0; + + // advance through cycle 6 + for _ in 0..5 { + peer.tenure_with_txs(&[], &mut peer_nonce); + } + + // stack to pox-3 in cycle 7 + for sortition_height in 0..5 { + let txs = if sortition_height == 0 { + // stack them all + let stack_tx = make_pox_4_lockup(&private_key, 0, 1_000_000_000_000_000_000, PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()), 12, 34); + vec![stack_tx] + } + else { + vec![] + }; + + peer.tenure_with_txs(&txs, &mut peer_nonce); + } + + // peer is at the start of cycle 8 + peer +} + +/// Mine a single Nakamoto tenure #[test] fn test_simple_nakamoto_coordinator_bootup() { - let peer_config = TestPeerConfig::new(function_name!(), 0, 0); - let mut peer = TestPeer::new(peer_config); + let mut peer = boot_nakamoto(function_name!(), vec![]); + + let (burn_ops, tenure_change, 
vrf_proof) = peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); + let blocks_and_sizes = peer.make_nakamoto_tenure(&consensus_hash, tenure_change, vrf_proof, |_miner, _chainstate, _sort_dbconn, _count| { vec![] }); + let blocks = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + peer.process_nakamoto_tenure(blocks); } From e2313b37838a1c25194f478234f255069ee532d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:05:56 -0400 Subject: [PATCH 044/122] feat: nakamoto block header constructors; verify the miner signature on the block from its leader key; only allow nakamoto blocks to build atop an epoch 2.x block if there are not yet any nakamoto headers --- stackslib/src/chainstate/nakamoto/mod.rs | 307 +++++++++++++++++------ 1 file changed, 228 insertions(+), 79 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 0b6b5328f0..91ed24b7ca 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -38,7 +38,10 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; -use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; +use stacks_common::util::secp256k1::{MessageSignature}; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::PrivateKey; use super::burn::db::sortdb::{SortitionHandleConn, SortitionHandleTx}; use super::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp}; @@ -64,16 +67,21 @@ use crate::util_lib::db::{ query_row_panic, query_rows, u64_to_sql, DBConn, Error as DBError, FromRow, }; +use crate::core::BOOT_BLOCK_HASH; + use 
crate::chainstate::coordinator::BlockEventDispatcher; use crate::chainstate::coordinator::Error; use crate::net::Error as net_error; pub mod coordinator; +pub mod miner; #[cfg(test)] pub mod tests; +pub const NAKAMOTO_BLOCK_VERSION: u8 = 0; + define_named_enum!(HeaderTypeNames { Nakamoto("nakamoto"), Epoch2("epoch2"), @@ -202,16 +210,26 @@ lazy_static! { ]; } +/// Result of preparing to produce or validate a block pub struct SetupBlockResult<'a, 'b> { + /// Handle to the ClarityVM pub clarity_tx: ClarityTx<'a, 'b>, + /// Transaction receipts from any Stacks-on-Bitcoin transactions and epoch transition events pub tx_receipts: Vec, + /// Miner rewards that can be paid now: (this-miner-reward, parent-miner-reward, miner-info) pub matured_miner_rewards_opt: Option<(MinerReward, MinerReward, MinerRewardInfo)>, + /// Epoch in which this block was set up pub evaluated_epoch: StacksEpochId, + /// Whether or not we applied an epoch transition in this block pub applied_epoch_transition: bool, + /// stack-stx Stacks-on-Bitcoin txs pub burn_stack_stx_ops: Vec, + /// transfer-stx Stacks-on-Bitcoin txs pub burn_transfer_stx_ops: Vec, - pub auto_unlock_events: Vec, + /// delegate-stx Stacks-on-Bitcoin txs pub burn_delegate_stx_ops: Vec, + /// STX auto-unlock events from PoX + pub auto_unlock_events: Vec, } #[derive(Debug, Clone, PartialEq)] @@ -310,6 +328,8 @@ impl StacksMessageCodec for NakamotoBlockHeader { } impl NakamotoBlockHeader { + /// Calculate the message digest to sign. + /// This includes all fields _except_ the signatures. 
pub fn signature_hash(&self) -> Result { let mut hasher = Sha512_256::new(); let fd = &mut hasher; @@ -323,10 +343,10 @@ impl NakamotoBlockHeader { Ok(Sha512Trunc256Sum::from_hasher(hasher)) } - pub fn recover_miner_pk(&self) -> Option { + pub fn recover_miner_pk(&self) -> Option { let signed_hash = self.signature_hash().ok()?; let recovered_pk = - Secp256k1PublicKey::recover_to_pubkey(signed_hash.bits(), &self.miner_signature) + StacksPublicKey::recover_to_pubkey(signed_hash.bits(), &self.miner_signature) .ok()?; Some(recovered_pk) @@ -344,6 +364,67 @@ impl NakamotoBlockHeader { pub fn is_first_mined(&self) -> bool { StacksBlockHeader::is_first_index_block_hash(&self.parent_block_id) } + + /// Sign the block header by the miner + pub fn sign_miner(&mut self, privk: &StacksPrivateKey) -> Result<(), ChainstateError> { + let sighash = self.signature_hash()?.0; + let sig = privk + .sign(&sighash) + .map_err(|se| net_error::SigningError(se.to_string()))?; + self.miner_signature = sig; + Ok(()) + } + + /// Make an "empty" header whose block data needs to be filled in. + /// This is used by the miner code. 
+ pub fn from_parent_empty( + chain_length: u64, + burn_spent: u64, + consensus_hash: ConsensusHash, + parent_block_id: StacksBlockId, + ) -> NakamotoBlockHeader { + NakamotoBlockHeader { + version: NAKAMOTO_BLOCK_VERSION, + chain_length, + burn_spent, + consensus_hash, + parent_block_id, + tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), + state_index_root: TrieHash([0u8; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty() + } + } + + /// Make a completely empty header + pub fn empty() -> NakamotoBlockHeader { + NakamotoBlockHeader { + version: 0, + chain_length: 0, + burn_spent: 0, + consensus_hash: ConsensusHash([0u8; 20]), + parent_block_id: StacksBlockId([0u8; 32]), + tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), + state_index_root: TrieHash([0u8; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty() + } + } + + /// Make a genesis header (testing only) + pub fn genesis() -> NakamotoBlockHeader { + NakamotoBlockHeader { + version: 0, + chain_length: 0, + burn_spent: 0, + consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + parent_block_id: StacksBlockId(BOOT_BLOCK_HASH.0.clone()), + tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), + state_index_root: TrieHash([0u8; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty() + } + } } impl NakamotoBlock { @@ -388,11 +469,37 @@ impl NakamotoBlock { self.header.is_first_mined() } + /// Get the coinbase transaction in Nakamoto. + /// It's the first non-TenureChange transaction + /// (and, all preceding transactions _must_ be TenureChanges) pub fn get_coinbase_tx(&self) -> Option<&StacksTransaction> { - match self.txs.get(0).map(|x| &x.payload) { - Some(TransactionPayload::Coinbase(..)) => Some(&self.txs[0]), - _ => None, + let mut tx_ref = None; + for tx in self.txs.iter() { + if let TransactionPayload::TenureChange(..) 
= &tx.payload { + if tx_ref.is_none() { + continue; + } + // non-TenureChange tx precedes a coinbase, so there's no valid coinbase. + // (a coinbase in any other position is invalid anyway). + return None; + } + else if let TransactionPayload::Coinbase(..) = &tx.payload { + if tx_ref.is_none() { + // contender + tx_ref = Some(tx); + } + else { + // multiple coinbases, so none of them are valid. + return None; + } + } + else if tx_ref.is_none() { + // non-Coinbase and non-TenureChange tx, so there's no valid coinbase. + // (a coinbase in any other position is invalid anyway) + return None; + } } + tx_ref } pub fn block_id(&self) -> StacksBlockId { @@ -613,7 +720,7 @@ impl NakamotoChainState { // attach the block to the chain state and calculate the next chain tip. let pox_constants = sort_tx.context.pox_constants.clone(); - let (epoch_receipt, clarity_commit) = match NakamotoChainState::append_block( + let (receipt, clarity_commit) = match NakamotoChainState::append_block( &mut chainstate_tx, clarity_instance, sort_tx, @@ -645,11 +752,11 @@ impl NakamotoChainState { }; assert_eq!( - epoch_receipt.header.anchored_header.block_hash(), + receipt.header.anchored_header.block_hash(), next_ready_block.header.block_hash() ); assert_eq!( - epoch_receipt.header.consensus_hash, + receipt.header.consensus_hash, next_ready_block.header.consensus_hash ); @@ -670,17 +777,17 @@ impl NakamotoChainState { parent_header_info.anchored_header.block_hash(), ) .into(), - &epoch_receipt.header.clone(), - &epoch_receipt.tx_receipts, + &receipt.header.clone(), + &receipt.tx_receipts, &parent_block_id, next_ready_block_snapshot.winning_block_txid, - &epoch_receipt.matured_rewards, - epoch_receipt.matured_rewards_info.as_ref(), - epoch_receipt.parent_burn_block_hash, - epoch_receipt.parent_burn_block_height, - epoch_receipt.parent_burn_block_timestamp, - &epoch_receipt.anchored_block_cost, - &epoch_receipt.parent_microblocks_cost, + &receipt.matured_rewards, + 
receipt.matured_rewards_info.as_ref(), + receipt.parent_burn_block_hash, + receipt.parent_burn_block_height, + receipt.parent_burn_block_timestamp, + &receipt.anchored_block_cost, + &receipt.parent_microblocks_cost, &pox_constants, ); } @@ -694,55 +801,7 @@ impl NakamotoChainState { panic!() }); - Ok(Some(epoch_receipt)) - } - - /// Process some staging blocks, up to max_blocks. - /// Return new chain tips. - pub fn process_nakamoto_blocks<'a, T: BlockEventDispatcher>( - stacks_chain_state: &mut StacksChainState, - mut sort_tx: SortitionHandleTx, - max_blocks: usize, - dispatcher_opt: Option<&'a T>, - ) -> Result>, ChainstateError> { - debug!("Process up to {} new blocks", max_blocks); - let mut ret = vec![]; - - if max_blocks == 0 { - // nothing to do - return Ok(vec![]); - } - - for _ in 0..max_blocks { - // process up to max_blocks pending blocks - match Self::process_next_nakamoto_block( - stacks_chain_state, - &mut sort_tx, - dispatcher_opt, - ) { - Ok(next_tip_opt) => { - ret.push(next_tip_opt); - } - Err(ChainstateError::InvalidStacksBlock(msg)) => { - warn!("Encountered invalid block: {}", &msg); - ret.push(None); - continue; - } - Err(ChainstateError::NetError(net_error::DeserializeError(msg))) => { - // happens if we load a zero-sized block (i.e. an invalid block) - warn!("Encountered invalid block: {}", &msg); - ret.push(None); - continue; - } - Err(e) => { - error!("Unrecoverable error when processing blocks: {:?}", &e); - return Err(e); - } - } - } - - sort_tx.commit()?; - Ok(ret) + Ok(Some(receipt)) } /// Accept a Nakamoto block into the staging blocks DB. @@ -750,24 +809,79 @@ impl NakamotoChainState { /// * the public key cannot be recovered from the miner's signature /// * the stackers during the tenure didn't sign it /// * a DB error occurs + /// Does nothing if: + /// * we already have the block + /// Returns true if we stored the block; false if not. 
pub fn accept_block( block: NakamotoBlock, sortdb: &SortitionHandleConn, staging_db_tx: &rusqlite::Transaction, - ) -> Result<(), ChainstateError> { - let recovered_miner_pk = block.header.recover_miner_pk().ok_or_else(|| { + ) -> Result { + // do nothing if we already have this block + if let Some(_) = Self::get_block_header(&staging_db_tx, &block.header.block_id())? { + debug!("Already have block {}", &block.header.block_id()); + return Ok(false) + } + + // identify the winning block-commit + let sortition = SortitionDB::get_block_snapshot_consensus(sortdb, &block.header.consensus_hash)? + .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!("No block snapshot for {}", &block.header.consensus_hash); + e + })?; + + let block_commit = SortitionDB::get_block_commit(sortdb, &sortition.winning_block_txid, &sortition.sortition_id)? + .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!("No block commit {} off of sortition tip {}", &sortition.winning_block_txid, &sortition.sortition_id); + e + })?; + + // identify the leader key for this block-commit + let leader_key = SortitionDB::get_leader_key_at(sortdb, u64::from(block_commit.key_block_ptr), u32::from(block_commit.key_vtxindex), &sortition.sortition_id)? 
+ .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!("No leader key at {},{} for block-commit {} off of sortition tip {}", block_commit.key_block_ptr, block_commit.key_vtxindex, &block_commit.txid, &sortition.sortition_id); + e + })?; + + let miner_pubkey_hash160 = leader_key.interpret_nakamoto_signing_key() + .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!( + "Leader key did not contain a hash160 of the miner signing public key"; + "leader_key" => format!("{:?}", &leader_key), + ); + e + })?; + + let recovered_miner_pubk = block.header.recover_miner_pk().ok_or_else(|| { warn!( - "Stacks block downloaded with unrecoverable miner public key"; + "Nakamoto Stacks block downloaded with unrecoverable miner public key"; "block_hash" => %block.header.block_hash(), + "block_id" => %block.header.block_id(), ); return ChainstateError::InvalidStacksBlock("Unrecoverable miner public key".into()); })?; + let recovered_miner_hash160 = Hash160::from_node_public_key(&recovered_miner_pubk); + if recovered_miner_hash160 != miner_pubkey_hash160 { + warn!( + "Nakamoto Stacks block signature from {recovered_miner_pubk:?} mismatch: {recovered_miner_hash160} != {miner_pubkey_hash160} from leader-key"; + "block_hash" => %block.header.block_hash(), + "block_id" => %block.header.block_id(), + "leader_key" => format!("{:?}", &leader_key), + "block_commit" => format!("{:?}", &block_commit) + ); + return Err(ChainstateError::InvalidStacksBlock("Invalid miner signature".into())); + } + if !sortdb.expects_stacker_signature( &block.header.consensus_hash, &block.header.stacker_signature, )? 
{ - let msg = format!("Received block, signed by {recovered_miner_pk:?}, but the stacker signature does not match the active stacking cycle"); + let msg = format!("Received block, signed by {recovered_miner_pubk:?}, but the stacker signature does not match the active stacking cycle"); warn!("{}", msg); return Err(ChainstateError::InvalidStacksBlock(msg)); } @@ -777,11 +891,28 @@ impl NakamotoChainState { let burn_attachable = sortdb.processed_block(&block.header.consensus_hash)?; // check if the parent Stacks Block ID has been processed. if so, then this block is stacks_attachable - let stacks_attachable = block.is_first_mined() || staging_db_tx.query_row( - "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ? AND processed = 1", - rusqlite::params![&block.header.parent_block_id], - |_row| Ok(()) - ).optional()?.is_some(); + let stacks_attachable = + // block is the first-ever mined (test only) + block.is_first_mined() + // block attaches to a processed nakamoto block + || staging_db_tx.query_row( + "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ? 
AND processed = 1 AND orphaned = 0", + rusqlite::params![&block.header.parent_block_id], + |_row| Ok(()) + ).optional()?.is_some() + // block attaches to a Stacks epoch 2.x block, and there are no nakamoto blocks at all + || ( + staging_db_tx.query_row( + "SELECT 1 FROM block_headers WHERE index_block_hash = ?", + rusqlite::params![&block.header.parent_block_id], + |_row| Ok(()) + ).optional()?.is_some() + && staging_db_tx.query_row( + "SELECT 1 FROM nakamoto_block_headers LIMIT 1", + rusqlite::NO_PARAMS, + |_row| Ok(()) + ).optional()?.is_none() + ); let block_id = block.block_id(); staging_db_tx.execute( @@ -818,7 +949,7 @@ impl NakamotoChainState { ], )?; - Ok(()) + Ok(true) } /// Create the block reward for a NakamotoBlock @@ -844,7 +975,7 @@ impl NakamotoChainState { let recipient = if epoch_id >= StacksEpochId::Epoch21 { // pay to tx-designated recipient, or if there is none, pay to the origin match coinbase_tx.try_as_coinbase() { - Some((_, recipient_opt)) => recipient_opt + Some((_, recipient_opt, _)) => recipient_opt .cloned() .unwrap_or(miner_addr.to_account_principal()), None => miner_addr.to_account_principal(), @@ -1039,6 +1170,24 @@ impl NakamotoChainState { .map_err(ChainstateError::DBError) } + /// Get the status of a Nakamoto block. + /// Returns Some(accepted?, orphaned?) on success + /// Returns None if there's no such block + /// Returns Err on DBError + pub fn get_nakamoto_block_status( + conn: &Connection, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash + ) -> Result, ChainstateError> { + let sql = "SELECT (processed, orphaned) FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; + let args: &[&dyn ToSql] = &[consensus_hash, block_hash]; + Ok(query_row_panic(conn, sql, args, || { + "FATAL: multiple rows for the same consensus hash and block hash".to_string() + }) + .map_err(ChainstateError::DBError)? 
+ .map(|(processed, orphaned): (u32, u32)| (processed != 0, orphaned != 0))) + } + /// Insert a nakamoto block header that is paired with an /// already-existing block commit and snapshot /// From eb028aa04943d84ff00cc3dcfde628ba3398862b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:06:48 -0400 Subject: [PATCH 045/122] feat: add Nakamoto features to TestBurnchainBlock, TestStacksNode, and TestPeer --- .../src/chainstate/nakamoto/tests/node.rs | 624 ++++++++++++++++++ 1 file changed, 624 insertions(+) create mode 100644 stackslib/src/chainstate/nakamoto/tests/node.rs diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs new file mode 100644 index 0000000000..67dde8a752 --- /dev/null +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -0,0 +1,624 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2022 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::cell::RefCell; +use std::collections::HashMap; +use std::collections::HashSet; +use std::collections::VecDeque; +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::types::*; +use rand::seq::SliceRandom; +use rand::thread_rng; +use rand::Rng; +use stacks_common::address::*; +use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; +use stacks_common::util::sleep_ms; +use stacks_common::util::vrf::VRFProof; +use stacks_common::util::vrf::VRFPublicKey; +use stacks_common::util::hash::Hash160; +use stacks_common::types::chainstate::SortitionId; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::BlockHeaderHash; + +use crate::burnchains::tests::*; +use crate::burnchains::*; +use crate::chainstate::burn::db::sortdb::*; +use crate::chainstate::burn::operations::{ + BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, UserBurnSupportOp, +}; +use crate::chainstate::burn::*; +use crate::chainstate::coordinator::Error as CoordinatorError; +use crate::chainstate::coordinator::get_next_recipients; +use crate::chainstate::coordinator::OnChainRewardSetProvider; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::db::blocks::test::store_staging_block; +use crate::chainstate::stacks::db::test::*; +use crate::chainstate::stacks::db::*; +use crate::chainstate::stacks::miner::*; +use crate::chainstate::stacks::StacksBlock; +use crate::chainstate::stacks::Error as ChainstateError; +use crate::chainstate::stacks::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; +use crate::chainstate::stacks::*; +use crate::cost_estimates::metrics::UnitMetric; +use 
crate::cost_estimates::UnitEstimator; +use crate::net::test::*; +use crate::util_lib::boot::boot_code_addr; +use crate::util_lib::db::Error as db_error; + +use crate::chainstate::stacks::tests::TestStacksNode; + +use crate::net::relay::Relayer; +use crate::net::test::{TestPeer, TestPeerConfig}; + +use crate::core::{STACKS_EPOCH_3_0_MARKER, BOOT_BLOCK_HASH}; + +impl TestBurnchainBlock { + pub fn add_nakamoto_tenure_commit( + &mut self, + ic: &SortitionDBConn, + miner: &mut TestMiner, + last_tenure_id: &StacksBlockId, + burn_fee: u64, + leader_key: &LeaderKeyRegisterOp, + fork_snapshot: Option<&BlockSnapshot>, + parent_block_snapshot: Option<&BlockSnapshot>, + ) -> LeaderBlockCommitOp { + let tenure_id_as_block_hash = BlockHeaderHash(last_tenure_id.0.clone()); + self.inner_add_block_commit(ic, miner, &tenure_id_as_block_hash, burn_fee, leader_key, fork_snapshot, parent_block_snapshot, STACKS_EPOCH_3_0_MARKER) + } +} + +impl TestMiner { + pub fn nakamoto_miner_key(&self) -> StacksPrivateKey { + self.privks[0].clone() + } + + pub fn nakamoto_miner_hash160(&self) -> Hash160 { + let pubk = StacksPublicKey::from_private(&self.nakamoto_miner_key()); + Hash160::from_node_public_key(&pubk) + } + + pub fn make_nakamoto_coinbase(&mut self, recipient: Option, vrf_proof: VRFProof) -> StacksTransaction { + let mut tx_coinbase = StacksTransaction::new( + TransactionVersion::Testnet, + self.as_transaction_auth().unwrap(), + TransactionPayload::Coinbase( + CoinbasePayload([(self.nonce % 256) as u8; 32]), + recipient, + Some(vrf_proof) + ), + ); + tx_coinbase.chain_id = 0x80000000; + tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_coinbase.auth.set_origin_nonce(self.nonce); + + let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); + self.sign_as_origin(&mut tx_signer); + let tx_coinbase_signed = tx_signer.get_tx().unwrap(); + tx_coinbase_signed + } + + pub fn make_nakamoto_tenure_change(&mut self, tenure_change: TenureChangePayload) -> StacksTransaction { 
+ let mut tx_tenure_change = StacksTransaction::new( + TransactionVersion::Testnet, + // TODO: this needs to be a schnorr signature + self.as_transaction_auth().unwrap(), + TransactionPayload::TenureChange(tenure_change) + ); + tx_tenure_change.chain_id = 0x80000000; + tx_tenure_change.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_tenure_change.auth.set_origin_nonce(self.nonce); + + // TODO: This needs to be changed to an aggregate signature from the stackers + let mut tx_signer = StacksTransactionSigner::new(&tx_tenure_change); + self.sign_as_origin(&mut tx_signer); + let tx_tenure_change_signed = tx_signer.get_tx().unwrap(); + tx_tenure_change_signed + } + + pub fn sign_nakamoto_block(&self, block: &mut NakamotoBlock) { + block.header.sign_miner(&self.nakamoto_miner_key()).unwrap(); + } +} + + +impl TestStacksNode { + pub fn add_nakamoto_tenure_commit( + sortdb: &SortitionDB, + burn_block: &mut TestBurnchainBlock, + miner: &mut TestMiner, + last_tenure_start: &StacksBlockId, + burn_amount: u64, + key_op: &LeaderKeyRegisterOp, + parent_block_snapshot: Option<&BlockSnapshot>, + ) -> LeaderBlockCommitOp { + let block_commit_op = { + let ic = sortdb.index_conn(); + let parent_snapshot = burn_block.parent_snapshot.clone(); + burn_block.add_nakamoto_tenure_commit( + &ic, + miner, + last_tenure_start, + burn_amount, + key_op, + Some(&parent_snapshot), + parent_block_snapshot, + ) + }; + block_commit_op + } + + pub fn get_last_nakamoto_tenure(&self, miner: &TestMiner) -> Option> { + match miner.last_block_commit() { + None => None, + Some(block_commit_op) => { + let last_tenure_id = block_commit_op.last_tenure_id(); + match self.nakamoto_commit_ops.get(&last_tenure_id) { + None => None, + Some(idx) => self.nakamoto_blocks.get(*idx).cloned(), + } + } + } + } + + pub fn get_nakamoto_tenure(&self, last_tenure_id: &StacksBlockId) -> Option> { + match self.nakamoto_commit_ops.get(last_tenure_id) { + None => None, + Some(idx) => 
Some(self.nakamoto_blocks[*idx].clone()),
+ }
+ }
+
+ /// Begin the next nakamoto tenure by triggering a tenure-change.
+ /// Follow this call with a call to self.add_nakamoto_tenure_blocks() to add the corresponding
+ /// blocks, once they've been generated.
+ pub fn make_nakamoto_tenure_commitment(
+ &mut self,
+ sortdb: &SortitionDB,
+ burn_block: &mut TestBurnchainBlock,
+ miner: &mut TestMiner,
+ last_tenure_id: &StacksBlockId,
+ burn_amount: u64,
+ miner_key: &LeaderKeyRegisterOp,
+ parent_block_snapshot_opt: Option<&BlockSnapshot>,
+ ) -> LeaderBlockCommitOp {
+ test_debug!(
+ "Miner {}: Commit to Nakamoto tenure starting at {}",
+ miner.id,
+ &last_tenure_id,
+ );
+
+ // send block commit for this block
+ let block_commit_op = TestStacksNode::add_nakamoto_tenure_commit(
+ sortdb,
+ burn_block,
+ miner,
+ &last_tenure_id,
+ burn_amount,
+ miner_key,
+ parent_block_snapshot_opt,
+ );
+
+ test_debug!(
+ "Miner {}: Nakamoto tenure commit transaction builds on {},{} (parent snapshot is {:?})",
+ miner.id,
+ block_commit_op.parent_block_ptr,
+ block_commit_op.parent_vtxindex,
+ &parent_block_snapshot_opt
+ );
+
+ // NOTE: self.nakamoto_commit_ops[block_header_hash] now contains an index into
+ // self.nakamoto_blocks that doesn't exist. The caller needs to follow this call with a
+ // call to self.add_nakamoto_tenure_blocks()
+ self.nakamoto_commit_ops.insert(
+ last_tenure_id.clone(),
+ self.nakamoto_blocks.len(),
+ );
+ block_commit_op
+ }
+
+ /// Record the nakamoto tenure blocks
+ pub fn add_nakamoto_tenure_blocks(&mut self, tenure_blocks: Vec<NakamotoBlock>) {
+ self.nakamoto_blocks.push(tenure_blocks);
+ }
+
+ /// Begin the next Nakamoto tenure.
+ /// Create a block-commit, as well as a tenure change and VRF proof for use in a follow-on call + /// to make_nakamoto_tenure_blocks() + pub fn begin_nakamoto_tenure( + &mut self, + sortdb: &SortitionDB, + miner: &mut TestMiner, + burn_block: &mut TestBurnchainBlock, + miner_key: &LeaderKeyRegisterOp, + // parent Stacks block, if this is the first Nakamoto tenure + parent_stacks_block: Option<&StacksBlock>, + // parent Nakamoto blocks, if we're building atop a previous Nakamoto tenure + parent_nakamoto_tenure: Option<&[NakamotoBlock]>, + burn_amount: u64, + tenure_change_cause: TenureChangeCause + ) -> (LeaderBlockCommitOp, TenureChangePayload, VRFProof) { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let proof = miner + .make_proof( + &miner_key.public_key, + &burn_block.parent_snapshot.sortition_hash, + ) + .expect(&format!( + "FATAL: no private key for {}", + miner_key.public_key.to_hex() + )); + + let (last_tenure_id, previous_tenure_end, previous_tenure_blocks, parent_block_snapshot_opt) = if let Some(parent_blocks) = parent_nakamoto_tenure { + // parent is an epoch 3 nakamoto block + let first_parent = parent_blocks.first().unwrap(); + let last_parent = parent_blocks.last().unwrap(); + let parent_tenure_id = StacksBlockId::new(&first_parent.header.consensus_hash, &first_parent.header.block_hash()); + let ic = sortdb.index_conn(); + let parent_sortition = SortitionDB::get_block_snapshot_for_winning_nakamoto_tenure( + &ic, + &tip.sortition_id, + &parent_tenure_id, + ) + .unwrap() + .unwrap(); + + test_debug!( + "Work in {} {} for Nakamoto parent: {},{}", + burn_block.block_height, + burn_block.parent_snapshot.burn_header_hash, + parent_sortition.total_burn, + last_parent.header.chain_length + 1, + ); + + (parent_tenure_id, last_parent.header.block_id(), parent_blocks.len(), Some(parent_sortition)) + } + else if let Some(parent_stacks_block) = parent_stacks_block { + // building off an existing stacks block + let 
parent_stacks_block_snapshot = { + let ic = sortdb.index_conn(); + let parent_stacks_block_snapshot = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &burn_block.parent_snapshot.sortition_id, + &parent_stacks_block.block_hash(), + ) + .unwrap() + .unwrap(); + parent_stacks_block_snapshot + }; + + let parent_chain_tip = StacksChainState::get_anchored_block_header_info( + self.chainstate.db(), + &parent_stacks_block_snapshot.consensus_hash, + &parent_stacks_block.header.block_hash(), + ) + .unwrap() + .unwrap(); + + let parent_tenure_id = parent_chain_tip.index_block_hash(); + + test_debug!( + "Work in {} {} for Stacks 2.x parent: {},{}", + burn_block.block_height, + burn_block.parent_snapshot.burn_header_hash, + parent_stacks_block_snapshot.total_burn, + parent_chain_tip.anchored_header.height(), + ); + + (parent_tenure_id.clone(), parent_tenure_id, 1, Some(parent_stacks_block_snapshot)) + } + else { + // first epoch is a nakamoto epoch (testing only) + let parent_tenure_id = StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH); + (parent_tenure_id.clone(), parent_tenure_id, 0, None) + }; + + let previous_tenure_blocks = u32::try_from(previous_tenure_blocks).expect("FATAL: too many blocks from last miner"); + let tenure_change_payload = TenureChangePayload { + previous_tenure_end, + previous_tenure_blocks, + cause: tenure_change_cause, + pubkey_hash: miner.nakamoto_miner_hash160(), + signature: SchnorrThresholdSignature::empty(), + signers: vec![] + }; + + let block_commit_op = self.make_nakamoto_tenure_commitment( + sortdb, + burn_block, + miner, + &last_tenure_id, + burn_amount, + miner_key, + parent_block_snapshot_opt.as_ref(), + ); + + (block_commit_op, tenure_change_payload, proof) + } + + /// Construct a full Nakamoto tenure with the given block builder. 
+ /// The first block will contain a coinbase and a tenure-change + pub fn make_nakamoto_tenure_blocks( + chainstate: &StacksChainState, + sortdb: &SortitionDB, + miner: &mut TestMiner, + proof: VRFProof, + tenure_change_payload: TenureChangePayload, + mut block_builder: F + ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + where + F: FnMut(&mut TestMiner, &StacksChainState, &SortitionDBConn, usize) -> Vec + { + let mut tenure_change = Some(miner.make_nakamoto_tenure_change(tenure_change_payload)); + let mut coinbase = Some(miner.make_nakamoto_coinbase(None, proof.clone())); + + let mut blocks = vec![]; + let mut block_count = 0; + loop { + let mut txs = vec![]; + if let Some(tenure_change) = tenure_change.take() { + txs.push(tenure_change); + } + if let Some(coinbase) = coinbase.take() { + txs.push(coinbase); + } + let mut next_block_txs = block_builder(miner, chainstate, &sortdb.index_conn(), block_count); + txs.append(&mut next_block_txs); + + if txs.len() == 0 { + break; + } + + let parent_tip_opt = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap(); + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + // make a block + let builder = if let Some(parent_tip) = parent_tip_opt { + NakamotoBlockBuilder::new_from_parent( + &parent_tip.index_block_hash(), + &parent_tip, + &burn_tip.consensus_hash, + burn_tip.total_burn, + if block_count == 0 { Some(proof.clone()) } else { None } + ).unwrap() + } + else { + NakamotoBlockBuilder::new_tenure_from_genesis(&proof) + }; + + let (mut nakamoto_block, size, cost) = builder.make_nakamoto_block_from_txs(chainstate, &sortdb.index_conn(), txs).unwrap(); + miner.sign_nakamoto_block(&mut nakamoto_block); + blocks.push((nakamoto_block, size, cost)); + block_count += 1; + } + blocks + } +} + +impl<'a> TestPeer<'a> { + /// Get the Nakamoto parent linkage data for building atop the last-produced tenure or + /// Stacks 2.x block. 
+ /// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition)
+ fn get_nakamoto_parent(miner: &TestMiner, stacks_node: &TestStacksNode, sortdb: &SortitionDB) -> (StacksBlockId, Option<StacksBlock>, Option<Vec<NakamotoBlock>>, Option<BlockSnapshot>) {
+ let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+ if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) {
+ // parent is an epoch 3 nakamoto block
+ let first_parent = parent_blocks.first().unwrap();
+ let parent_tenure_id = StacksBlockId::new(&first_parent.header.consensus_hash, &first_parent.header.block_hash());
+ let ic = sortdb.index_conn();
+ let parent_sortition_opt = SortitionDB::get_block_snapshot_for_winning_nakamoto_tenure(
+ &ic,
+ &tip.sortition_id,
+ &parent_tenure_id,
+ )
+ .unwrap();
+ let last_tenure_id = StacksBlockId::new(&first_parent.header.consensus_hash, &first_parent.header.block_hash());
+ (last_tenure_id, None, Some(parent_blocks), parent_sortition_opt)
+ }
+ else {
+ // parent may be an epoch 2.x block
+ let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = stacks_node.get_last_anchored_block(miner) {
+ let ic = sortdb.index_conn();
+ let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block(
+ &ic,
+ &tip.sortition_id,
+ &parent_block.block_hash(),
+ )
+ .unwrap();
+ (Some(parent_block), sort_opt)
+ }
+ else {
+ (None, None)
+ };
+
+ let last_tenure_id = if let Some(last_epoch2_block) = parent_opt.as_ref() {
+ let parent_sort = parent_sortition_opt.as_ref().unwrap();
+ StacksBlockId::new(&parent_sort.consensus_hash, &last_epoch2_block.header.block_hash())
+ }
+ else {
+ // must be a genesis block (testing only!)
+ StacksBlockId(BOOT_BLOCK_HASH.0.clone())
+ };
+ (last_tenure_id, parent_opt, None, parent_sortition_opt)
+ }
+ }
+
+ /// Start the next Nakamoto tenure.
+ /// This generates the VRF key and block-commit txs, as well as the TenureChange and + /// VRFProof + pub fn begin_nakamoto_tenure( + &mut self, + tenure_change_cause: TenureChangeCause + ) -> (Vec, TenureChangePayload, VRFProof) { + let mut sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut burn_block = TestBurnchainBlock::new(&tip, 0); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let (last_tenure_id, parent_block_opt, parent_tenure_opt, parent_sortition_opt) = Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); + let last_key = stacks_node.get_last_key(&self.miner); + + let network_id = self.config.network_id; + let chainstate_path = self.chainstate_path.clone(); + let burn_block_height = burn_block.block_height; + + let (mut block_commit_op, tenure_change_payload, vrf_proof) = stacks_node.begin_nakamoto_tenure( + &sortdb, + &mut self.miner, + &mut burn_block, + &last_key, + parent_block_opt.as_ref(), + parent_tenure_opt.as_ref().map(|blocks| blocks.as_slice()), + 1000, + tenure_change_cause + ); + + // patch up block-commit -- these blocks all mine off of genesis + if last_tenure_id == StacksBlockId(BOOT_BLOCK_HASH.0.clone()) { + block_commit_op.parent_block_ptr = 0; + block_commit_op.parent_vtxindex = 0; + } + + let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + + // patch in reward set info + match get_next_recipients( + &tip, + &mut stacks_node.chainstate, + &mut sortdb, + &self.config.burnchain, + &OnChainRewardSetProvider(), + true, + ) { + Ok(recipients) => { + block_commit_op.commit_outs = match recipients { + Some(info) => { + let mut recipients = info + .recipients + .into_iter() + .map(|x| x.0) + .collect::>(); + if recipients.len() == 1 { + recipients.push(PoxAddress::standard_burn_address(false)); + } + recipients + } + None => { + if self + .config + .burnchain + .is_in_prepare_phase(burn_block.block_height) 
+ { + vec![PoxAddress::standard_burn_address(false)] + } else { + vec![ + PoxAddress::standard_burn_address(false), + PoxAddress::standard_burn_address(false), + ] + } + } + }; + test_debug!( + "Block commit at height {} has {} recipients: {:?}", + block_commit_op.block_height, + block_commit_op.commit_outs.len(), + &block_commit_op.commit_outs + ); + } + Err(e) => { + panic!("Failure fetching recipient set: {:?}", e); + } + }; + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + ( + vec![ + BlockstackOperationType::LeaderKeyRegister(leader_key_op), + BlockstackOperationType::LeaderBlockCommit(block_commit_op), + ], + tenure_change_payload, + vrf_proof + ) + } + + /// Produce a Nakamoto tenure, after processing the block-commit from + /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), + /// take the consensus hash, and feed it in here. + pub fn make_nakamoto_tenure( + &mut self, + consensus_hash: &ConsensusHash, + tenure_change_payload: TenureChangePayload, + vrf_proof: VRFProof, + block_builder: F + ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + where + F: FnMut(&mut TestMiner, &StacksChainState, &SortitionDBConn, usize) -> Vec + { + let stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.sortdb.take().unwrap(); + + let (last_tenure_id, parent_block_opt, _parent_tenure_opt, parent_sortition_opt) = Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); + let blocks = TestStacksNode::make_nakamoto_tenure_blocks(&stacks_node.chainstate, &sortdb, &mut self.miner, vrf_proof, tenure_change_payload, block_builder); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + + blocks + } + + /// Accept a new Nakamoto tenure via the relayer, and then try to process them. 
+ /// Call this after make_nakamoto_tenure()
+ pub fn process_nakamoto_tenure(
+ &mut self,
+ blocks: Vec<NakamotoBlock>
+ ) {
+ debug!("Peer will process {} Nakamoto blocks", blocks.len());
+
+ let sortdb = self.sortdb.take().unwrap();
+ let mut node = self.stacks_node.take().unwrap();
+
+ let tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap();
+ let sort_handle = sortdb.index_handle(&tip);
+
+ node.add_nakamoto_tenure_blocks(blocks.clone());
+ for block in blocks.into_iter() {
+ let block_id = block.block_id();
+ debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header);
+ let accepted = Relayer::process_new_nakamoto_block(&sort_handle, &mut node.chainstate, block).unwrap();
+ if accepted {
+ test_debug!("Accepted Nakamoto block {}", &block_id);
+ self.coord.handle_new_nakamoto_stacks_block().unwrap();
+ }
+ else {
+ test_debug!("Did NOT accept Nakamoto block {}", &block_id);
+ }
+ }
+
+ self.sortdb = Some(sortdb);
+ self.stacks_node = Some(node);
+ }
+}
From 9512c7a52a09650d4bb24b3d8d45e6197cc2b289 Mon Sep 17 00:00:00 2001
From: Jude Nelson
Date: Thu, 2 Nov 2023 14:07:27 -0400
Subject: [PATCH 046/122] chore: create nakamoto/tests/ directory

---
 .../chainstate/nakamoto/{tests.rs => tests/mod.rs} | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)
 rename stackslib/src/chainstate/nakamoto/{tests.rs => tests/mod.rs} (97%)

diff --git a/stackslib/src/chainstate/nakamoto/tests.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs
similarity index 97%
rename from stackslib/src/chainstate/nakamoto/tests.rs
rename to stackslib/src/chainstate/nakamoto/tests/mod.rs
index ad6575d648..97aa74fccd 100644
--- a/stackslib/src/chainstate/nakamoto/tests.rs
+++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs
@@ -25,7 +25,7 @@ use stacks_common::types::chainstate::{
 TrieHash,
 };
 use stacks_common::types::{PrivateKey, StacksEpoch, StacksEpochId};
-use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum};
+use stacks_common::util::hash::{Hash160,
Sha512Trunc256Sum, hex_bytes}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof}; use stdext::prelude::Integer; @@ -55,6 +55,8 @@ fn test_path(name: &str) -> String { format!("/tmp/stacks-node-tests/nakamoto-tests/{}", name) } +pub mod node; + #[test] pub fn nakamoto_advance_tip_simple() { let path = test_path(function_name!()); @@ -97,7 +99,10 @@ pub fn nakamoto_advance_tip_simple() { let chain_tip_burn_header_hash = BurnchainHeaderHash([0; 32]); let chain_tip_burn_header_height = 1; let chain_tip_burn_header_timestamp = 100; - let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None); + + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes).unwrap(); + let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, Some(proof)); let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&stacker_sk).unwrap(), @@ -420,7 +425,9 @@ pub fn nakamoto_advance_tip_multiple() { .into(), }; - let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([i; 32]), None); + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes).unwrap(); + let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([i; 32]), None, Some(proof)); let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&miner_sk).unwrap(), From 9b31de9a3c513cc32a7d17c065eb168a6c1e3763 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:07:50 -0400 Subject: [PATCH 
047/122] feat: static checks on Nakamoto coinbase and tenure change transactions in a Stacks block, as well as unit tests --- stackslib/src/chainstate/stacks/block.rs | 105 ++++++++++++++++++----- 1 file changed, 82 insertions(+), 23 deletions(-) diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 23346c9290..2cb7eb98ce 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -582,25 +582,35 @@ impl StacksBlock { txs: &[StacksTransaction], epoch_id: StacksEpochId, ) -> bool { - if epoch_id < StacksEpochId::Epoch21 { - // nothing new since the start of the system is supported. - // Expand this list of things to check for as needed. - // * no pay-to-contract coinbases - // * no versioned smart contract payloads - for tx in txs.iter() { - if let TransactionPayload::Coinbase(_, ref recipient_opt) = &tx.payload { - if recipient_opt.is_some() { - // not supported - error!("Coinbase pay-to-alt-recipient not supported before Stacks 2.1"; "txid" => %tx.txid()); - return false; - } + for tx in txs.iter() { + if let TransactionPayload::Coinbase(_, ref recipient_opt, ref proof_opt) = &tx.payload { + if proof_opt.is_some() && epoch_id < StacksEpochId::Epoch30 { + // not supported + error!("Coinbase with VRF proof not supported before Stacks 3.0"; "txid" => %tx.txid()); + return false; } - if let TransactionPayload::SmartContract(_, ref version_opt) = &tx.payload { - if version_opt.is_some() { - // not supported - error!("Versioned smart contracts not supported before Stacks 2.1"); - return false; - } + if proof_opt.is_none() && epoch_id >= StacksEpochId::Epoch30 { + // not supported + error!("Coinbase with VRF proof is required in Stacks 3.0 and later"; "txid" => %tx.txid()); + return false; + } + if recipient_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { + // not supported + error!("Coinbase pay-to-alt-recipient not supported before Stacks 2.1"; "txid" => %tx.txid()); + 
return false; + } + } + if let TransactionPayload::SmartContract(_, ref version_opt) = &tx.payload { + if version_opt.is_some() { + // not supported + error!("Versioned smart contracts not supported before Stacks 2.1"); + return false; + } + } + if let TransactionPayload::TenureChange(..) = &tx.payload { + if epoch_id < StacksEpochId::Epoch30 { + error!("TenureChange transaction not supported before Stacks 3.0"; "txid" => %tx.txid()); + return false; } } } @@ -1444,13 +1454,13 @@ mod test { let tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); let tx_coinbase_2 = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([1u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([1u8; 32]), None, None), ); let mut tx_invalid_coinbase = tx_coinbase.clone(); @@ -1578,7 +1588,7 @@ mod test { let tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); let mut tx_coinbase_offchain = tx_coinbase.clone(); @@ -1709,7 +1719,7 @@ mod test { let tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); let tx_coinbase_contract = StacksTransaction::new( @@ -1720,8 +1730,17 @@ mod test { Some(PrincipalData::Contract( QualifiedContractIdentifier::transient(), )), + None, ), ); + + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = 
VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let tx_coinbase_proof = StacksTransaction::new( + TransactionVersion::Testnet, + origin_auth.clone(), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, Some(proof)), + ); let stx_address = StacksAddress { version: 0, @@ -1779,6 +1798,20 @@ mod test { ), ); + let tenure_change_payload = TenureChangePayload { + previous_tenure_end: StacksBlockId([0x00; 32]), + previous_tenure_blocks: 0, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x00; 20]), + signature: SchnorrThresholdSignature::empty(), + signers: vec![] + }; + let tx_tenure_change = StacksTransaction::new( + TransactionVersion::Testnet, + origin_auth.clone(), + TransactionPayload::TenureChange(tenure_change_payload) + ); + let dup_txs = vec![ tx_coinbase.clone(), tx_transfer.clone(), @@ -1790,6 +1823,9 @@ mod test { let no_coinbase = vec![tx_transfer.clone()]; let coinbase_contract = vec![tx_coinbase_contract.clone()]; let versioned_contract = vec![tx_versioned_smart_contract.clone()]; + let nakamoto_coinbase = vec![tx_coinbase_proof.clone()]; + let tenure_change_tx = vec![tx_tenure_change.clone()]; + let nakamoto_txs = vec![tx_coinbase_proof.clone(), tx_tenure_change.clone()]; assert!(!StacksBlock::validate_transactions_unique(&dup_txs)); assert!(!StacksBlock::validate_transactions_network( @@ -1806,7 +1842,6 @@ mod test { &coinbase_contract, StacksEpochId::Epoch2_05 )); - assert!(StacksBlock::validate_transactions_static_epoch( &coinbase_contract, StacksEpochId::Epoch21 @@ -1820,6 +1855,30 @@ mod test { &versioned_contract, StacksEpochId::Epoch21 )); + assert!(!StacksBlock::validate_transactions_static_epoch( + &nakamoto_coinbase, + StacksEpochId::Epoch21 + )); + assert!(StacksBlock::validate_transactions_static_epoch( + &nakamoto_coinbase, + StacksEpochId::Epoch30 + )); + assert!(!StacksBlock::validate_transactions_static_epoch( + &coinbase_contract, + StacksEpochId::Epoch30 + )); + 
assert!(!StacksBlock::validate_transactions_static_epoch( + &tenure_change_tx, + StacksEpochId::Epoch21 + )); + assert!(StacksBlock::validate_transactions_static_epoch( + &nakamoto_txs, + StacksEpochId::Epoch30 + )); + assert!(!StacksBlock::validate_transactions_static_epoch( + &nakamoto_txs, + StacksEpochId::Epoch21 + )); } // TODO: From 881b0a05e71cebf18f34e37ee079f1b6165b21a3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:08:14 -0400 Subject: [PATCH 048/122] chore: add v3 unlock and pox-4 activation --- stackslib/src/chainstate/stacks/boot/contract_tests.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 5396831c1c..a8af6a7b92 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -407,10 +407,18 @@ impl BurnStateDB for TestSimBurnStateDB { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } + + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } fn get_pox_prepare_length(&self) -> u32 { self.pox_constants.prepare_length From cad3de19b5de0221afd95e56fe4f79787a5e24ca Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:08:35 -0400 Subject: [PATCH 049/122] feat: stubbed pox-4 reward set handlers and testing stack-stx implementation --- stackslib/src/chainstate/stacks/boot/mod.rs | 146 +++++++++++++++++++- 1 file changed, 144 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 969ff22aef..33ef391ce5 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -84,9 +84,11 @@ pub const BOOT_CODE_GENESIS: &'static str = std::include_str!("genesis.clar"); pub const 
POX_1_NAME: &'static str = "pox"; pub const POX_2_NAME: &'static str = "pox-2"; pub const POX_3_NAME: &'static str = "pox-3"; +pub const POX_4_NAME: &'static str = "pox-4"; const POX_2_BODY: &'static str = std::include_str!("pox-2.clar"); const POX_3_BODY: &'static str = std::include_str!("pox-3.clar"); +const POX_4_BODY: &'static str = std::include_str!("pox-4.clar"); pub const COSTS_1_NAME: &'static str = "costs"; pub const COSTS_2_NAME: &'static str = "costs-2"; @@ -107,6 +109,10 @@ lazy_static! { format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_3_BODY); pub static ref POX_3_TESTNET_CODE: String = format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_3_BODY); + pub static ref POX_4_MAINNET_CODE: String = + format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_4_BODY); + pub static ref POX_4_TESTNET_CODE: String = + format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_4_BODY); pub static ref BOOT_CODE_COST_VOTING_TESTNET: String = make_testnet_cost_voting(); pub static ref STACKS_BOOT_CODE_MAINNET: [(&'static str, &'static str); 6] = [ ("pox", &BOOT_CODE_POX_MAINNET), @@ -165,7 +171,7 @@ pub struct PoxStartCycleInfo { pub missed_reward_slots: Vec<(PrincipalData, u128)>, } -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct RewardSet { pub rewarded_addresses: Vec, pub start_cycle_state: PoxStartCycleInfo, @@ -350,6 +356,18 @@ impl StacksChainState { ) -> Result, Error> { Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_3_NAME) } + + /// Do all the necessary Clarity operations at the start of a PoX reward cycle. + /// Currently, this just means applying any auto-unlocks to Stackers who qualified. + /// + /// This should only be called for PoX v4 cycles. 
+ pub fn handle_pox_cycle_start_pox_4( + clarity: &mut ClarityTransactionConnection, + cycle_number: u64, + cycle_info: Option, + ) -> Result, Error> { + Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_4_NAME) + } /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. @@ -969,6 +987,97 @@ impl StacksChainState { Ok(ret) } + + /// Get all PoX reward addresses from .pox-4 + /// TODO: also return their stacker signer keys (as part of `RawRewardSetEntry` + fn get_reward_addresses_pox_4( + &mut self, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + reward_cycle: u64, + ) -> Result, Error> { + if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_4_NAME)? { + debug!( + "PoX was voted disabled in block {} (reward cycle {})", + block_id, reward_cycle + ); + return Ok(vec![]); + } + + // how many in this cycle? + let num_addrs = self + .eval_boot_code_read_only( + sortdb, + block_id, + POX_4_NAME, + &format!("(get-reward-set-size u{})", reward_cycle), + )? + .expect_u128(); + + debug!( + "At block {:?} (reward cycle {}): {} PoX reward addresses", + block_id, reward_cycle, num_addrs + ); + + let mut ret = vec![]; + for i in 0..num_addrs { + // value should be (optional (tuple (pox-addr (tuple (...))) (total-ustx uint))). + let tuple = self + .eval_boot_code_read_only( + sortdb, + block_id, + POX_4_NAME, + &format!("(get-reward-set-pox-address u{} u{})", reward_cycle, i), + )? 
+ .expect_optional() + .expect(&format!( + "FATAL: missing PoX address in slot {} out of {} in reward cycle {}", + i, num_addrs, reward_cycle + )) + .expect_tuple(); + + let pox_addr_tuple = tuple + .get("pox-addr") + .expect(&format!("FATAL: no `pox-addr` in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i)) + .to_owned(); + + let reward_address = PoxAddress::try_from_pox_tuple(self.mainnet, &pox_addr_tuple) + .expect(&format!( + "FATAL: not a valid PoX address: {:?}", + &pox_addr_tuple + )); + + let total_ustx = tuple + .get("total-ustx") + .expect(&format!("FATAL: no 'total-ustx' in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i)) + .to_owned() + .expect_u128(); + + let stacker = tuple + .get("stacker") + .expect(&format!( + "FATAL: no 'stacker' in return value from (get-reward-set-pox-address u{} u{})", + reward_cycle, i + )) + .to_owned() + .expect_optional() + .map(|value| value.expect_principal()); + + debug!( + "Parsed PoX reward address"; + "stacked_ustx" => total_ustx, + "reward_address" => %reward_address, + "stacker" => ?stacker, + ); + ret.push(RawRewardSetEntry { + reward_address, + amount_stacked: total_ustx, + stacker, + }) + } + + Ok(ret) + } /// Get the sequence of reward addresses, as well as the PoX-specified hash mode (which gets /// lost in the conversion to StacksAddress) @@ -990,10 +1099,12 @@ impl StacksChainState { .pox_constants .active_pox_contract(reward_cycle_start_height); + debug!("Active PoX contract at {} (burn height {}): {}", block_id, current_burn_height, &pox_contract_name); let result = match pox_contract_name { x if x == POX_1_NAME => self.get_reward_addresses_pox_1(sortdb, block_id, reward_cycle), x if x == POX_2_NAME => self.get_reward_addresses_pox_2(sortdb, block_id, reward_cycle), x if x == POX_3_NAME => self.get_reward_addresses_pox_3(sortdb, block_id, reward_cycle), + x if x == POX_4_NAME => self.get_reward_addresses_pox_4(sortdb, block_id, reward_cycle), 
unknown_contract => { panic!("Blockchain implementation failure: PoX contract name '{}' is unknown. Chainstate is corrupted.", unknown_contract); @@ -1112,7 +1223,7 @@ pub mod test { #[test] fn get_reward_threshold_units() { let test_pox_constants = - PoxConstants::new(501, 1, 1, 1, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX); + PoxConstants::new(501, 1, 1, 1, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX, u32::MAX, u32::MAX); // when the liquid amount = the threshold step, // the threshold should always be the step size. let liquid = POX_THRESHOLD_STEPS_USTX; @@ -1492,6 +1603,37 @@ pub mod test { ) -> StacksTransaction { make_pox_2_or_3_lockup(key, nonce, amount, addr, lock_period, burn_ht, POX_3_NAME) } + + /// TODO: add signer key + pub fn make_pox_4_lockup( + key: &StacksPrivateKey, + nonce: u64, + amount: u128, + addr: PoxAddress, + lock_period: u128, + burn_ht: u64, + ) -> StacksTransaction { + // ;; TODO: add signer key + // (define-public (stack-stx (amount-ustx uint) + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + // (burn-height uint) + // (lock-period uint)) + let addr_tuple = Value::Tuple(addr.as_clarity_tuple().unwrap()); + let payload = TransactionPayload::new_contract_call( + boot_code_test_addr(), + "pox-4", + "stack-stx", + vec![ + Value::UInt(amount), + addr_tuple, + Value::UInt(burn_ht as u128), + Value::UInt(lock_period), + ], + ) + .unwrap(); + + make_tx(key, nonce, 0, payload) + } pub fn make_pox_2_or_3_lockup( key: &StacksPrivateKey, From f9c8d09805fa5004ee0f2bca978a5bc3753f59cc Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:09:04 -0400 Subject: [PATCH 050/122] chore: extend invariant tests for epoch 2.5, and add v3 unlock --- stackslib/src/chainstate/stacks/boot/pox_2_tests.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 1944edfc63..d6ef06c15a 100644 --- 
a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -480,6 +480,7 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c "cycle" => cycle_number, "cycle_start" => cycle_start, "pox_3_activation" => peer.config.burnchain.pox_constants.pox_3_activation_height, + "pox_4_activation" => peer.config.burnchain.pox_constants.pox_4_activation_height, "epoch_2_4_start" => cycle_start_epoch.start_height, ); return; @@ -527,6 +528,15 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c // the invariant checks will not make sense for the same reasons as above continue; } + + if tip_epoch.epoch_id >= StacksEpochId::Epoch25 + && current_burn_height + <= peer.config.burnchain.pox_constants.pox_4_activation_height + { + // if the tip is epoch-2.5, and pox-4 isn't the active pox contract yet, + // the invariant checks will not make sense for the same reasons as above + continue; + } let StackingStateCheckData { pox_addr, @@ -1238,6 +1248,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { height_target + 1, burnchain.pox_constants.v1_unlock_height, burnchain.pox_constants.v2_unlock_height, + burnchain.pox_constants.v3_unlock_height, ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); @@ -1267,6 +1278,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { height_target + 1, burnchain.pox_constants.v1_unlock_height, burnchain.pox_constants.v2_unlock_height, + burnchain.pox_constants.v3_unlock_height, ); assert_eq!(bob_bal.amount_locked(), 0); @@ -1280,6 +1292,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { height_target + 1, burnchain.pox_constants.v1_unlock_height, burnchain.pox_constants.v2_unlock_height, + burnchain.pox_constants.v3_unlock_height, ); assert_eq!(bob_bal.amount_locked(), 0); From 8baf2d3d3b3e23486421c410af281091c1d3ed38 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:09:25 -0400 Subject: 
[PATCH 051/122] chore: mock v3 unlock and pox-4 activation --- stackslib/src/chainstate/stacks/boot/pox_3_tests.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 692e2f754f..a8ef5f1bad 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -147,7 +147,9 @@ fn make_test_epochs_pox() -> (Vec, PoxConstants) { pox_constants.anchor_threshold = 1; pox_constants.v1_unlock_height = (EMPTY_SORTITIONS + EPOCH_2_1_HEIGHT + 1) as u32; pox_constants.v2_unlock_height = (EMPTY_SORTITIONS + EPOCH_2_2_HEIGHT + 1) as u32; + pox_constants.v3_unlock_height = u32::MAX; pox_constants.pox_3_activation_height = (EMPTY_SORTITIONS + EPOCH_2_4_HEIGHT + 1) as u32; + pox_constants.pox_4_activation_height = u32::MAX; (epochs, pox_constants) } From 676dca241e9869d5a235bdc6224a99b0bbc33ad9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:10:10 -0400 Subject: [PATCH 052/122] feat: add pox-4 activation for epoch 2.5 --- stackslib/src/chainstate/stacks/db/blocks.rs | 31 ++++++++++++++++---- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 106ce34c91..7da5cf4a16 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -4177,7 +4177,7 @@ impl StacksChainState { let recipient = if epoch_id >= StacksEpochId::Epoch21 { // pay to tx-designated recipient, or if there is none, pay to the origin match coinbase_tx.try_as_coinbase() { - Some((_, recipient_opt)) => recipient_opt + Some((_, recipient_opt, _)) => recipient_opt .cloned() .unwrap_or(miner_addr.to_account_principal()), None => miner_addr.to_account_principal(), @@ -4609,6 +4609,11 @@ impl StacksChainState { current_epoch = StacksEpochId::Epoch24; } StacksEpochId::Epoch24 => { + 
receipts.append(&mut clarity_tx.block.initialize_epoch_2_5()?); + current_epoch = StacksEpochId::Epoch25; + } + StacksEpochId::Epoch25 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_3_0()?); current_epoch = StacksEpochId::Epoch30; } StacksEpochId::Epoch30 => { @@ -5206,7 +5211,9 @@ impl StacksChainState { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + // TODO: sbtc ops in epoch 3.0 StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, parent_index_hash, @@ -5291,13 +5298,20 @@ impl StacksChainState { pox_start_cycle_info, ) } - StacksEpochId::Epoch24 | StacksEpochId::Epoch30 => { + StacksEpochId::Epoch24 => { Self::handle_pox_cycle_start_pox_3( clarity_tx, pox_reward_cycle, pox_start_cycle_info, ) } + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + Self::handle_pox_cycle_start_pox_4( + clarity_tx, + pox_reward_cycle, + pox_start_cycle_info, + ) + } } })?; debug!("check_and_handle_reward_start: handled pox cycle start"); @@ -6898,12 +6912,13 @@ impl StacksChainState { return Err(MemPoolRejection::BadAddressVersionByte); } - let (block_height, v1_unlock_height, v2_unlock_height) = clarity_connection + let (block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height) = clarity_connection .with_clarity_db_readonly(|ref mut db| { ( db.get_current_burnchain_block_height() as u64, db.get_v1_unlock_height(), db.get_v2_unlock_height(), + db.get_v3_unlock_height(), ) }); @@ -6913,6 +6928,7 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { match &tx.payload { TransactionPayload::TokenTransfer(..) 
=> { @@ -6925,6 +6941,7 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ), )); } @@ -6949,6 +6966,7 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { return Err(MemPoolRejection::NotEnoughFunds( total_spent, @@ -6956,6 +6974,7 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ), )); } @@ -6967,6 +6986,7 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { return Err(MemPoolRejection::NotEnoughFunds( fee as u128, @@ -6974,6 +6994,7 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ), )); } @@ -7120,7 +7141,7 @@ pub mod test { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); @@ -7185,7 +7206,7 @@ pub mod test { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); From d30cb60cd3fdc159c717b1c510689d090191a5ce Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:10:34 -0400 Subject: [PATCH 053/122] chore: epoch2.5 --- stackslib/src/chainstate/stacks/db/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index caeaafa336..1ae076a76c 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ 
-259,6 +259,7 @@ impl DBConfig { StacksEpochId::Epoch22 => self.version == "3" || self.version == "4", StacksEpochId::Epoch23 => self.version == "3" || self.version == "4", StacksEpochId::Epoch24 => self.version == "3" || self.version == "4", + StacksEpochId::Epoch25 => self.version == "3" || self.version == "4", StacksEpochId::Epoch30 => self.version == "3" || self.version == "4", } } From f6ba0c18c6fe285ede938dcf4c5d1fdcef9a47b5 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:10:48 -0400 Subject: [PATCH 054/122] chore: use v3 unlock and pox-4 activation --- .../src/chainstate/stacks/db/transactions.rs | 21 ++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index d3c0764754..7a911c9c86 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -468,12 +468,13 @@ impl StacksChainState { fee: u64, payer_account: StacksAccount, ) -> Result { - let (cur_burn_block_height, v1_unlock_ht, v2_unlock_ht) = clarity_tx + let (cur_burn_block_height, v1_unlock_ht, v2_unlock_ht, v3_unlock_ht) = clarity_tx .with_clarity_db_readonly(|ref mut db| { ( db.get_current_burnchain_block_height(), db.get_v1_unlock_height(), db.get_v2_unlock_height(), + db.get_v3_unlock_height(), ) }); @@ -483,6 +484,7 @@ impl StacksChainState { cur_burn_block_height as u64, v1_unlock_ht, v2_unlock_ht, + v3_unlock_ht, ); if consolidated_balance < fee as u128 { @@ -8103,7 +8105,7 @@ pub mod test { assert_eq!( StacksChainState::get_account(&mut conn, &addr.into()) .stx_balance - .get_available_balance_at_burn_block(0, 0, 0), + .get_available_balance_at_burn_block(0, 0, 0, 0), (1000000000 - fee) as u128 ); @@ -8544,9 +8546,15 @@ pub mod test { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } fn get_pox_3_activation_height(&self) -> 
u32 { u32::MAX } + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) } @@ -8611,7 +8619,8 @@ pub mod test { StacksEpochId::Epoch22 => self.get_stacks_epoch(3), StacksEpochId::Epoch23 => self.get_stacks_epoch(4), StacksEpochId::Epoch24 => self.get_stacks_epoch(5), - StacksEpochId::Epoch30 => self.get_stacks_epoch(6), + StacksEpochId::Epoch25 => self.get_stacks_epoch(6), + StacksEpochId::Epoch30 => self.get_stacks_epoch(7), } } fn get_pox_payout_addrs( @@ -8759,9 +8768,15 @@ pub mod test { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) } From 28ad529492d6e3d868d111b20748457130c89ba9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:11:05 -0400 Subject: [PATCH 055/122] feat: add BlockBuilder trait for mining transactions so Nakamoto can reuse the block-builder logic; separate out transaction mempool selection and application logic so that both nakamoto and stacks 2.x's BlockBuilder impls can call it. 
--- stackslib/src/chainstate/stacks/miner.rs | 737 ++++++++++++----------- 1 file changed, 394 insertions(+), 343 deletions(-) diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index d6c3ab8bfa..ea18d7f9bf 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -173,6 +173,7 @@ pub fn set_mining_spend_amount(miner_status: Arc>, amt: u64) .set_spend_amount(amt); } +/// Policy settings for how mining will proceed #[derive(Debug, Clone)] pub struct BlockBuilderSettings { pub max_miner_time_ms: u64, @@ -214,7 +215,7 @@ struct MicroblockMinerRuntime { /// The value of `BlockLimitFunction` holds the state of the size of the block being built. /// As the value increases, the less we can add to blocks. #[derive(PartialEq)] -enum BlockLimitFunction { +pub enum BlockLimitFunction { /// The block size limit has not been hit, and there are no restrictions on what can be added to /// a block. NO_LIMIT_HIT, @@ -608,6 +609,43 @@ impl TransactionResult { } } +/// Trait that defines what it means to be a block builder +pub trait BlockBuilder { + fn try_mine_tx_with_len( + &mut self, + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + tx_len: u64, + limit_behavior: &BlockLimitFunction, + ast_rules: ASTRules, + ) -> TransactionResult; + + /// Append a transaction if doing so won't exceed the epoch data size. + /// Errors out if we fail to mine the tx (exceed budget, or the transaction is invalid). + fn try_mine_tx( + &mut self, + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + ast_rules: ASTRules, + ) -> Result { + let tx_len = tx.tx_len(); + match self.try_mine_tx_with_len( + clarity_tx, + tx, + tx_len, + &BlockLimitFunction::NO_LIMIT_HIT, + ast_rules, + ) { + TransactionResult::Success(s) => Ok(TransactionResult::Success(s)), + TransactionResult::Skipped(TransactionSkipped { error, .. }) + | TransactionResult::ProcessingError(TransactionError { error, .. 
}) => Err(error), + TransactionResult::Problematic(TransactionProblematic { tx, .. }) => { + Err(Error::ProblematicTransaction(tx.txid())) + } + } + } +} + /// /// Independent structure for building microblocks: /// StacksBlockBuilder cannot be used, since microblocks should only be broadcasted @@ -1539,291 +1577,51 @@ impl StacksBlockBuilder { } /// Append a transaction if doing so won't exceed the epoch data size. - /// Errors out if we fail to mine the tx (exceed budget, or the transaction is invalid). - pub fn try_mine_tx( + /// Does not check for errors + #[cfg(test)] + pub fn force_mine_tx( &mut self, clarity_tx: &mut ClarityTx, tx: &StacksTransaction, - ast_rules: ASTRules, - ) -> Result { - let tx_len = tx.tx_len(); - match self.try_mine_tx_with_len( - clarity_tx, - tx, - tx_len, - &BlockLimitFunction::NO_LIMIT_HIT, - ast_rules, - ) { - TransactionResult::Success(s) => Ok(TransactionResult::Success(s)), - TransactionResult::Skipped(TransactionSkipped { error, .. }) - | TransactionResult::ProcessingError(TransactionError { error, .. }) => Err(error), - TransactionResult::Problematic(TransactionProblematic { tx, .. }) => { - Err(Error::ProblematicTransaction(tx.txid())) - } - } - } + ) -> Result<(), Error> { + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes) + .map_err(Error::CodecError)?; + let tx_len = tx_bytes.len() as u64; - /// Append a transaction if doing so won't exceed the epoch data size. - /// Errors out if we exceed budget, or the transaction is invalid. 
- fn try_mine_tx_with_len( - &mut self, - clarity_tx: &mut ClarityTx, - tx: &StacksTransaction, - tx_len: u64, - limit_behavior: &BlockLimitFunction, - ast_rules: ASTRules, - ) -> TransactionResult { if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { - return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); + warn!( + "Epoch size is {} >= {}", + self.bytes_so_far + tx_len, + MAX_EPOCH_SIZE + ); } - match limit_behavior { - BlockLimitFunction::CONTRACT_LIMIT_HIT => { - match &tx.payload { - TransactionPayload::ContractCall(cc) => { - // once we've hit the runtime limit once, allow boot code contract calls, but do not try to eval - // other contract calls - if !cc.address.is_boot_code_addr() { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), - ); - } - } - TransactionPayload::SmartContract(..) => { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), - ); - } - _ => {} - } - } - BlockLimitFunction::LIMIT_REACHED => { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::LIMIT_REACHED".to_string(), - ) - } - BlockLimitFunction::NO_LIMIT_HIT => {} - }; - let quiet = !cfg!(test); - let result = if !self.anchored_done { - // building up the anchored blocks - if tx.anchor_mode != TransactionAnchorMode::OnChainOnly - && tx.anchor_mode != TransactionAnchorMode::Any - { - return TransactionResult::skipped_due_to_error( - tx, - Error::InvalidStacksTransaction( - "Invalid transaction anchor mode for anchored data".to_string(), - false, - ), - ); - } - - // preemptively skip problematic transactions - if let Err(e) = Relayer::static_check_problematic_relayed_tx( - clarity_tx.config.mainnet, - clarity_tx.get_epoch(), - &tx, - ast_rules, - ) { - info!( - "Detected problematic tx {} while mining; dropping from mempool", - tx.txid() - ); - return TransactionResult::problematic(&tx, Error::NetError(e)); - } - let (fee, receipt) = match 
StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, - ) { - Ok((fee, receipt)) => (fee, receipt), + if !self.anchored_done { + // save + match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { + Ok((fee, receipt)) => { + self.total_anchored_fees += fee; + } Err(e) => { - let (is_problematic, e) = - TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); - if is_problematic { - return TransactionResult::problematic(&tx, e); - } else { - match e { - Error::CostOverflowError(cost_before, cost_after, total_budget) => { - clarity_tx.reset_cost(cost_before.clone()); - if total_budget.proportion_largest_dimension(&cost_before) - < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC - { - warn!( - "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", - tx.txid(), - 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, - &total_budget - ); - return TransactionResult::error( - &tx, - Error::TransactionTooBigError, - ); - } else { - warn!( - "Transaction {} reached block cost {}; budget was {}", - tx.txid(), - &cost_after, - &total_budget - ); - return TransactionResult::skipped_due_to_error( - &tx, - Error::BlockTooBigError, - ); - } - } - _ => return TransactionResult::error(&tx, e), - } - } + warn!("Invalid transaction {} in anchored block, but forcing inclusion (error: {:?})", &tx.txid(), &e); } - }; - info!("Include tx"; - "tx" => %tx.txid(), - "payload" => tx.payload.name(), - "origin" => %tx.origin_address()); + } - // save self.txs.push(tx.clone()); - self.total_anchored_fees += fee; - - TransactionResult::success(&tx, fee, receipt) } else { - // building up the microblocks - if tx.anchor_mode != TransactionAnchorMode::OffChainOnly - && tx.anchor_mode != TransactionAnchorMode::Any - { - return TransactionResult::skipped_due_to_error( - tx, - Error::InvalidStacksTransaction( - "Invalid transaction anchor mode for streamed data".to_string(), - false, - ), - ); - } - - // preemptively skip 
problematic transactions - if let Err(e) = Relayer::static_check_problematic_relayed_tx( - clarity_tx.config.mainnet, - clarity_tx.get_epoch(), - &tx, - ast_rules, - ) { - info!( - "Detected problematic tx {} while mining; dropping from mempool", - tx.txid() - ); - return TransactionResult::problematic(&tx, Error::NetError(e)); - } - let (fee, receipt) = match StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, - ) { - Ok((fee, receipt)) => (fee, receipt), - Err(e) => { - let (is_problematic, e) = - TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); - if is_problematic { - return TransactionResult::problematic(&tx, e); - } else { - match e { - Error::CostOverflowError(cost_before, cost_after, total_budget) => { - clarity_tx.reset_cost(cost_before.clone()); - if total_budget.proportion_largest_dimension(&cost_before) - < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC - { - warn!( - "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", - tx.txid(), - 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, - &total_budget - ); - return TransactionResult::error( - &tx, - Error::TransactionTooBigError, - ); - } else { - warn!( - "Transaction {} reached block cost {}; budget was {}", - tx.txid(), - &cost_after, - &total_budget - ); - return TransactionResult::skipped_due_to_error( - &tx, - Error::BlockTooBigError, - ); - } - } - _ => return TransactionResult::error(&tx, e), - } - } - } - }; - debug!( - "Include tx {} ({}) in microblock", - tx.txid(), - tx.payload.name() - ); - - // save - self.micro_txs.push(tx.clone()); - self.total_streamed_fees += fee; - - TransactionResult::success(&tx, fee, receipt) - }; - - self.bytes_so_far += tx_len; - result - } - - /// Append a transaction if doing so won't exceed the epoch data size. 
- /// Does not check for errors - #[cfg(test)] - pub fn force_mine_tx( - &mut self, - clarity_tx: &mut ClarityTx, - tx: &StacksTransaction, - ) -> Result<(), Error> { - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes) - .map_err(Error::CodecError)?; - let tx_len = tx_bytes.len() as u64; - - if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { - warn!( - "Epoch size is {} >= {}", - self.bytes_so_far + tx_len, - MAX_EPOCH_SIZE - ); - } - - let quiet = !cfg!(test); - if !self.anchored_done { - // save - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, receipt)) => { - self.total_anchored_fees += fee; - } - Err(e) => { - warn!("Invalid transaction {} in anchored block, but forcing inclusion (error: {:?})", &tx.txid(), &e); - } - } - - self.txs.push(tx.clone()); - } else { - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, receipt)) => { - self.total_streamed_fees += fee; - } - Err(e) => { - warn!( - "Invalid transaction {} in microblock, but forcing inclusion (error: {:?})", - &tx.txid(), - &e - ); - } + match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { + Ok((fee, receipt)) => { + self.total_streamed_fees += fee; + } + Err(e) => { + warn!( + "Invalid transaction {} in microblock, but forcing inclusion (error: {:?})", + &tx.txid(), + &e + ); + } } self.micro_txs.push(tx.clone()); @@ -2342,80 +2140,43 @@ impl StacksBlockBuilder { Ok(builder) } - /// Given access to the mempool, mine an anchored block with no more than the given execution cost. - /// returns the assembled block, and the consumed execution budget. - pub fn build_anchored_block( - chainstate_handle: &StacksChainState, // not directly used; used as a handle to open other chainstates - burn_dbconn: &SortitionDBConn, + /// Select transactions for block inclusion from the mempool. + /// Applies them to the ongoing ClarityTx. 
+ /// If invalid transactions are encountered, they are dropped from the mempool. + /// Returns whether or not the miner got blocked, as well as the gathered tx events + pub fn select_and_apply_transactions( + epoch_tx: &mut ClarityTx, + builder: &mut B, mempool: &mut MemPoolDB, - parent_stacks_header: &StacksHeaderInfo, // Stacks header we're building off of - total_burn: u64, // the burn so far on the burnchain (i.e. from the last burnchain block) - proof: VRFProof, // proof over the burnchain's last seed - pubkey_hash: Hash160, - coinbase_tx: &StacksTransaction, + parent_stacks_header: &StacksHeaderInfo, + coinbase_tx: Option<&StacksTransaction>, settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, - ) -> Result<(StacksBlock, ExecutionCost, u64), Error> { - let mempool_settings = settings.mempool_settings; + ast_rules: ASTRules + ) -> Result<(bool, Vec), Error> { let max_miner_time_ms = settings.max_miner_time_ms; - - if let TransactionPayload::Coinbase(..) 
= coinbase_tx.payload { - } else { - return Err(Error::MemPoolError( - "Not a coinbase transaction".to_string(), - )); - } - - let (tip_consensus_hash, tip_block_hash, tip_height) = ( - parent_stacks_header.consensus_hash.clone(), - parent_stacks_header.anchored_header.block_hash(), - parent_stacks_header.stacks_block_height, - ); - - debug!( - "Build anchored block off of {}/{} height {}", - &tip_consensus_hash, &tip_block_hash, tip_height - ); - - let (mut chainstate, _) = chainstate_handle.reopen()?; - - let mut builder = StacksBlockBuilder::make_block_builder( - chainstate.mainnet, - parent_stacks_header, - proof, - total_burn, - pubkey_hash, - )?; - + let mempool_settings = settings.mempool_settings.clone(); + let tip_height = parent_stacks_header.stacks_block_height; let ts_start = get_epoch_time_ms(); - - let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; - let ast_rules = miner_epoch_info.ast_rules; - if ast_rules != ASTRules::Typical { - builder.header.version = cmp::max( - STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, - builder.header.version, - ); - } - - let (mut epoch_tx, confirmed_mblock_cost) = - builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; let stacks_epoch_id = epoch_tx.get_epoch(); let block_limit = epoch_tx .block_limit() .expect("Failed to obtain block limit from miner's block connection"); let mut tx_events = Vec::new(); - tx_events.push( - builder - .try_mine_tx(&mut epoch_tx, coinbase_tx, ast_rules.clone())? - .convert_to_event(), - ); - mempool.reset_nonce_cache()?; + if let Some(coinbase_tx) = coinbase_tx { + tx_events.push( + builder + .try_mine_tx(epoch_tx, coinbase_tx, ast_rules.clone())? 
+ .convert_to_event(), + ); + } + mempool.reset_nonce_cache()?; mempool.estimate_tx_rates(100, &block_limit, &stacks_epoch_id)?; + let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; let mut considered = HashSet::new(); // txids of all transactions we looked at let mut mined_origin_nonces: HashMap = HashMap::new(); // map addrs of mined transaction origins to the nonces we used let mut mined_sponsor_nonces: HashMap = HashMap::new(); // map addrs of mined transaction sponsors to the nonces we used @@ -2423,21 +2184,20 @@ impl StacksBlockBuilder { let mut invalidated_txs = vec![]; let mut to_drop_and_blacklist = vec![]; - let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; let deadline = ts_start + (max_miner_time_ms as u128); let mut num_txs = 0; let mut blocked = false; debug!( - "Anchored block transaction selection begins (child of {})", + "Block transaction selection begins (child of {})", &parent_stacks_header.anchored_header.block_hash() ); let result = { - let mut intermediate_result = Ok(0); + let mut intermediate_result : Result<_, Error> = Ok(0); while block_limit_hit != BlockLimitFunction::LIMIT_REACHED { let mut num_considered = 0; intermediate_result = mempool.iterate_candidates( - &mut epoch_tx, + epoch_tx, &mut tx_events, tip_height, mempool_settings.clone(), @@ -2604,7 +2364,7 @@ impl StacksBlockBuilder { break; } } - debug!("Anchored block transaction selection finished (child of {}): {} transactions selected ({} considered)", &parent_stacks_header.anchored_header.block_hash(), num_txs, considered.len()); + debug!("Block transaction selection finished (child of {}): {} transactions selected ({} considered)", &parent_stacks_header.anchored_header.block_hash(), num_txs, considered.len()); intermediate_result }; @@ -2615,14 +2375,91 @@ impl StacksBlockBuilder { observer.mempool_txs_dropped(to_drop_and_blacklist, MemPoolDropReason::PROBLEMATIC); } - match result { - Ok(_) => {} + if let Err(e) = result { + warn!("Failure building block: 
{}", e); + return Err(e); + } + + Ok((blocked, tx_events)) + } + + /// Given access to the mempool, mine an anchored block with no more than the given execution cost. + /// returns the assembled block, and the consumed execution budget. + pub fn build_anchored_block( + chainstate_handle: &StacksChainState, // not directly used; used as a handle to open other chainstates + burn_dbconn: &SortitionDBConn, + mempool: &mut MemPoolDB, + parent_stacks_header: &StacksHeaderInfo, // Stacks header we're building off of + total_burn: u64, // the burn so far on the burnchain (i.e. from the last burnchain block) + proof: VRFProof, // proof over the burnchain's last seed + pubkey_hash: Hash160, + coinbase_tx: &StacksTransaction, + settings: BlockBuilderSettings, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Result<(StacksBlock, ExecutionCost, u64), Error> { + if let TransactionPayload::Coinbase(..) = coinbase_tx.payload { + } else { + return Err(Error::MemPoolError( + "Not a coinbase transaction".to_string(), + )); + } + + let (tip_consensus_hash, tip_block_hash, tip_height) = ( + parent_stacks_header.consensus_hash.clone(), + parent_stacks_header.anchored_header.block_hash(), + parent_stacks_header.stacks_block_height, + ); + + debug!( + "Build anchored block off of {}/{} height {}", + &tip_consensus_hash, &tip_block_hash, tip_height + ); + + let (mut chainstate, _) = chainstate_handle.reopen()?; + + let mut builder = StacksBlockBuilder::make_block_builder( + chainstate.mainnet, + parent_stacks_header, + proof, + total_burn, + pubkey_hash, + )?; + + let ts_start = get_epoch_time_ms(); + + let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; + let ast_rules = miner_epoch_info.ast_rules; + if ast_rules != ASTRules::Typical { + builder.header.version = cmp::max( + STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, + builder.header.version, + ); + } + + let (mut epoch_tx, confirmed_mblock_cost) = + builder.epoch_begin(burn_dbconn, &mut 
miner_epoch_info)?; + + let block_limit = epoch_tx + .block_limit() + .expect("Failed to obtain block limit from miner's block connection"); + + let (blocked, tx_events) = match Self::select_and_apply_transactions( + &mut epoch_tx, + &mut builder, + mempool, + parent_stacks_header, + Some(coinbase_tx), + settings, + event_observer, + ast_rules + ) { + Ok(x) => x, Err(e) => { warn!("Failure building block: {}", e); epoch_tx.rollback_block(); return Err(e); } - } + }; if blocked { debug!( @@ -2632,9 +2469,6 @@ impl StacksBlockBuilder { return Err(Error::MinerAborted); } - // the prior do_rebuild logic wasn't necessary - // a transaction that caused a budget exception is rolled back in process_transaction - // save the block so we can build microblocks off of it let block = builder.mine_anchored_block(&mut epoch_tx); let size = builder.bytes_so_far; @@ -2676,3 +2510,220 @@ impl StacksBlockBuilder { Ok((block, consumed, size)) } } + +impl BlockBuilder for StacksBlockBuilder { + /// Append a transaction if doing so won't exceed the epoch data size. + /// Errors out if we exceed budget, or the transaction is invalid. + fn try_mine_tx_with_len( + &mut self, + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + tx_len: u64, + limit_behavior: &BlockLimitFunction, + ast_rules: ASTRules, + ) -> TransactionResult { + if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { + return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); + } + + match limit_behavior { + BlockLimitFunction::CONTRACT_LIMIT_HIT => { + match &tx.payload { + TransactionPayload::ContractCall(cc) => { + // once we've hit the runtime limit once, allow boot code contract calls, but do not try to eval + // other contract calls + if !cc.address.is_boot_code_addr() { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); + } + } + TransactionPayload::SmartContract(..) 
=> { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); + } + _ => {} + } + } + BlockLimitFunction::LIMIT_REACHED => { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::LIMIT_REACHED".to_string(), + ) + } + BlockLimitFunction::NO_LIMIT_HIT => {} + }; + + let quiet = !cfg!(test); + let result = if !self.anchored_done { + // building up the anchored blocks + if tx.anchor_mode != TransactionAnchorMode::OnChainOnly + && tx.anchor_mode != TransactionAnchorMode::Any + { + return TransactionResult::skipped_due_to_error( + tx, + Error::InvalidStacksTransaction( + "Invalid transaction anchor mode for anchored data".to_string(), + false, + ), + ); + } + + // preemptively skip problematic transactions + if let Err(e) = Relayer::static_check_problematic_relayed_tx( + clarity_tx.config.mainnet, + clarity_tx.get_epoch(), + &tx, + ast_rules, + ) { + info!( + "Detected problematic tx {} while mining; dropping from mempool", + tx.txid() + ); + return TransactionResult::problematic(&tx, Error::NetError(e)); + } + let (fee, receipt) = match StacksChainState::process_transaction( + clarity_tx, tx, quiet, ast_rules, + ) { + Ok((fee, receipt)) => (fee, receipt), + Err(e) => { + let (is_problematic, e) = + TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + if is_problematic { + return TransactionResult::problematic(&tx, e); + } else { + match e { + Error::CostOverflowError(cost_before, cost_after, total_budget) => { + clarity_tx.reset_cost(cost_before.clone()); + if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { + warn!( + "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", + tx.txid(), + 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, + &total_budget + ); + return TransactionResult::error( + &tx, + Error::TransactionTooBigError, + ); + } else { + warn!( + "Transaction {} reached block cost {}; budget was {}", + 
tx.txid(), + &cost_after, + &total_budget + ); + return TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + ); + } + } + _ => return TransactionResult::error(&tx, e), + } + } + } + }; + info!("Include tx"; + "tx" => %tx.txid(), + "payload" => tx.payload.name(), + "origin" => %tx.origin_address()); + + // save + self.txs.push(tx.clone()); + self.total_anchored_fees += fee; + + TransactionResult::success(&tx, fee, receipt) + } else { + // building up the microblocks + if tx.anchor_mode != TransactionAnchorMode::OffChainOnly + && tx.anchor_mode != TransactionAnchorMode::Any + { + return TransactionResult::skipped_due_to_error( + tx, + Error::InvalidStacksTransaction( + "Invalid transaction anchor mode for streamed data".to_string(), + false, + ), + ); + } + + // preemptively skip problematic transactions + if let Err(e) = Relayer::static_check_problematic_relayed_tx( + clarity_tx.config.mainnet, + clarity_tx.get_epoch(), + &tx, + ast_rules, + ) { + info!( + "Detected problematic tx {} while mining; dropping from mempool", + tx.txid() + ); + return TransactionResult::problematic(&tx, Error::NetError(e)); + } + let (fee, receipt) = match StacksChainState::process_transaction( + clarity_tx, tx, quiet, ast_rules, + ) { + Ok((fee, receipt)) => (fee, receipt), + Err(e) => { + let (is_problematic, e) = + TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + if is_problematic { + return TransactionResult::problematic(&tx, e); + } else { + match e { + Error::CostOverflowError(cost_before, cost_after, total_budget) => { + clarity_tx.reset_cost(cost_before.clone()); + if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { + warn!( + "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", + tx.txid(), + 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, + &total_budget + ); + return TransactionResult::error( + &tx, + Error::TransactionTooBigError, + ); + } else { + 
warn!( + "Transaction {} reached block cost {}; budget was {}", + tx.txid(), + &cost_after, + &total_budget + ); + return TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + ); + } + } + _ => return TransactionResult::error(&tx, e), + } + } + } + }; + debug!( + "Include tx {} ({}) in microblock", + tx.txid(), + tx.payload.name() + ); + + // save + self.micro_txs.push(tx.clone()); + self.total_streamed_fees += fee; + + TransactionResult::success(&tx, fee, receipt) + }; + + self.bytes_so_far += tx_len; + result + } +} From cb316482f651deb21a9f17fe7561814c447840e3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:12:03 -0400 Subject: [PATCH 056/122] feat: Nakamoto Coinbase contains the VRF proof, so add variants for it and initial test coverage --- stackslib/src/chainstate/stacks/mod.rs | 43 +++++++++++++++++++++----- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 282a229b39..26139e007c 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -635,7 +635,7 @@ pub enum TenureChangeCause { BlockFound = 0, /// No winning block-commits, extend current tenure NoBlockFound = 1, - /// A “null miner” won the block-commit (see the MEV solution below) + /// A null miner won the block-commit NullMiner = 2, } @@ -658,7 +658,13 @@ pub struct SchnorrThresholdSignature { //pub scalar: wsts::Scalar, } -/// Reasons why a `TenureChange` transaction can be de +impl SchnorrThresholdSignature { + pub fn empty() -> SchnorrThresholdSignature { + SchnorrThresholdSignature {} + } +} + +/// Reasons why a `TenureChange` transaction can be bad pub enum TenureChangeError { SignatureInvalid, /// Not signed by required threshold (>70%) @@ -675,7 +681,7 @@ pub struct TenureChangePayload { /// The StacksBlockId of the last block from the previous tenure pub previous_tenure_end: StacksBlockId, /// The number of blocks 
produced in the previous tenure - pub previous_tenure_blocks: u16, + pub previous_tenure_blocks: u32, /// A flag to indicate which of the following triggered the tenure change pub cause: TenureChangeCause, /// The ECDSA public key hash of the current tenure @@ -691,8 +697,9 @@ pub enum TransactionPayload { TokenTransfer(PrincipalData, u64, TokenTransferMemo), ContractCall(TransactionContractCall), SmartContract(TransactionSmartContract, Option), - PoisonMicroblock(StacksMicroblockHeader, StacksMicroblockHeader), // the previous epoch leader sent two microblocks with the same sequence, and this is proof - Coinbase(CoinbasePayload, Option), + // the previous epoch leader sent two microblocks with the same sequence, and this is proof + PoisonMicroblock(StacksMicroblockHeader, StacksMicroblockHeader), + Coinbase(CoinbasePayload, Option, Option), TenureChange(TenureChangePayload), } @@ -717,9 +724,12 @@ pub enum TransactionPayloadID { ContractCall = 2, PoisonMicroblock = 3, Coinbase = 4, + // has an alt principal, but no VRF proof CoinbaseToAltRecipient = 5, VersionedSmartContract = 6, TenureChange = 7, + // has a VRF proof, and may have an alt principal + NakamotoCoinbase = 8, } /// Encoding of an asset type identifier @@ -1242,6 +1252,8 @@ pub mod test { version: 1, bytes: Hash160([0xff; 20]), }; + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); let tx_payloads = vec![ TransactionPayload::TokenTransfer( stx_address.into(), @@ -1286,18 +1298,35 @@ pub mod test { }, Some(ClarityVersion::Clarity2), ), - TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Contract( + 
QualifiedContractIdentifier::transient(), + )), + None, + ), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Standard(StandardPrincipalData( + 0x01, [0x02; 20], + ))), + None, + ), + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof.clone())), TransactionPayload::Coinbase( CoinbasePayload([0x12; 32]), Some(PrincipalData::Contract( QualifiedContractIdentifier::transient(), )), + Some(proof.clone()), ), TransactionPayload::Coinbase( CoinbasePayload([0x12; 32]), Some(PrincipalData::Standard(StandardPrincipalData( 0x01, [0x02; 20], ))), + Some(proof.clone()), ), TransactionPayload::PoisonMicroblock(mblock_header_1, mblock_header_2), ]; @@ -1357,7 +1386,7 @@ pub mod test { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Mainnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; From f85d80bd80002265662891d9ccd83f7bfaa16678 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:12:30 -0400 Subject: [PATCH 057/122] chore: new coinbase variant --- stackslib/src/chainstate/stacks/tests/accounting.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index ecdc9294a9..af7adf1818 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -203,7 +203,7 @@ fn test_bad_microblock_fees_pre_v210() { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = 
TransactionAnchorMode::OnChainOnly; @@ -524,7 +524,7 @@ fn test_bad_microblock_fees_fix_transition() { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -878,7 +878,7 @@ fn test_get_block_info_v210() { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -1248,7 +1248,7 @@ fn test_get_block_info_v210_no_microblocks() { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -1630,6 +1630,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { } else { alt_recipient_id }, + None, ), ); tx_coinbase.chain_id = 0x80000000; From 89b4e5e12987eed6b0ebd73bb5ce09587e431d64 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:12:49 -0400 Subject: [PATCH 058/122] chore: v3 unlock height --- stackslib/src/chainstate/stacks/tests/block_construction.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 09db0e33aa..b6c0cebf76 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs 
+++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -4366,9 +4366,10 @@ fn mempool_incorporate_pox_unlocks() { let burn_block_height = db.get_current_burnchain_block_height() as u64; let v1_unlock_height = db.get_v1_unlock_height(); let v2_unlock_height = db.get_v2_unlock_height(); + let v3_unlock_height = db.get_v3_unlock_height(); let balance = db.get_account_stx_balance(&principal); info!("Checking balance"; "v1_unlock_height" => v1_unlock_height, "burn_block_height" => burn_block_height); - balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height, v2_unlock_height) + balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height) }) }).unwrap(); From 33553474ececd5b6d0fcde5ac6d179da61b265a2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:13:05 -0400 Subject: [PATCH 059/122] chore: add nakamoto state to TestStacksNode --- stackslib/src/chainstate/stacks/tests/mod.rs | 26 +++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index af56623f43..7d72951114 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -42,6 +42,7 @@ use crate::chainstate::burn::operations::{ }; use crate::chainstate::burn::*; use crate::chainstate::coordinator::Error as CoordinatorError; +use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::db::blocks::test::store_staging_block; use crate::chainstate::stacks::db::test::*; use crate::chainstate::stacks::db::*; @@ -270,7 +271,9 @@ pub struct TestStacksNode { pub key_ops: HashMap, // map VRF public keys to their locations in the prev_keys array pub anchored_blocks: Vec, pub microblocks: Vec>, + pub nakamoto_blocks: Vec>, pub commit_ops: HashMap, + pub nakamoto_commit_ops: HashMap, pub test_name: String, forkable: bool, } @@ -295,7 
+298,9 @@ impl TestStacksNode { key_ops: HashMap::new(), anchored_blocks: vec![], microblocks: vec![], + nakamoto_blocks: vec![], commit_ops: HashMap::new(), + nakamoto_commit_ops: HashMap::new(), test_name: test_name.to_string(), forkable: true, } @@ -309,7 +314,9 @@ impl TestStacksNode { key_ops: HashMap::new(), anchored_blocks: vec![], microblocks: vec![], + nakamoto_blocks: vec![], commit_ops: HashMap::new(), + nakamoto_commit_ops: HashMap::new(), test_name: test_name.to_string(), forkable: true, } @@ -322,7 +329,9 @@ impl TestStacksNode { key_ops: HashMap::new(), anchored_blocks: vec![], microblocks: vec![], + nakamoto_blocks: vec![], commit_ops: HashMap::new(), + nakamoto_commit_ops: HashMap::new(), test_name: "".to_string(), forkable: false, } @@ -357,7 +366,9 @@ impl TestStacksNode { key_ops: self.key_ops.clone(), anchored_blocks: self.anchored_blocks.clone(), microblocks: self.microblocks.clone(), + nakamoto_blocks: self.nakamoto_blocks.clone(), commit_ops: self.commit_ops.clone(), + nakamoto_commit_ops: self.nakamoto_commit_ops.clone(), test_name: new_test_name.to_string(), forkable: true, } @@ -416,7 +427,7 @@ impl TestStacksNode { }; block_commit_op } - + pub fn get_last_key(&self, miner: &TestMiner) -> LeaderKeyRegisterOp { let last_vrf_pubkey = miner.last_VRF_public_key().unwrap(); let idx = *self.key_ops.get(&last_vrf_pubkey).unwrap(); @@ -434,7 +445,7 @@ impl TestStacksNode { } } } - + pub fn get_last_accepted_anchored_block( &self, sortdb: &SortitionDB, @@ -479,7 +490,7 @@ impl TestStacksNode { } return None; } - + pub fn get_microblock_stream( &self, miner: &TestMiner, @@ -497,7 +508,7 @@ impl TestStacksNode { Some(idx) => Some(self.anchored_blocks[*idx].clone()), } } - + pub fn get_last_winning_snapshot( ic: &SortitionDBConn, fork_tip: &BlockSnapshot, @@ -573,7 +584,9 @@ impl TestStacksNode { ); block_commit_op } - + + /// Mine a single Stacks block and a microblock stream. + /// Produce its block-commit. 
pub fn mine_stacks_block( &mut self, sortdb: &SortitionDB, @@ -1022,6 +1035,7 @@ pub fn make_coinbase_with_nonce( TransactionPayload::Coinbase( CoinbasePayload([(burnchain_height % 256) as u8; 32]), recipient, + None ), ); tx_coinbase.chain_id = 0x80000000; @@ -1246,7 +1260,7 @@ pub fn make_user_stacks_transfer( } pub fn make_user_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64) -> StacksTransaction { - let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None); + let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); sign_standard_singlesig_tx(payload.into(), sender, nonce, tx_fee) } From ed1b62bc35d29215957797445b103e134a81e165 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:13:31 -0400 Subject: [PATCH 060/122] feat: add new codec for Nakamoto coinbase --- .../src/chainstate/stacks/transaction.rs | 70 +++++++++++++++---- 1 file changed, 57 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index cc2ce7598b..0bd299c6c3 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -219,18 +219,32 @@ impl StacksMessageCodec for TransactionPayload { h1.consensus_serialize(fd)?; h2.consensus_serialize(fd)?; } - TransactionPayload::Coinbase(buf, recipient_opt) => { - match recipient_opt { - None => { + TransactionPayload::Coinbase(buf, recipient_opt, vrf_opt) => { + match (recipient_opt, vrf_opt) { + (None, None) => { // stacks 2.05 and earlier only use this path write_next(fd, &(TransactionPayloadID::Coinbase as u8))?; write_next(fd, buf)?; } - Some(recipient) => { + (Some(recipient), None) => { write_next(fd, &(TransactionPayloadID::CoinbaseToAltRecipient as u8))?; write_next(fd, buf)?; write_next(fd, &Value::Principal(recipient.clone()))?; } + (None, Some(vrf_proof)) => { + // nakamoto coinbase + // encode principal as (optional principal) + 
write_next(fd, &(TransactionPayloadID::NakamotoCoinbase as u8))?; + write_next(fd, buf)?; + write_next(fd, &Value::none())?; + write_next(fd, &vrf_proof.to_bytes().to_vec())?; + } + (Some(recipient), Some(vrf_proof)) => { + write_next(fd, &(TransactionPayloadID::NakamotoCoinbase as u8))?; + write_next(fd, buf)?; + write_next(fd, &Value::some(Value::Principal(recipient.clone())).expect("FATAL: failed to encode recipient principal as `optional`"))?; + write_next(fd, &vrf_proof.to_bytes().to_vec())?; + } } } TransactionPayload::TenureChange(tc) => { @@ -286,7 +300,7 @@ impl StacksMessageCodec for TransactionPayload { } x if x == TransactionPayloadID::Coinbase as u8 => { let payload: CoinbasePayload = read_next(fd)?; - TransactionPayload::Coinbase(payload, None) + TransactionPayload::Coinbase(payload, None, None) } x if x == TransactionPayloadID::CoinbaseToAltRecipient as u8 => { let payload: CoinbasePayload = read_next(fd)?; @@ -298,7 +312,37 @@ impl StacksMessageCodec for TransactionPayload { } }; - TransactionPayload::Coinbase(payload, Some(recipient)) + TransactionPayload::Coinbase(payload, Some(recipient), None) + } + x if x == TransactionPayloadID::TenureChange as u8 => { + let payload : TenureChangePayload = read_next(fd)?; + TransactionPayload::TenureChange(payload) + } + // TODO: gate this! 
+ x if x == TransactionPayloadID::NakamotoCoinbase as u8 => { + let payload: CoinbasePayload = read_next(fd)?; + let principal_value_opt: Value = read_next(fd)?; + let recipient_opt = if let Value::Optional(optional_data) = principal_value_opt { + if let Some(principal_value) = optional_data.data { + if let Value::Principal(recipient_principal) = *principal_value { + Some(recipient_principal) + } + else { + None + } + } + else { + None + } + } + else { + return Err(codec_error::DeserializeError("Failed to parse nakamoto coinbase transaction -- did not receive an optional recipient principal value".to_string())); + }; + let vrf_proof_bytes: Vec = read_next(fd)?; + let Some(vrf_proof) = VRFProof::from_bytes(&vrf_proof_bytes) else { + return Err(codec_error::DeserializeError("Failed to decode coinbase VRF proof".to_string())); + }; + TransactionPayload::Coinbase(payload, recipient_opt, Some(vrf_proof)) } _ => { return Err(codec_error::DeserializeError(format!( @@ -620,10 +664,10 @@ impl StacksTransaction { } /// Try to convert to a coinbase payload - pub fn try_as_coinbase(&self) -> Option<(&CoinbasePayload, Option<&PrincipalData>)> { + pub fn try_as_coinbase(&self) -> Option<(&CoinbasePayload, Option<&PrincipalData>, Option<&VRFProof>)> { match &self.payload { - TransactionPayload::Coinbase(ref payload, ref recipient_opt) => { - Some((payload, recipient_opt.as_ref())) + TransactionPayload::Coinbase(ref payload, ref recipient_opt, ref vrf_proof_opt) => { + Some((payload, recipient_opt.as_ref(), vrf_proof_opt.as_ref())) } _ => None, } @@ -1590,12 +1634,12 @@ mod test { corrupt_h2.sequence += 1; TransactionPayload::PoisonMicroblock(corrupt_h1, corrupt_h2) } - TransactionPayload::Coinbase(ref buf, ref recipient_opt) => { + TransactionPayload::Coinbase(ref buf, ref recipient_opt, ref vrf_proof_opt) => { let mut corrupt_buf_bytes = buf.as_bytes().clone(); corrupt_buf_bytes[0] = (((corrupt_buf_bytes[0] as u16) + 1) % 256) as u8; let corrupt_buf = 
CoinbasePayload(corrupt_buf_bytes); - TransactionPayload::Coinbase(corrupt_buf, recipient_opt.clone()) + TransactionPayload::Coinbase(corrupt_buf, recipient_opt.clone(), vrf_proof_opt.clone()) } TransactionPayload::TenureChange(_) => todo!(), }; @@ -1833,7 +1877,7 @@ mod test { #[test] fn tx_stacks_transaction_payload_coinbase() { - let coinbase_payload = TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None); + let coinbase_payload = TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None); let coinbase_payload_bytes = vec![ // payload type ID TransactionPayloadID::Coinbase as u8, @@ -3334,7 +3378,7 @@ mod test { let tx_coinbase = StacksTransaction::new( TransactionVersion::Mainnet, auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); let tx_stx = StacksTransaction::new( From 664e09e6988b66f38184b0176093c07e3720fc3f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:13:49 -0400 Subject: [PATCH 061/122] feat: add epoch 2.5 / pox-4 activation --- stackslib/src/clarity_vm/clarity.rs | 175 +++++++++++++++++++++++++++- 1 file changed, 174 insertions(+), 1 deletion(-) diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index aa8a505229..899c09cf29 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -49,10 +49,12 @@ use crate::chainstate::stacks::boot::POX_2_MAINNET_CODE; use crate::chainstate::stacks::boot::POX_2_TESTNET_CODE; use crate::chainstate::stacks::boot::POX_3_MAINNET_CODE; use crate::chainstate::stacks::boot::POX_3_TESTNET_CODE; +use crate::chainstate::stacks::boot::POX_4_MAINNET_CODE; +use crate::chainstate::stacks::boot::POX_4_TESTNET_CODE; use crate::chainstate::stacks::boot::{ BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_3, BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, COSTS_2_NAME, - COSTS_3_NAME, 
POX_2_NAME, POX_3_NAME, + COSTS_3_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME, }; use crate::chainstate::stacks::db::StacksAccount; use crate::chainstate::stacks::db::StacksChainState; @@ -1276,6 +1278,169 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { (old_cost_tracker, Ok(vec![pox_3_initialization_receipt])) }) } + + pub fn initialize_epoch_2_5(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. + self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch25; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch25); + Ok(()) + }) + .unwrap(); + + // require 3.0 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch25; + }); + + /////////////////// .pox-4 //////////////////////// + let mainnet = self.mainnet; + let first_block_height = self.burn_state_db.get_burn_start_height(); + let pox_prepare_length = self.burn_state_db.get_pox_prepare_length(); + let pox_reward_cycle_length = self.burn_state_db.get_pox_reward_cycle_length(); + let pox_rejection_fraction = self.burn_state_db.get_pox_rejection_fraction(); + let pox_4_activation_height = self.burn_state_db.get_pox_4_activation_height(); + + let pox_4_first_cycle = PoxConstants::static_block_height_to_reward_cycle( + pox_4_activation_height as u64, + first_block_height as u64, + pox_reward_cycle_length as u64, + ) + .expect("PANIC: PoX-4 first reward cycle begins *before* first burn block height") + + 1; + + // get tx_version & boot code account information for pox-3 contract init + let tx_version = if mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + 
}; + + let boot_code_address = boot_code_addr(mainnet); + + let boot_code_auth = TransactionAuth::Standard( + TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { + signer: boot_code_address.bytes.clone(), + hash_mode: SinglesigHashMode::P2PKH, + key_encoding: TransactionPublicKeyEncoding::Uncompressed, + nonce: 0, + tx_fee: 0, + signature: MessageSignature::empty(), + }), + ); + + let boot_code_nonce = self.with_clarity_db_readonly(|db| { + db.get_account_nonce(&boot_code_address.clone().into()) + }); + + let boot_code_account = StacksAccount { + principal: PrincipalData::Standard(boot_code_address.into()), + nonce: boot_code_nonce, + stx_balance: STXBalance::zero(), + }; + + let pox_4_code = if mainnet { + &*POX_4_MAINNET_CODE + } else { + &*POX_4_TESTNET_CODE + }; + + let pox_4_contract_id = boot_code_id(POX_4_NAME, mainnet); + + let payload = TransactionPayload::SmartContract( + TransactionSmartContract { + name: ContractName::try_from(POX_4_NAME) + .expect("FATAL: invalid boot-code contract name"), + code_body: StacksString::from_str(pox_4_code) + .expect("FATAL: invalid boot code body"), + }, + Some(ClarityVersion::Clarity2), + ); + + let pox_4_contract_tx = + StacksTransaction::new(tx_version.clone(), boot_code_auth.clone(), payload); + + let pox_4_initialization_receipt = self.as_transaction(|tx_conn| { + // initialize with a synthetic transaction + debug!("Instantiate {} contract", &pox_4_contract_id); + let receipt = StacksChainState::process_transaction_payload( + tx_conn, + &pox_4_contract_tx, + &boot_code_account, + ASTRules::PrecheckSize, + ) + .expect("FATAL: Failed to process PoX 3 contract initialization"); + + // set burnchain params + let consts_setter = PrincipalData::from(pox_4_contract_id.clone()); + let params = vec![ + Value::UInt(first_block_height as u128), + Value::UInt(pox_prepare_length as u128), + Value::UInt(pox_reward_cycle_length as u128), + Value::UInt(pox_rejection_fraction as u128), + Value::UInt(pox_4_first_cycle 
as u128), + ]; + + let (_, _, _burnchain_params_events) = tx_conn + .run_contract_call( + &consts_setter, + None, + &pox_4_contract_id, + "set-burnchain-parameters", + ¶ms, + |_, _| false, + ) + .expect("Failed to set burnchain parameters in PoX-3 contract"); + + receipt + }); + + if pox_4_initialization_receipt.result != Value::okay_true() + || pox_4_initialization_receipt.post_condition_aborted + { + panic!( + "FATAL: Failure processing PoX 4 contract initialization: {:#?}", + &pox_4_initialization_receipt + ); + } + + debug!("Epoch 2.5 initialized"); + (old_cost_tracker, Ok(vec![pox_4_initialization_receipt])) + }) + } + + pub fn initialize_epoch_3_0(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. + self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch30; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch30); + Ok(()) + }) + .unwrap(); + + // require 3.0 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch30; + }); + + debug!("Epoch 3.0 initialized"); + (old_cost_tracker, Ok(vec![])) + }) + } pub fn start_transaction_processing<'c>(&'c mut self) -> ClarityTransactionConnection<'c, 'a> { let store = &mut self.datastore; @@ -2444,6 +2609,10 @@ mod tests { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } + + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } fn get_v1_unlock_height(&self) -> u32 { u32::MAX @@ -2452,6 +2621,10 @@ mod tests { fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } fn get_pox_prepare_length(&self) -> u32 { 
panic!("BlockLimitBurnStateDB should not return PoX info"); From 24e19f5c2f26980156c12e7474099ee3338d29e7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:14:03 -0400 Subject: [PATCH 062/122] chore: add v3 unlock and pox-4 activation height --- stackslib/src/clarity_vm/database/mod.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 9927760cd9..86e0dd9ef2 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -501,10 +501,18 @@ impl BurnStateDB for SortitionHandleTx<'_> { fn get_v2_unlock_height(&self) -> u32 { self.context.pox_constants.v2_unlock_height } + + fn get_v3_unlock_height(&self) -> u32 { + self.context.pox_constants.v3_unlock_height + } fn get_pox_3_activation_height(&self) -> u32 { self.context.pox_constants.pox_3_activation_height } + + fn get_pox_4_activation_height(&self) -> u32 { + self.context.pox_constants.pox_4_activation_height + } fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length @@ -620,10 +628,18 @@ impl BurnStateDB for SortitionDBConn<'_> { fn get_v2_unlock_height(&self) -> u32 { self.context.pox_constants.v2_unlock_height } + + fn get_v3_unlock_height(&self) -> u32 { + self.context.pox_constants.v3_unlock_height + } fn get_pox_3_activation_height(&self) -> u32 { self.context.pox_constants.pox_3_activation_height } + + fn get_pox_4_activation_height(&self) -> u32 { + self.context.pox_constants.pox_4_activation_height + } fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length From 49b44e7de7d04dc606a6ff2782adf4c391047ae6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:14:17 -0400 Subject: [PATCH 063/122] feat: mined_naakmoto_block_event trait --- stackslib/src/core/mempool.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stackslib/src/core/mempool.rs 
b/stackslib/src/core/mempool.rs index fcb87215d5..5000d7a254 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -45,6 +45,7 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::ConsensusHash; +use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::events::StacksTransactionReceipt; use crate::chainstate::stacks::miner::TransactionEvent; use crate::chainstate::stacks::StacksBlock; @@ -241,6 +242,14 @@ pub trait MemPoolEventDispatcher { anchor_block_consensus_hash: ConsensusHash, anchor_block: BlockHeaderHash, ); + fn mined_nakamoto_block_event( + &self, + target_burn_height: u64, + block: &NakamotoBlock, + block_size_bytes: u64, + consumed: &ExecutionCost, + tx_results: Vec, + ); } #[derive(Debug, PartialEq, Clone)] From 32e69a3a6a4c69080936d30268042eb345e136d0 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:14:32 -0400 Subject: [PATCH 064/122] feat: epoch 2.5 definitions --- stackslib/src/core/mod.rs | 404 +++++++++++++++++++++++++++++++++++++- 1 file changed, 395 insertions(+), 9 deletions(-) diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 59b184cd4b..80e7806af7 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -62,10 +62,12 @@ pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; pub const PEER_VERSION_EPOCH_2_3: u8 = 0x08; pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; +pub const PEER_VERSION_EPOCH_2_5: u8 = 0x0a; +pub const PEER_VERSION_EPOCH_3_0: u8 = 0x0b; // this should be updated to the latest network epoch version supported by // this node. this will be checked by the `validate_epochs()` method. 
-pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_4 as u32; +pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_0 as u32; // set the fourth byte of the peer version pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; @@ -124,6 +126,10 @@ pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_240; /// This is Epoch-2.3, now Epoch-2.4, activation height proposed in SIP-024 pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551; +/// This is Epoch-2.5, activation height proposed in SIP-021 +pub const BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT: u64 = 1_000_000; +/// This is Epoch-3.0, activation height proposed in SIP-021 +pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 2_000_000; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; @@ -134,6 +140,8 @@ pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_431_300; pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_432_545; +pub const BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT: u64 = 20_000_000; +pub const BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT: u64 = 30_000_000; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; @@ -183,6 +191,11 @@ pub const POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = pub const POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT as u32) + 1; +pub const POX_V3_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = + (BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT as u32) + 1; +pub const POX_V3_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = + (BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT as u32) + 1; + /// Burn block height at which the ASTRules::PrecheckSize becomes the default behavior on mainnet pub const AST_RULES_PRECHECK_SIZE: 
u64 = 752000; // on or about Aug 30 2022 @@ -247,7 +260,7 @@ pub fn check_fault_injection(fault_name: &str) -> bool { } lazy_static! { - pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 7] = [ + pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 9] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -293,15 +306,29 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch24, start_height: BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_4 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT, + end_height: BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0 + }, ]; } lazy_static! { - pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 7] = [ + pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 9] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -347,15 +374,30 @@ lazy_static! 
StacksEpoch {
         epoch_id: StacksEpochId::Epoch24,
         start_height: BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT,
-        end_height: STACKS_EPOCH_MAX,
+        end_height: BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT,
         block_limit: BLOCK_LIMIT_MAINNET_21.clone(),
         network_epoch: PEER_VERSION_EPOCH_2_4
     },
+    StacksEpoch {
+        epoch_id: StacksEpochId::Epoch25,
+        start_height: BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT,
+        end_height: BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT,
+        block_limit: BLOCK_LIMIT_MAINNET_21.clone(),
+        network_epoch: PEER_VERSION_EPOCH_2_5
+    },
+    StacksEpoch {
+        epoch_id: StacksEpochId::Epoch30,
+        start_height: BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT,
+        end_height: STACKS_EPOCH_MAX,
+        block_limit: BLOCK_LIMIT_MAINNET_21.clone(),
+        network_epoch: PEER_VERSION_EPOCH_3_0
+    },
All block-commits in 3.0 must have a memo bitfield with this value
Ordering::Greater); } #[test] @@ -512,7 +598,13 @@ pub trait StacksEpochExtension { #[cfg(test)] fn unit_test_2_4(epoch_2_0_block_height: u64) -> Vec; #[cfg(test)] + fn unit_test_2_5(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] + fn unit_test_3_0(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] + fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, epoch_2_05_block_height: u64, @@ -904,6 +996,225 @@ impl StacksEpochExtension for StacksEpoch { }, ] } + + #[cfg(test)] + fn unit_test_2_5(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test_2_5 first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: 
ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 20, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 20, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: first_burnchain_height + 24, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + ] + } + + #[cfg(test)] + fn unit_test_3_0(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test_3_0 first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 
205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 20, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 20, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: first_burnchain_height + 24, + end_height: first_burnchain_height + 28, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height + 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 
210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + ] + } #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { @@ -955,6 +1266,80 @@ impl StacksEpochExtension for StacksEpoch { }, ] } + + #[cfg(test)] + fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 0, + end_height: first_burnchain_height, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height, + end_height: STACKS_EPOCH_MAX, + block_limit: 
BLOCK_LIMIT_MAINNET_21, + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + ] + } #[cfg(test)] fn unit_test(stacks_epoch_id: StacksEpochId, first_burnchain_height: u64) -> Vec { @@ -967,7 +1352,8 @@ impl StacksEpochExtension for StacksEpoch { StacksEpochId::Epoch22 => StacksEpoch::unit_test_2_2(first_burnchain_height), StacksEpochId::Epoch23 => StacksEpoch::unit_test_2_3(first_burnchain_height), StacksEpochId::Epoch24 => StacksEpoch::unit_test_2_4(first_burnchain_height), - StacksEpochId::Epoch30 => todo!(), + StacksEpochId::Epoch25 => StacksEpoch::unit_test_2_5(first_burnchain_height), + StacksEpochId::Epoch30 => StacksEpoch::unit_test_3_0(first_burnchain_height), } } From 2fb821a5a8fdb71d0242f8b8a12908660c9d904e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:14:49 -0400 Subject: [PATCH 065/122] chore: new Coinbase variant --- stackslib/src/cost_estimates/pessimistic.rs | 2 ++ stackslib/src/cost_estimates/tests/cost_estimators.rs | 2 +- stackslib/src/cost_estimates/tests/fee_medians.rs | 2 +- stackslib/src/cost_estimates/tests/fee_scalar.rs | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/stackslib/src/cost_estimates/pessimistic.rs b/stackslib/src/cost_estimates/pessimistic.rs index cff9914638..0ae3b8b833 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -234,6 +234,8 @@ impl PessimisticEstimator { StacksEpochId::Epoch23 => ":2.1", // reuse cost estimates in Epoch24 StacksEpochId::Epoch24 => ":2.1", + // reuse cost estimates in Epoch25 + StacksEpochId::Epoch25 => ":2.1", // reuse cost estimates in Epoch30 StacksEpochId::Epoch30 => ":2.1", }; diff --git a/stackslib/src/cost_estimates/tests/cost_estimators.rs b/stackslib/src/cost_estimates/tests/cost_estimators.rs index a3a4fd7745..037631aab6 100644 --- a/stackslib/src/cost_estimates/tests/cost_estimators.rs +++ b/stackslib/src/cost_estimates/tests/cost_estimators.rs @@ -79,7 +79,7 @@ fn make_dummy_coinbase_tx() -> 
StacksTransactionReceipt { StacksTransactionReceipt::from_coinbase(StacksTransaction::new( TransactionVersion::Mainnet, TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), - TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None), )) } diff --git a/stackslib/src/cost_estimates/tests/fee_medians.rs b/stackslib/src/cost_estimates/tests/fee_medians.rs index 93075b175a..c2fac17677 100644 --- a/stackslib/src/cost_estimates/tests/fee_medians.rs +++ b/stackslib/src/cost_estimates/tests/fee_medians.rs @@ -57,7 +57,7 @@ fn make_dummy_coinbase_tx() -> StacksTransaction { StacksTransaction::new( TransactionVersion::Mainnet, TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), - TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None), ) } diff --git a/stackslib/src/cost_estimates/tests/fee_scalar.rs b/stackslib/src/cost_estimates/tests/fee_scalar.rs index ecef42a9fd..20fa5a677d 100644 --- a/stackslib/src/cost_estimates/tests/fee_scalar.rs +++ b/stackslib/src/cost_estimates/tests/fee_scalar.rs @@ -77,7 +77,7 @@ fn make_dummy_coinbase_tx() -> StacksTransaction { StacksTransaction::new( TransactionVersion::Mainnet, TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), - TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None), ) } From e56f3e2a71e5e42e4eceb50606c8d802e3493b35 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:14:58 -0400 Subject: [PATCH 066/122] chore: new coinbase variant --- stackslib/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index c56b70dec7..7b4f1a5a46 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -605,7 +605,7 @@ simulating a miner. 
let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Mainnet, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); coinbase_tx.chain_id = chain_id; @@ -1420,7 +1420,7 @@ simulating a miner. let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Mainnet, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); coinbase_tx.chain_id = chain_id; From 096b6dc1246ba325d3140e91b51066b5f8cdbc75 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:15:16 -0400 Subject: [PATCH 067/122] chore: mock pox-3 unlock and pox-4 activation --- stackslib/src/net/inv.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stackslib/src/net/inv.rs b/stackslib/src/net/inv.rs index 0cc15583f1..cce729d7ab 100644 --- a/stackslib/src/net/inv.rs +++ b/stackslib/src/net/inv.rs @@ -3109,6 +3109,8 @@ mod test { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); @@ -3138,6 +3140,8 @@ mod test { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); From 4f7c8d31afc9dc132f835504c550bec6c449bf77 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:15:51 -0400 Subject: [PATCH 068/122] chore: update TestPeer to use the right burn block handler (temporary) --- stackslib/src/net/mod.rs | 39 ++++++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 15 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 051b8c722b..f5e153ca81 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2778,6 +2778,8 @@ pub mod test { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut spending_account = 
TestMinerFactory::new().next_miner( @@ -3521,8 +3523,9 @@ pub mod test { Option, ) { let sortdb = self.sortdb.take().unwrap(); - let (block_height, block_hash) = { + let (block_height, block_hash, epoch_id) = { let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let epoch_id = SortitionDB::get_stacks_epoch(&sortdb.conn(), tip.block_height + 1).unwrap().unwrap().epoch_id; if set_consensus_hash { TestPeer::set_ops_consensus_hash(&mut blockstack_ops, &tip.consensus_hash); @@ -3579,20 +3582,25 @@ pub mod test { ) .unwrap(); - // NOTE: this is harmless in the Nakamoto epoch, but it will never be read - Burnchain::process_affirmation_maps( - &self.config.burnchain, - &mut burnchain_db, - &indexer, - block_header.block_height, - ) - .unwrap(); - - (block_header.block_height, block_header_hash) + if epoch_id < StacksEpochId::Epoch30 { + Burnchain::process_affirmation_maps( + &self.config.burnchain, + &mut burnchain_db, + &indexer, + block_header.block_height, + ) + .unwrap(); + } + (block_header.block_height, block_header_hash, epoch_id) }; let missing_pox_anchor_block_hash_opt = - self.coord.handle_new_burnchain_block().unwrap(); + if epoch_id < StacksEpochId::Epoch30 { + self.coord.handle_new_burnchain_block().unwrap() + } + else { + self.coord.handle_new_nakamoto_burnchain_block().unwrap() + }; let pox_id = { let ic = sortdb.index_conn(); @@ -4044,7 +4052,8 @@ pub mod test { tip_id } - // Make a tenure + /// Make a tenure, using `tenure_builder` to generate a Stacks block and a list of + /// microblocks. pub fn make_tenure( &mut self, mut tenure_builder: F, @@ -4191,7 +4200,7 @@ pub mod test { ) } - // have this peer produce an anchored block and microblock tail using its internal miner. 
+ /// Produce a default, non-empty tenure for epoch 2.x pub fn make_default_tenure( &mut self, ) -> ( @@ -4263,7 +4272,7 @@ pub mod test { microblocks, ) } - + pub fn to_neighbor(&self) -> Neighbor { self.config.to_neighbor() } From 7f60bfaf4d84c70df555674fc336888720b30598 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:16:15 -0400 Subject: [PATCH 069/122] feat: add handler for processing new nakamoto blocks --- stackslib/src/net/relay.rs | 97 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 05548a1882..4f96f22aab 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -44,6 +44,7 @@ use crate::chainstate::burn::ConsensusHash; use crate::chainstate::coordinator::comm::CoordinatorChannels; use crate::chainstate::coordinator::BlockEventDispatcher; use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt, StacksHeaderInfo}; use crate::chainstate::stacks::events::StacksTransactionReceipt; @@ -660,6 +661,76 @@ impl Relayer { } Ok(res) } + + /// Insert a staging Nakamoto block that got relayed to us somehow -- e.g. uploaded via http, + /// downloaded by us, or pushed via p2p. + /// Return Ok(true) if we stored it, Ok(false) if we didn't + pub fn process_new_nakamoto_block( + sort_handle: &SortitionHandleConn, + chainstate: &mut StacksChainState, + block: NakamotoBlock, + ) -> Result { + debug!( + "Handle incoming Nakamoto block {}/{}", + &block.header.consensus_hash, + &block.header.block_hash() + ); + + // do we have this block? don't lock the DB needlessly if so. + if let Some(_) = NakamotoChainState::get_block_header(chainstate.db(), &block.header.block_id())? 
{ + debug!("Already have Nakamoto block {}", &block.header.block_id()); + return Ok(false); + } + + let block_sn = SortitionDB::get_block_snapshot_consensus(sort_handle, &block.header.consensus_hash)? + .ok_or(chainstate_error::DBError(db_error::NotFoundError))?; + + // don't relay this block if it's using the wrong AST rules (this would render at least one of its + // txs problematic). + let epoch_id = SortitionDB::get_stacks_epoch(sort_handle, block_sn.block_height)? + .expect("FATAL: no epoch defined") + .epoch_id; + + if epoch_id < StacksEpochId::Epoch30 { + error!("Nakamoto blocks are not supported in this epoch"); + return Err(chainstate_error::InvalidStacksBlock("Nakamoto blocks are not supported in this epoch".into())); + } + + if !Relayer::static_check_problematic_relayed_nakamoto_block( + chainstate.mainnet, + epoch_id, + &block, + ASTRules::PrecheckSize, + ) { + warn!( + "Nakamoto block is problematic; will not store or relay"; + "stacks_block_hash" => %block.header.block_hash(), + "consensus_hash" => %block.header.consensus_hash, + "burn_height" => block.header.chain_length, + "sortition_height" => block_sn.block_height, + ); + return Ok(false); + } + + let accept_msg = format!( + "Stored incoming Nakamoto block {}/{}", + &block.header.consensus_hash, + &block.header.block_hash() + ); + + let staging_db_tx = chainstate.db_tx_begin()?; + let accepted = NakamotoChainState::accept_block( + block, + sort_handle, + &staging_db_tx + )?; + staging_db_tx.commit()?; + + if accepted { + debug!("{}", &accept_msg); + } + Ok(accepted) + } /// Coalesce a set of microblocks into relayer hints and MicroblocksData messages, as calculated by /// process_new_blocks(). Make sure the messages don't get too big. @@ -1313,6 +1384,32 @@ impl Relayer { } true } + + /// Verify that a relayed block is not problematic -- i.e. it doesn't contain any problematic + /// transactions. This is a static check -- we only look at the block contents. 
+ /// + /// Returns true if the check passed -- i.e. no problems. + /// Returns false if not + pub fn static_check_problematic_relayed_nakamoto_block( + mainnet: bool, + epoch_id: StacksEpochId, + block: &NakamotoBlock, + ast_rules: ASTRules, + ) -> bool { + for tx in block.txs.iter() { + if !Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, tx, ast_rules) + .is_ok() + { + info!( + "Nakamoto block {} with tx {} will not be stored or relayed", + block.header.block_hash(), + tx.txid() + ); + return false; + } + } + true + } /// Verify that a relayed microblock is not problematic -- i.e. it doesn't contain any /// problematic transactions. This is a static check -- we only look at the microblock From 61becfcaf6cc49cae595c5987ff8d0da58d32bbd Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:16:29 -0400 Subject: [PATCH 070/122] chore: report pox-4 in /v2/info --- stackslib/src/net/rpc.rs | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index 301e742454..f1bac65427 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -63,7 +63,7 @@ use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::burn::Opcodes; use crate::chainstate::nakamoto::NakamotoChainState; -use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME}; use crate::chainstate::stacks::db::blocks::CheckError; use crate::chainstate::stacks::db::{blocks::MINIMUM_TX_FEE_RATE_PER_BYTE, StacksChainState}; use crate::chainstate::stacks::Error as chain_error; @@ -337,6 +337,13 @@ impl RPCPoxInfoData { "PoX-3 first reward cycle begins before first burn block height".to_string(), ))? 
+ 1; + + let pox_4_first_cycle = burnchain + .block_height_to_reward_cycle(burnchain.pox_constants.pox_4_activation_height as u64) + .ok_or(net_error::ChainstateError( + "PoX-4 first reward cycle begins before first burn block height".to_string(), + ))? + + 1; let data = chainstate .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { @@ -549,6 +556,14 @@ impl RPCPoxInfoData { as u64, first_reward_cycle_id: pox_3_first_cycle, }, + RPCPoxContractVersion { + contract_id: boot_code_id(POX_4_NAME, chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain + .pox_constants + .pox_4_activation_height + as u64, + first_reward_cycle_id: pox_4_first_cycle, + }, ], }) } @@ -1379,6 +1394,7 @@ impl ConversationHttp { let burn_block_height = clarity_db.get_current_burnchain_block_height() as u64; let v1_unlock_height = clarity_db.get_v1_unlock_height(); let v2_unlock_height = clarity_db.get_v2_unlock_height(); + let v3_unlock_height = clarity_db.get_v3_unlock_height(); let (balance, balance_proof) = if with_proof { clarity_db .get_with_proof::(&key) @@ -1408,11 +1424,13 @@ impl ConversationHttp { burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ); let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ); let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); @@ -4312,7 +4330,7 @@ mod test { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&privk1).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; From e964784a4d50fbb2010462794d383aed7e5adf03 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:16:47 -0400 Subject: [PATCH 071/122] feat: initial 
implementation of the Nakamoto miner --- stackslib/src/chainstate/nakamoto/miner.rs | 742 +++++++++++++++++++++ 1 file changed, 742 insertions(+) create mode 100644 stackslib/src/chainstate/nakamoto/miner.rs diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs new file mode 100644 index 0000000000..eeed6d9dae --- /dev/null +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -0,0 +1,742 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::cmp; +use std::collections::HashMap; +use std::collections::HashSet; +use std::convert::From; +use std::fs; +use std::mem; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::sync::Mutex; +use std::thread::ThreadId; + +use clarity::vm::analysis::{CheckError, CheckErrors}; +use clarity::vm::ast::errors::ParseErrors; +use clarity::vm::ast::ASTRules; +use clarity::vm::clarity::TransactionConnection; +use clarity::vm::database::BurnStateDB; +use clarity::vm::errors::Error as InterpreterError; +use clarity::vm::types::TypeSignature; +use clarity::vm::costs::ExecutionCost; + +use serde::Deserialize; +use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::hash::MerkleTree; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; +use stacks_common::util::vrf::*; + +use crate::burnchains::PrivateKey; +use crate::burnchains::PublicKey; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx}; +use crate::chainstate::burn::operations::*; +use crate::chainstate::burn::*; +use crate::chainstate::stacks::address::StacksAddressExtensions; +use crate::chainstate::nakamoto::SetupBlockResult; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::nakamoto::NakamotoBlockHeader; +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::db::accounts::MinerReward; +use crate::chainstate::stacks::db::transactions::{ + handle_clarity_runtime_error, ClarityRuntimeTxError, +}; +use crate::chainstate::stacks::db::{ + blocks::MemPoolRejection, ChainstateTx, ClarityTx, MinerRewardInfo, StacksChainState, + MINER_REWARD_MATURITY, +}; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::db::StacksHeaderInfo; +use crate::chainstate::stacks::miner::BlockBuilderSettings; +use crate::chainstate::stacks::miner::BlockLimitFunction; +use 
crate::chainstate::stacks::miner::TransactionResult; +use crate::chainstate::stacks::miner::TransactionSkipped; +use crate::chainstate::stacks::miner::TransactionError; +use crate::chainstate::stacks::miner::TransactionProblematic; +use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt}; +use crate::chainstate::stacks::Error; +use crate::chainstate::stacks::*; +use crate::chainstate::stacks::miner::BlockBuilder; +use crate::clarity_vm::clarity::{ClarityConnection, ClarityInstance, Error as clarity_error}; +use crate::core::mempool::*; +use crate::core::*; +use crate::cost_estimates::metrics::CostMetric; +use crate::cost_estimates::CostEstimator; + +use crate::net::relay::Relayer; +use crate::net::Error as net_error; +use stacks_common::types::StacksPublicKeyBuffer; + +use crate::monitoring::{ + set_last_mined_block_transaction_count, set_last_mined_execution_cost_observed, +}; +use stacks_common::codec::{read_next, write_next, StacksMessageCodec}; +use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::TrieHash; +use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, ConsensusHash}; +use stacks_common::util::hash::Hash160; + +/// New tenure information +pub struct NakamotoTenureStart { + /// coinbase transaction for this miner + pub coinbase_tx: StacksTransaction, + /// VRF proof for this miner + pub vrf_proof: VRFProof +} + +pub struct NakamotoBlockBuilder { + /// if this is building atop an epoch 2 block, then this is that block's header + epoch2_parent_header: Option<(StacksBlockHeader, ConsensusHash)>, + /// if this is building atop an epoch 3 block, then this is that block's header + nakamoto_parent_header: Option, + /// VRF proof, if needed + vrf_proof: Option, + /// Total burn this block represents + total_burn: u64, + /// parent block-commit hash value + parent_commit_hash_value: BlockHeaderHash, + /// Matured 
miner rewards to process, if any. + /// If given, this is (parent-miner-reward, this-miner-reward, reward-info) + matured_miner_rewards_opt: Option<(MinerReward, MinerReward, MinerRewardInfo)>, + /// bytes of space consumed so far + bytes_so_far: u64, + /// transactions selected + txs: Vec, + /// header we're filling in + header: NakamotoBlockHeader, +} + +pub struct MinerTenureInfo<'a> { + pub chainstate_tx: ChainstateTx<'a>, + pub clarity_instance: &'a mut ClarityInstance, + pub burn_tip: BurnchainHeaderHash, + /// This is the expected burn tip height (i.e., the current burnchain tip + 1) + /// of the mined block + pub burn_tip_height: u32, + pub mainnet: bool, + pub chain_tip: StacksHeaderInfo, + pub parent_consensus_hash: ConsensusHash, + pub parent_header_hash: BlockHeaderHash, + pub tenure_start: bool, + pub tenure_height: u64, +} + +impl NakamotoBlockBuilder { + /// Make a block builder atop a Nakamoto parent for a new tenure + pub fn new_tenure_from_nakamoto_parent( + parent_tenure_id: &StacksBlockId, + parent: &NakamotoBlockHeader, + consensus_hash: &ConsensusHash, + total_burn: u64, + proof: &VRFProof + ) -> NakamotoBlockBuilder { + let parent_commit_hash_value = BlockHeaderHash(parent_tenure_id.0.clone()); + NakamotoBlockBuilder { + epoch2_parent_header: None, + nakamoto_parent_header: Some(parent.clone()), + total_burn, + vrf_proof: Some(proof.clone()), + parent_commit_hash_value, + matured_miner_rewards_opt: None, + bytes_so_far: 0, + txs: vec![], + header: NakamotoBlockHeader::from_parent_empty(parent.chain_length + 1, total_burn, consensus_hash.clone(), parent.block_id()) + } + } + + /// Make a block builder atop a Nakamoto parent for a new block within a tenure + pub fn continue_tenure_from_nakamoto_parent( + parent: &NakamotoBlockHeader, + consensus_hash: &ConsensusHash, + total_burn: u64, + ) -> NakamotoBlockBuilder { + let parent_commit_hash_value = BlockHeaderHash(parent.block_id().0.clone()); + NakamotoBlockBuilder { + epoch2_parent_header: 
None, + nakamoto_parent_header: Some(parent.clone()), + total_burn, + vrf_proof: None, + parent_commit_hash_value, + matured_miner_rewards_opt: None, + bytes_so_far: 0, + txs: vec![], + header: NakamotoBlockHeader::from_parent_empty(parent.chain_length + 1, total_burn, consensus_hash.clone(), parent.block_id()) + } + } + + /// Make a block builder atop an epoch 2 parent for a new tenure + pub fn new_tenure_from_epoch2_parent( + parent: &StacksBlockHeader, + parent_consensus_hash: &ConsensusHash, + consensus_hash: &ConsensusHash, + total_burn: u64, + proof: &VRFProof + ) -> NakamotoBlockBuilder { + NakamotoBlockBuilder { + epoch2_parent_header: Some((parent.clone(), parent_consensus_hash.clone())), + nakamoto_parent_header: None, + total_burn, + vrf_proof: Some(proof.clone()), + parent_commit_hash_value: parent.block_hash(), + matured_miner_rewards_opt: None, + bytes_so_far: 0, + txs: vec![], + header: NakamotoBlockHeader::from_parent_empty(parent.total_work.work + 1, total_burn, consensus_hash.clone(), StacksBlockId::new(parent_consensus_hash, &parent.block_hash())) + } + } + + /// Make a block builder from genesis (testing only) + pub fn new_tenure_from_genesis( + proof: &VRFProof + ) -> NakamotoBlockBuilder { + NakamotoBlockBuilder { + epoch2_parent_header: None, + nakamoto_parent_header: None, + total_burn: 0, + vrf_proof: Some(proof.clone()), + parent_commit_hash_value: FIRST_STACKS_BLOCK_HASH.clone(), + matured_miner_rewards_opt: None, + bytes_so_far: 0, + txs: vec![], + header: NakamotoBlockHeader::genesis() + } + } + + /// Make a Nakamoto block builder appropriate for building atop the given block header + pub fn new_from_parent( + // tenure ID -- this is the index block hash of the start block of the last tenure (i.e. + // the data we committed to in the block-commit). If this is an epoch 2.x parent, then + // this is just the index block hash of the parent Stacks block. + parent_tenure_id: &StacksBlockId, + // Stacks header we're building off of. 
+ parent_stacks_header: &StacksHeaderInfo, + // consensus hash of this tenure's burnchain block + consensus_hash: &ConsensusHash, + // total BTC burn so far + total_burn: u64, + // VRF proof, if we're starting a _new_ tenure (instead of continuing an existing one) + vrf_proof_opt: Option, + ) -> Result { + let builder = if let Some(parent_nakamoto_header) = parent_stacks_header.anchored_header.as_stacks_nakamoto() { + // building atop a nakamoto block + // new tenure? + if let Some(vrf_proof) = vrf_proof_opt.as_ref() { + NakamotoBlockBuilder::new_tenure_from_nakamoto_parent(parent_tenure_id, parent_nakamoto_header, consensus_hash, total_burn, vrf_proof) + } + else { + NakamotoBlockBuilder::continue_tenure_from_nakamoto_parent(parent_nakamoto_header, consensus_hash, total_burn) + } + } + else if let Some(parent_epoch2_header) = parent_stacks_header.anchored_header.as_stacks_epoch2() { + // building atop a stacks 2.x block. + // we are necessarily starting a new tenure + if let Some(vrf_proof) = vrf_proof_opt.as_ref() { + NakamotoBlockBuilder::new_tenure_from_epoch2_parent(parent_epoch2_header, &parent_stacks_header.consensus_hash, consensus_hash, total_burn, vrf_proof) + } + else { + // not allowed + warn!("Failed to start a Nakamoto tenure atop a Stacks 2.x block -- missing a VRF proof"); + return Err(Error::ExpectedTenureChange); + } + } + else { + // not reachable -- no other choices + return Err(Error::InvalidStacksBlock("Parent is neither a Nakamoto block nor a Stacks 2.x block".into())); + }; + + Ok(builder) + } + + + /// This function should be called before `tenure_begin`. + /// It creates a MinerTenureInfo struct which owns connections to the chainstate and sortition + /// DBs, so that block-processing is guaranteed to terminate before the lives of these handles + /// expire. 
+ pub fn load_tenure_info<'a>( + &self, + chainstate: &'a mut StacksChainState, + burn_dbconn: &'a SortitionDBConn, + tenure_start: bool + ) -> Result, Error> { + debug!( + "Nakamoto miner tenure begin" + ); + + let burn_tip = SortitionDB::get_canonical_chain_tip_bhh(burn_dbconn.conn())?; + let burn_tip_height = + SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height as u32; + + let mainnet = chainstate.config().mainnet; + + let (chain_tip, parent_consensus_hash, parent_header_hash) = if let Some(nakamoto_parent_header) = self.nakamoto_parent_header.as_ref() { + // parent is a nakamoto block + let parent_header_info = NakamotoChainState::get_block_header( + chainstate.db(), + &StacksBlockId::new(&nakamoto_parent_header.consensus_hash, &nakamoto_parent_header.block_hash()) + )? + .ok_or(Error::NoSuchBlockError) + .map_err(|e| { + warn!("No such Nakamoto parent block {}/{} ({})", &nakamoto_parent_header.consensus_hash, &nakamoto_parent_header.block_hash(), &nakamoto_parent_header.block_id()); + e + })?; + + (parent_header_info, nakamoto_parent_header.consensus_hash.clone(), nakamoto_parent_header.block_hash()) + } + else if let Some((stacks_header, consensus_hash)) = self.epoch2_parent_header.as_ref() { + // parent is a Stacks epoch2 block + let parent_header_info = NakamotoChainState::get_block_header( + chainstate.db(), + &StacksBlockId::new(consensus_hash, &stacks_header.block_hash()) + )? 
+ .ok_or(Error::NoSuchBlockError) + .map_err(|e| { + warn!("No such Stacks 2.x parent block {}/{} ({})", &consensus_hash, &stacks_header.block_hash(), &StacksBlockId::new(&consensus_hash, &stacks_header.block_hash())); + e + })?; + + (parent_header_info, consensus_hash.clone(), stacks_header.block_hash()) + } + else { + // parent is genesis (testing only) + (StacksHeaderInfo::regtest_genesis(), FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), FIRST_STACKS_BLOCK_HASH.clone()) + }; + + let tenure_height = if let Ok(Some(parent_tenure_height)) = NakamotoChainState::get_tenure_height(chainstate.db(), &chain_tip.index_block_hash()) { + parent_tenure_height.checked_add(1).expect("Blockchain overflow") + } + else { + 0 + }; + + // data won't be committed, so do a concurrent transaction + let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin()?; + + Ok(MinerTenureInfo { + chainstate_tx, + clarity_instance, + burn_tip, + burn_tip_height, + mainnet, + chain_tip, + parent_consensus_hash, + parent_header_hash, + tenure_start, + tenure_height + }) + } + + /// Begin/resume mining a tenure's transactions. + /// Returns an open ClarityTx for mining the block. + /// NOTE: even though we don't yet know the block hash, the Clarity VM ensures that a + /// transaction can't query information about the _current_ block (i.e. information that is not + /// yet known). + pub fn tenure_begin<'a, 'b>( + &mut self, + burn_dbconn: &'a SortitionDBConn, + info: &'b mut MinerTenureInfo<'a>, + ) -> Result, Error> { + let SetupBlockResult { + clarity_tx, + matured_miner_rewards_opt, + .. 
+ } = NakamotoChainState::setup_block( + &mut info.chainstate_tx, + info.clarity_instance, + burn_dbconn, + &burn_dbconn.context.pox_constants, + &info.chain_tip, + info.burn_tip, + info.burn_tip_height, + info.parent_consensus_hash, + info.parent_header_hash, + info.mainnet, + info.tenure_start, + info.tenure_height, + )?; + self.matured_miner_rewards_opt = matured_miner_rewards_opt; + Ok(clarity_tx) + } + + /// Finish up mining an epoch's transactions. + /// Return the ExecutionCost consumed so far. + pub fn tenure_finish(self, tx: ClarityTx) -> ExecutionCost { + let new_consensus_hash = MINER_BLOCK_CONSENSUS_HASH.clone(); + let new_block_hash = MINER_BLOCK_HEADER_HASH.clone(); + + let index_block_hash = + StacksBlockHeader::make_index_block_hash(&new_consensus_hash, &new_block_hash); + + // write out the trie... + let consumed = tx.commit_mined_block(&index_block_hash); + + test_debug!( + "\n\nFinished mining. Trie is in mined_blocks table.\n", + ); + + consumed + } + + /// Finish constructing a Nakamoto block. + /// The block will not be signed yet. + /// Returns the unsigned Nakamoto block + fn finalize_block(&mut self, clarity_tx: &mut ClarityTx) -> NakamotoBlock { + // done! 
Calculate state root and tx merkle root +        let txid_vecs = self +            .txs +            .iter() +            .map(|tx| tx.txid().as_bytes().to_vec()) +            .collect(); + +        let merkle_tree = MerkleTree::::new(&txid_vecs); +        let tx_merkle_root = merkle_tree.root(); +        let state_root_hash = clarity_tx.seal(); + +        self.header.tx_merkle_root = tx_merkle_root; +        self.header.state_index_root = state_root_hash; + +        let block = NakamotoBlock { +            header: self.header.clone(), +            txs: self.txs.clone(), +        }; + +        test_debug!( +            "\n\nMined Nakamoto block {}, {} transactions, state root is {}\n", +            block.header.block_hash(), +            block.txs.len(), +            state_root_hash +        ); + +        info!( +            "Miner: mined Nakamoto block"; +            "block_hash" => %block.header.block_hash(), +            "block_height" => block.header.chain_length, +            "num_txs" => block.txs.len(), +            "parent_block" => %block.header.parent_block_id, +            "state_root" => %state_root_hash +        ); + +        block +    } + +    /// Finish building the Nakamoto block +    pub fn mine_nakamoto_block(&mut self, clarity_tx: &mut ClarityTx) -> NakamotoBlock { +        NakamotoChainState::finish_block( +            clarity_tx, +            self.matured_miner_rewards_opt.as_ref(), +        ) +        .expect("FATAL: call to `finish_block` failed"); +        self.finalize_block(clarity_tx) +    } + +    /// Given access to the mempool, mine a nakamoto block. +    /// It will not be signed. +    pub fn build_nakamoto_block( +        // not directly used; used as a handle to open other chainstates +        chainstate_handle: &StacksChainState, +        burn_dbconn: &SortitionDBConn, +        mempool: &mut MemPoolDB, +        // tenure ID -- this is the index block hash of the start block of the last tenure (i.e. +        // the data we committed to in the block-commit) +        parent_tenure_id: &StacksBlockId, +        // Stacks header we're building off of. +        parent_stacks_header: &StacksHeaderInfo, +        // consensus hash of this block +        consensus_hash: &ConsensusHash, +        // the burn so far on the burnchain (i.e. 
from the last burnchain block) + total_burn: u64, + new_tenure_info: Option, + settings: BlockBuilderSettings, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Result<(NakamotoBlock, ExecutionCost, u64), Error> { + let (tip_consensus_hash, tip_block_hash, tip_height) = ( + parent_stacks_header.consensus_hash.clone(), + parent_stacks_header.anchored_header.block_hash(), + parent_stacks_header.stacks_block_height, + ); + + debug!( + "Build Nakamoto block off of {}/{} height {}", + &tip_consensus_hash, &tip_block_hash, tip_height + ); + + let (mut chainstate, _) = chainstate_handle.reopen()?; + + let mut builder = NakamotoBlockBuilder::new_from_parent( + parent_tenure_id, + parent_stacks_header, + consensus_hash, + total_burn, + new_tenure_info.as_ref().map(|info| info.vrf_proof.clone()) + )?; + + let ts_start = get_epoch_time_ms(); + + let mut miner_tenure_info = builder.load_tenure_info(&mut chainstate, burn_dbconn, new_tenure_info.is_some())?; + let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; + + let block_limit = tenure_tx + .block_limit() + .expect("Failed to obtain block limit from miner's block connection"); + + let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( + &mut tenure_tx, + &mut builder, + mempool, + parent_stacks_header, + new_tenure_info.as_ref().map(|info| &info.coinbase_tx), + settings, + event_observer, + ASTRules::PrecheckSize + ) { + Ok(x) => x, + Err(e) => { + warn!("Failure building block: {}", e); + tenure_tx.rollback_block(); + return Err(e); + } + }; + + if blocked { + debug!( + "Miner: block transaction selection aborted (child of {})", + &parent_stacks_header.anchored_header.block_hash() + ); + return Err(Error::MinerAborted); + } + + // save the block so we can build microblocks off of it + let block = builder.mine_nakamoto_block(&mut tenure_tx); + let size = builder.bytes_so_far; + let consumed = builder.tenure_finish(tenure_tx); + + let ts_end = 
get_epoch_time_ms(); + + if let Some(observer) = event_observer { + observer.mined_nakamoto_block_event( + SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height + 1, + &block, + size, + &consumed, + tx_events, + ); + } + + set_last_mined_block_transaction_count(block.txs.len() as u64); + set_last_mined_execution_cost_observed(&consumed, &block_limit); + + info!( + "Miner: mined Nakamoto block"; + "block_hash" => %block.header.block_hash(), + "block_id" => %block.header.block_id(), + "height" => block.header.chain_length, + "tx_count" => block.txs.len(), + "parent_block_id" => %block.header.parent_block_id, + "block_size" => size, + "execution_consumed" => %consumed, + "%-full" => block_limit.proportion_largest_dimension(&consumed), + "assembly_time_ms" => ts_end.saturating_sub(ts_start), + ); + + Ok((block, consumed, size)) + } + + #[cfg(test)] + pub fn make_nakamoto_block_from_txs( + mut self, + chainstate_handle: &StacksChainState, + burn_dbconn: &SortitionDBConn, + mut txs: Vec, + ) -> Result<(NakamotoBlock, u64, ExecutionCost), Error> { + debug!("Build Nakamoto block from {} transactions", txs.len()); + let (mut chainstate, _) = chainstate_handle.reopen()?; + + let new_tenure = txs.iter().find(|txn| if let TransactionPayload::TenureChange(..) = txn.payload { true } else { false }).is_some(); + + let mut miner_tenure_info = self.load_tenure_info(&mut chainstate, burn_dbconn, new_tenure)?; + let mut tenure_tx = self.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; + for tx in txs.drain(..) { + let tx_len = tx.tx_len(); + match self.try_mine_tx_with_len(&mut tenure_tx, &tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize) { + TransactionResult::Success(..) => { + debug!("Included {}", &tx.txid()); + } + TransactionResult::Skipped(TransactionSkipped { error, .. }) + | TransactionResult::ProcessingError(TransactionError { + error, .. 
+ }) => { + match error { + Error::BlockTooBigError => { + // done mining -- our execution budget is exceeded. + // Make the block from the transactions we did manage to get + debug!("Block budget exceeded on tx {}", &tx.txid()); + } + Error::InvalidStacksTransaction(_emsg, true) => { + // if we have an invalid transaction that was quietly ignored, don't warn here either + test_debug!( + "Failed to apply tx {}: InvalidStacksTransaction '{:?}'", + &tx.txid(), + &_emsg + ); + continue; + } + Error::ProblematicTransaction(txid) => { + test_debug!("Encountered problematic transaction. Aborting"); + return Err(Error::ProblematicTransaction(txid)); + } + e => { + warn!("Failed to apply tx {}: {:?}", &tx.txid(), &e); + continue; + } + } + } + TransactionResult::Problematic(TransactionProblematic { + tx, .. + }) => { + // drop from the mempool + debug!("Encountered problematic transaction {}", &tx.txid()); + return Err(Error::ProblematicTransaction(tx.txid())); + } + } + } + let block = self.mine_nakamoto_block(&mut tenure_tx); + let size = self.bytes_so_far; + let cost = self.tenure_finish(tenure_tx); + Ok((block, size, cost)) + } +} + +impl BlockBuilder for NakamotoBlockBuilder { + /// Append a transaction if doing so won't exceed the epoch data size. + /// Errors out if we exceed budget, or the transaction is invalid. 
+ fn try_mine_tx_with_len( + &mut self, + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + tx_len: u64, + limit_behavior: &BlockLimitFunction, + ast_rules: ASTRules, + ) -> TransactionResult { + if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { + return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); + } + + match limit_behavior { + BlockLimitFunction::CONTRACT_LIMIT_HIT => { + match &tx.payload { + TransactionPayload::ContractCall(cc) => { + // once we've hit the runtime limit once, allow boot code contract calls, but do not try to eval + // other contract calls + if !cc.address.is_boot_code_addr() { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); + } + } + TransactionPayload::SmartContract(..) => { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); + } + _ => {} + } + } + BlockLimitFunction::LIMIT_REACHED => { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::LIMIT_REACHED".to_string(), + ) + } + BlockLimitFunction::NO_LIMIT_HIT => {} + }; + + let quiet = !cfg!(test); + let result = { + // preemptively skip problematic transactions + if let Err(e) = Relayer::static_check_problematic_relayed_tx( + clarity_tx.config.mainnet, + clarity_tx.get_epoch(), + &tx, + ast_rules, + ) { + info!( + "Detected problematic tx {} while mining; dropping from mempool", + tx.txid() + ); + return TransactionResult::problematic(&tx, Error::NetError(e)); + } + let (fee, receipt) = match StacksChainState::process_transaction( + clarity_tx, tx, quiet, ast_rules, + ) { + Ok((fee, receipt)) => (fee, receipt), + Err(e) => { + let (is_problematic, e) = + TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + if is_problematic { + return TransactionResult::problematic(&tx, e); + } else { + match e { + Error::CostOverflowError(cost_before, cost_after, total_budget) => { + clarity_tx.reset_cost(cost_before.clone()); + 
if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { + warn!( + "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", + tx.txid(), + 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, + &total_budget + ); + return TransactionResult::error( + &tx, + Error::TransactionTooBigError, + ); + } else { + warn!( + "Transaction {} reached block cost {}; budget was {}", + tx.txid(), + &cost_after, + &total_budget + ); + return TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + ); + } + } + _ => return TransactionResult::error(&tx, e), + } + } + } + }; + info!("Include tx"; + "tx" => %tx.txid(), + "payload" => tx.payload.name(), + "origin" => %tx.origin_address()); + + // save + self.txs.push(tx.clone()); + TransactionResult::success(&tx, fee, receipt) + }; + + self.bytes_so_far += tx_len; + result + } +} From 3de4117e9c2e4d36f5ffde5208f19adc2e47ad51 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:17:06 -0400 Subject: [PATCH 072/122] feat: pox-4 stub (just a copy of pox-3 for now) --- .../src/chainstate/stacks/boot/pox-4.clar | 1320 +++++++++++++++++ 1 file changed, 1320 insertions(+) create mode 100644 stackslib/src/chainstate/stacks/boot/pox-4.clar diff --git a/stackslib/src/chainstate/stacks/boot/pox-4.clar b/stackslib/src/chainstate/stacks/boot/pox-4.clar new file mode 100644 index 0000000000..5878038a0b --- /dev/null +++ b/stackslib/src/chainstate/stacks/boot/pox-4.clar @@ -0,0 +1,1320 @@ +;; The .pox-3 contract +;; Error codes +(define-constant ERR_STACKING_UNREACHABLE 255) +(define-constant ERR_STACKING_CORRUPTED_STATE 254) +(define-constant ERR_STACKING_INSUFFICIENT_FUNDS 1) +(define-constant ERR_STACKING_INVALID_LOCK_PERIOD 2) +(define-constant ERR_STACKING_ALREADY_STACKED 3) +(define-constant ERR_STACKING_NO_SUCH_PRINCIPAL 4) +(define-constant ERR_STACKING_EXPIRED 5) +(define-constant ERR_STACKING_STX_LOCKED 6) +(define-constant 
ERR_STACKING_PERMISSION_DENIED 9) +(define-constant ERR_STACKING_THRESHOLD_NOT_MET 11) +(define-constant ERR_STACKING_POX_ADDRESS_IN_USE 12) +(define-constant ERR_STACKING_INVALID_POX_ADDRESS 13) +(define-constant ERR_STACKING_ALREADY_REJECTED 17) +(define-constant ERR_STACKING_INVALID_AMOUNT 18) +(define-constant ERR_NOT_ALLOWED 19) +(define-constant ERR_STACKING_ALREADY_DELEGATED 20) +(define-constant ERR_DELEGATION_EXPIRES_DURING_LOCK 21) +(define-constant ERR_DELEGATION_TOO_MUCH_LOCKED 22) +(define-constant ERR_DELEGATION_POX_ADDR_REQUIRED 23) +(define-constant ERR_INVALID_START_BURN_HEIGHT 24) +(define-constant ERR_NOT_CURRENT_STACKER 25) +(define-constant ERR_STACK_EXTEND_NOT_LOCKED 26) +(define-constant ERR_STACK_INCREASE_NOT_LOCKED 27) +(define-constant ERR_DELEGATION_NO_REWARD_SLOT 28) +(define-constant ERR_DELEGATION_WRONG_REWARD_SLOT 29) +(define-constant ERR_STACKING_IS_DELEGATED 30) +(define-constant ERR_STACKING_NOT_DELEGATED 31) + +;; PoX disabling threshold (a percent) +(define-constant POX_REJECTION_FRACTION u25) + +;; Valid values for burnchain address versions. +;; These first four correspond to address hash modes in Stacks 2.1, +;; and are defined in pox-mainnet.clar and pox-testnet.clar (so they +;; cannot be defined here again). 
+;; (define-constant ADDRESS_VERSION_P2PKH 0x00) +;; (define-constant ADDRESS_VERSION_P2SH 0x01) +;; (define-constant ADDRESS_VERSION_P2WPKH 0x02) +;; (define-constant ADDRESS_VERSION_P2WSH 0x03) +(define-constant ADDRESS_VERSION_NATIVE_P2WPKH 0x04) +(define-constant ADDRESS_VERSION_NATIVE_P2WSH 0x05) +(define-constant ADDRESS_VERSION_NATIVE_P2TR 0x06) +;; Keep these constants in lock-step with the address version buffs above +;; Maximum value of an address version as a uint +(define-constant MAX_ADDRESS_VERSION u6) +;; Maximum value of an address version that has a 20-byte hashbytes +;; (0x00, 0x01, 0x02, 0x03, and 0x04 have 20-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_20 u4) +;; Maximum value of an address version that has a 32-byte hashbytes +;; (0x05 and 0x06 have 32-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_32 u6) + +;; Data vars that store a copy of the burnchain configuration. +;; Implemented as data-vars, so that different configurations can be +;; used in e.g. test harnesses. +(define-data-var pox-prepare-cycle-length uint PREPARE_CYCLE_LENGTH) +(define-data-var pox-reward-cycle-length uint REWARD_CYCLE_LENGTH) +(define-data-var pox-rejection-fraction uint POX_REJECTION_FRACTION) +(define-data-var first-burnchain-block-height uint u0) +(define-data-var configured bool false) +(define-data-var first-2-1-reward-cycle uint u0) + +;; This function can only be called once, when it boots up +(define-public (set-burnchain-parameters (first-burn-height uint) + (prepare-cycle-length uint) + (reward-cycle-length uint) + (rejection-fraction uint) + (begin-2-1-reward-cycle uint)) + (begin + (asserts! 
(not (var-get configured)) (err ERR_NOT_ALLOWED)) + (var-set first-burnchain-block-height first-burn-height) + (var-set pox-prepare-cycle-length prepare-cycle-length) + (var-set pox-reward-cycle-length reward-cycle-length) + (var-set pox-rejection-fraction rejection-fraction) + (var-set first-2-1-reward-cycle begin-2-1-reward-cycle) + (var-set configured true) + (ok true)) +) + +;; The Stacking lock-up state and associated metadata. +;; Records are inserted into this map via `stack-stx`, `delegate-stack-stx`, `stack-extend` +;; `delegate-stack-extend` and burnchain transactions for invoking `stack-stx`, etc. +;; Records will be deleted from this map when auto-unlocks are processed +;; +;; This map de-normalizes some state from the `reward-cycle-pox-address-list` map +;; and the `pox-3` contract tries to keep this state in sync with the reward-cycle +;; state. The major invariants of this `stacking-state` map are: +;; (1) any entry in `reward-cycle-pox-address-list` with `some stacker` points to a real `stacking-state` +;; (2) `stacking-state.reward-set-indexes` matches the index of that `reward-cycle-pox-address-list` +;; (3) all `stacking-state.reward-set-indexes` match the index of their reward cycle entries +;; (4) `stacking-state.pox-addr` matches `reward-cycle-pox-address-list.pox-addr` +;; (5) if set, (len reward-set-indexes) == lock-period +;; (6) (reward-cycle-to-burn-height (+ lock-period first-reward-cycle)) == (get unlock-height (stx-account stacker)) +;; These invariants only hold while `cur-reward-cycle < (+ lock-period first-reward-cycle)` +;; +(define-map stacking-state + { stacker: principal } + { + ;; Description of the underlying burnchain address that will + ;; receive PoX'ed tokens. Translating this into an address + ;; depends on the burnchain being used. When Bitcoin is + ;; the burnchain, this gets translated into a p2pkh, p2sh, + ;; p2wpkh-p2sh, p2wsh-p2sh, p2wpkh, p2wsh, or p2tr UTXO, + ;; depending on the version. 
The `hashbytes` field *must* be + ;; either 20 bytes or 32 bytes, depending on the output. + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + ;; how long the uSTX are locked, in reward cycles. + lock-period: uint, + ;; reward cycle when rewards begin + first-reward-cycle: uint, + ;; indexes in each reward-set associated with this user. + ;; these indexes are only valid looking forward from + ;; `first-reward-cycle` (i.e., they do not correspond + ;; to entries in the reward set that may have been from + ;; previous stack-stx calls, or prior to an extend) + reward-set-indexes: (list 12 uint), + ;; principal of the delegate, if stacker has delegated + delegated-to: (optional principal) + } +) + +;; Delegation relationships +(define-map delegation-state + { stacker: principal } + { + amount-ustx: uint, ;; how many uSTX delegated? + delegated-to: principal, ;; who are we delegating? + until-burn-ht: (optional uint), ;; how long does the delegation last? + ;; does the delegate _need_ to use a specific + ;; pox recipient address? + pox-addr: (optional { version: (buff 1), hashbytes: (buff 32) }) + } +) + +;; allowed contract-callers +(define-map allowance-contract-callers + { sender: principal, contract-caller: principal } + { until-burn-ht: (optional uint) }) + +;; How many uSTX are stacked in a given reward cycle. +;; Updated when a new PoX address is registered, or when more STX are granted +;; to it. +(define-map reward-cycle-total-stacked + { reward-cycle: uint } + { total-ustx: uint } +) + +;; Internal map read by the Stacks node to iterate through the list of +;; PoX reward addresses on a per-reward-cycle basis. 
+(define-map reward-cycle-pox-address-list + { reward-cycle: uint, index: uint } + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + total-ustx: uint, + stacker: (optional principal) + } +) + +(define-map reward-cycle-pox-address-list-len + { reward-cycle: uint } + { len: uint } +) + +;; how much has been locked up for this address before +;; committing? +;; this map allows stackers to stack amounts < minimum +;; by paying the cost of aggregation during the commit +(define-map partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; This is identical to partial-stacked-by-cycle, but its data is never deleted. +;; It is used to preserve data for downstream clients to observe aggregate +;; commits. Each key/value pair in this map is simply the last value of +;; partial-stacked-by-cycle right after it was deleted (so, subsequent calls +;; to the `stack-aggregation-*` functions will overwrite this). +(define-map logged-partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; Amount of uSTX that reject PoX, by reward cycle +(define-map stacking-rejection + { reward-cycle: uint } + { amount: uint } +) + +;; Who rejected in which reward cycle +(define-map stacking-rejectors + { stacker: principal, reward-cycle: uint } + { amount: uint } +) + +;; Getter for stacking-rejectors +(define-read-only (get-pox-rejection (stacker principal) (reward-cycle uint)) + (map-get? stacking-rejectors { stacker: stacker, reward-cycle: reward-cycle })) + +;; Has PoX been rejected in the given reward cycle? +(define-read-only (is-pox-active (reward-cycle uint)) + (let ( + (reject-votes + (default-to + u0 + (get amount (map-get? 
stacking-rejection { reward-cycle: reward-cycle })))) + ) + ;; (100 * reject-votes) / stx-liquid-supply < pox-rejection-fraction + (< (* u100 reject-votes) + (* (var-get pox-rejection-fraction) stx-liquid-supply))) +) + +;; What's the reward cycle number of the burnchain block height? +;; Will runtime-abort if height is less than the first burnchain block (this is intentional) +(define-read-only (burn-height-to-reward-cycle (height uint)) + (/ (- height (var-get first-burnchain-block-height)) (var-get pox-reward-cycle-length))) + +;; What's the block height at the start of a given reward cycle? +(define-read-only (reward-cycle-to-burn-height (cycle uint)) + (+ (var-get first-burnchain-block-height) (* cycle (var-get pox-reward-cycle-length)))) + +;; What's the current PoX reward cycle? +(define-read-only (current-pox-reward-cycle) + (burn-height-to-reward-cycle burn-block-height)) + +;; Get the _current_ PoX stacking principal information. If the information +;; is expired, or if there's never been such a stacker, then returns none. +(define-read-only (get-stacker-info (stacker principal)) + (match (map-get? stacking-state { stacker: stacker }) + stacking-info + (if (<= (+ (get first-reward-cycle stacking-info) (get lock-period stacking-info)) (current-pox-reward-cycle)) + ;; present, but lock has expired + none + ;; present, and lock has not expired + (some stacking-info) + ) + ;; no state at all + none + )) + +(define-read-only (check-caller-allowed) + (or (is-eq tx-sender contract-caller) + (let ((caller-allowed + ;; if not in the caller map, return false + (unwrap! (map-get? allowance-contract-callers + { sender: tx-sender, contract-caller: contract-caller }) + false)) + (expires-at + ;; if until-burn-ht not set, then return true (because no expiry) + (unwrap! (get until-burn-ht caller-allowed) true))) + ;; is the caller allowance expired? 
+ (if (>= burn-block-height expires-at) + false + true)))) + +(define-read-only (get-check-delegation (stacker principal)) + (let ((delegation-info (try! (map-get? delegation-state { stacker: stacker })))) + ;; did the existing delegation expire? + (if (match (get until-burn-ht delegation-info) + until-burn-ht (> burn-block-height until-burn-ht) + false) + ;; it expired, return none + none + ;; delegation is active + (some delegation-info)))) + +;; Get the size of the reward set for a reward cycle. +;; Note that this does _not_ return duplicate PoX addresses. +;; Note that this also _will_ return PoX addresses that are beneath +;; the minimum threshold -- i.e. the threshold can increase after insertion. +;; Used internally by the Stacks node, which filters out the entries +;; in this map to select PoX addresses with enough STX. +(define-read-only (get-reward-set-size (reward-cycle uint)) + (default-to + u0 + (get len (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle })))) + +;; How many rejection votes have we been accumulating for the next block +(define-read-only (next-cycle-rejection-votes) + (default-to + u0 + (get amount (map-get? stacking-rejection { reward-cycle: (+ u1 (current-pox-reward-cycle)) })))) + +;; Add a single PoX address to a single reward cycle. +;; Used to build up a set of per-reward-cycle PoX addresses. +;; No checking will be done -- don't call if this PoX address is already registered in this reward cycle! 
+;; Returns the index into the reward cycle that the PoX address is stored to +(define-private (append-reward-cycle-pox-addr (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-cycle uint) + (amount-ustx uint) + (stacker (optional principal))) + (let ((sz (get-reward-set-size reward-cycle))) + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: sz } + { pox-addr: pox-addr, total-ustx: amount-ustx, stacker: stacker }) + (map-set reward-cycle-pox-address-list-len + { reward-cycle: reward-cycle } + { len: (+ u1 sz) }) + sz)) + +;; How many uSTX are stacked? +(define-read-only (get-total-ustx-stacked (reward-cycle uint)) + (default-to + u0 + (get total-ustx (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) +) + +;; Called internally by the node to iterate through the list of PoX addresses in this reward cycle. +;; Returns (optional (tuple (pox-addr ) (total-ustx ))) +(define-read-only (get-reward-set-pox-address (reward-cycle uint) (index uint)) + (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: index })) + +(define-private (fold-unlock-reward-cycle (set-index uint) + (data-res (response { cycle: uint, + first-unlocked-cycle: uint, + stacker: principal + } int))) + (let ((data (try! data-res)) + (cycle (get cycle data)) + (first-unlocked-cycle (get first-unlocked-cycle data))) + ;; if current-cycle hasn't reached first-unlocked-cycle, just continue to next iter + (asserts! (>= cycle first-unlocked-cycle) (ok (merge data { cycle: (+ u1 cycle) }))) + (let ((cycle-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: cycle, index: set-index }))) + (cycle-entry-u (get stacker cycle-entry)) + (cycle-entry-total-ustx (get total-ustx cycle-entry)) + (cycle-last-entry-ix (- (get len (unwrap-panic (map-get? reward-cycle-pox-address-list-len { reward-cycle: cycle }))) u1))) + (asserts! 
(is-eq cycle-entry-u (some (get stacker data))) (err ERR_STACKING_CORRUPTED_STATE)) + (if (not (is-eq cycle-last-entry-ix set-index)) + ;; do a "move" if the entry to remove isn't last + (let ((move-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix })))) + (map-set reward-cycle-pox-address-list + { reward-cycle: cycle, index: set-index } + move-entry) + (match (get stacker move-entry) moved-stacker + ;; if the moved entry had an associated stacker, update its state + (let ((moved-state (unwrap-panic (map-get? stacking-state { stacker: moved-stacker }))) + ;; calculate the index into the reward-set-indexes that `cycle` is at + (moved-cycle-index (- cycle (get first-reward-cycle moved-state))) + (moved-reward-list (get reward-set-indexes moved-state)) + ;; reward-set-indexes[moved-cycle-index] = set-index via slice?, append, concat. + (update-list (unwrap-panic (replace-at? moved-reward-list moved-cycle-index set-index)))) + (map-set stacking-state { stacker: moved-stacker } + (merge moved-state { reward-set-indexes: update-list }))) + ;; otherwise, we don't need to update stacking-state after move + true)) + ;; if not moving, just noop + true) + ;; in all cases, we now need to delete the last list entry + (map-delete reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix }) + (map-set reward-cycle-pox-address-list-len { reward-cycle: cycle } { len: cycle-last-entry-ix }) + ;; finally, update `reward-cycle-total-stacked` + (map-set reward-cycle-total-stacked { reward-cycle: cycle } + { total-ustx: (- (get total-ustx (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: cycle }))) + cycle-entry-total-ustx) }) + (ok (merge data { cycle: (+ u1 cycle)} ))))) + +;; This method is called by the Stacks block processor directly in order to handle the contract state mutations +;; associated with an early unlock. 
This can only be invoked by the block processor: it is private, and no methods +;; from this contract invoke it. +(define-private (handle-unlock (user principal) (amount-locked uint) (cycle-to-unlock uint)) + (let ((user-stacking-state (unwrap-panic (map-get? stacking-state { stacker: user }))) + (first-cycle-locked (get first-reward-cycle user-stacking-state)) + (reward-set-indexes (get reward-set-indexes user-stacking-state))) + ;; iterate over each reward set the user is a member of, and remove them from the sets. only apply to reward sets after cycle-to-unlock. + (try! (fold fold-unlock-reward-cycle reward-set-indexes (ok { cycle: first-cycle-locked, first-unlocked-cycle: cycle-to-unlock, stacker: user }))) + ;; Now that we've cleaned up all the reward set entries for the user, delete the user's stacking-state + (map-delete stacking-state { stacker: user }) + (ok true))) + +;; Add a PoX address to the `cycle-index`-th reward cycle, if `cycle-index` is between 0 and the given num-cycles (exclusive). +;; Arguments are given as a tuple, so this function can be (folded ..)'ed onto a list of its arguments. +;; Used by add-pox-addr-to-reward-cycles. +;; No checking is done. +;; The returned tuple is the same as inputted `params`, but the `i` field is incremented if +;; the pox-addr was added to the given cycle. Also, `reward-set-indexes` grows to include all +;; of the `reward-cycle-index` key parts of the `reward-cycle-pox-address-list` which get added by this function. +;; This way, the caller knows which items in a given reward cycle's PoX address list got updated. 
+(define-private (add-pox-addr-to-ith-reward-cycle (cycle-index uint) (params (tuple + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-set-indexes (list 12 uint)) + (first-reward-cycle uint) + (num-cycles uint) + (stacker (optional principal)) + (amount-ustx uint) + (i uint)))) + (let ((reward-cycle (+ (get first-reward-cycle params) (get i params))) + (num-cycles (get num-cycles params)) + (i (get i params)) + (reward-set-index (if (< i num-cycles) + (let ((total-ustx (get-total-ustx-stacked reward-cycle)) + (reward-index + ;; record how many uSTX this pox-addr will stack for in the given reward cycle + (append-reward-cycle-pox-addr + (get pox-addr params) + reward-cycle + (get amount-ustx params) + (get stacker params) + ))) + ;; update running total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: (+ (get amount-ustx params) total-ustx) }) + (some reward-index)) + none)) + (next-i (if (< i num-cycles) (+ i u1) i))) + { + pox-addr: (get pox-addr params), + first-reward-cycle: (get first-reward-cycle params), + num-cycles: num-cycles, + amount-ustx: (get amount-ustx params), + stacker: (get stacker params), + reward-set-indexes: (match + reward-set-index new (unwrap-panic (as-max-len? (append (get reward-set-indexes params) new) u12)) + (get reward-set-indexes params)), + i: next-i + })) + +;; Add a PoX address to a given sequence of reward cycle lists. +;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. 
+(define-private (add-pox-addr-to-reward-cycles (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint) + (stacker principal)) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11)) + (results (fold add-pox-addr-to-ith-reward-cycle cycle-indexes + { pox-addr: pox-addr, first-reward-cycle: first-reward-cycle, num-cycles: num-cycles, + reward-set-indexes: (list), amount-ustx: amount-ustx, i: u0, stacker: (some stacker) })) + (reward-set-indexes (get reward-set-indexes results))) + ;; For safety, add up the number of times (add-pox-addr-to-ith-reward-cycle) returns 1. + ;; It _should_ be equal to num-cycles. + (asserts! (is-eq num-cycles (get i results)) (err ERR_STACKING_UNREACHABLE)) + (asserts! (is-eq num-cycles (len reward-set-indexes)) (err ERR_STACKING_UNREACHABLE)) + (ok reward-set-indexes))) + +(define-private (add-pox-partial-stacked-to-ith-cycle + (cycle-index uint) + (params { pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + num-cycles: uint, + amount-ustx: uint })) + (let ((pox-addr (get pox-addr params)) + (num-cycles (get num-cycles params)) + (reward-cycle (get reward-cycle params)) + (amount-ustx (get amount-ustx params))) + (let ((current-amount + (default-to u0 + (get stacked-amount + (map-get? partial-stacked-by-cycle { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle }))))) + (if (>= cycle-index num-cycles) + ;; do not add to cycles >= cycle-index + false + ;; otherwise, add to the partial-stacked-by-cycle + (map-set partial-stacked-by-cycle + { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle } + { stacked-amount: (+ amount-ustx current-amount) })) + ;; produce the next params tuple + { pox-addr: pox-addr, + reward-cycle: (+ u1 reward-cycle), + num-cycles: num-cycles, + amount-ustx: amount-ustx }))) + +;; Add a PoX address to a given sequence of partial reward cycle lists. 
+;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. +(define-private (add-pox-partial-stacked (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint)) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11))) + (fold add-pox-partial-stacked-to-ith-cycle cycle-indexes + { pox-addr: pox-addr, reward-cycle: first-reward-cycle, num-cycles: num-cycles, amount-ustx: amount-ustx }) + true)) + +;; What is the minimum number of uSTX to be stacked in the given reward cycle? +;; Used internally by the Stacks node, and visible publicly. +(define-read-only (get-stacking-minimum) + (/ stx-liquid-supply STACKING_THRESHOLD_25)) + +;; Is the address mode valid for a PoX address? +(define-read-only (check-pox-addr-version (version (buff 1))) + (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION)) + +;; Is this buffer the right length for the given PoX address? +(define-read-only (check-pox-addr-hashbytes (version (buff 1)) (hashbytes (buff 32))) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_20) + (is-eq (len hashbytes) u20) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_32) + (is-eq (len hashbytes) u32) + false))) + +;; Is the given lock period valid? +(define-read-only (check-pox-lock-period (lock-period uint)) + (and (>= lock-period MIN_POX_REWARD_CYCLES) + (<= lock-period MAX_POX_REWARD_CYCLES))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (can-stack-stx (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; minimum uSTX must be met + (asserts! 
(<= (get-stacking-minimum) amount-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle num-cycles))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (minimal-can-stack-stx + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; amount must be valid + (asserts! (> amount-ustx u0) + (err ERR_STACKING_INVALID_AMOUNT)) + + ;; sender principal must not have rejected in this upcoming reward cycle + (asserts! (is-none (get-pox-rejection tx-sender first-reward-cycle)) + (err ERR_STACKING_ALREADY_REJECTED)) + + ;; lock period must be in acceptable range. + (asserts! (check-pox-lock-period num-cycles) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; address version must be valid + (asserts! (check-pox-addr-version (get version pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + ;; address hashbytes must be valid for the version + (asserts! (check-pox-addr-hashbytes (get version pox-addr) (get hashbytes pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + (ok true))) + +;; Revoke contract-caller authorization to call stacking methods +(define-public (disallow-contract-caller (caller principal)) + (begin + (asserts! 
(is-eq tx-sender contract-caller) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-delete allowance-contract-callers { sender: tx-sender, contract-caller: caller })))) + +;; Give a contract-caller authorization to call stacking methods +;; normally, stacking methods may only be invoked by _direct_ transactions +;; (i.e., the tx-sender issues a direct contract-call to the stacking methods) +;; by issuing an allowance, the tx-sender may call through the allowed contract +(define-public (allow-contract-caller (caller principal) (until-burn-ht (optional uint))) + (begin + (asserts! (is-eq tx-sender contract-caller) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-set allowance-contract-callers + { sender: tx-sender, contract-caller: caller } + { until-burn-ht: until-burn-ht })))) + +;; Lock up some uSTX for stacking! Note that the given amount here is in micro-STX (uSTX). +;; The STX will be locked for the given number of reward cycles (lock-period). +;; This is the self-service interface. tx-sender will be the Stacker. +;; +;; * The given stacker cannot currently be stacking. +;; * You will need the minimum uSTX threshold. This will be determined by (get-stacking-minimum) +;; at the time this method is called. +;; * You may need to increase the amount of uSTX locked up later, since the minimum uSTX threshold +;; may increase between reward cycles. +;; * The Stacker will receive rewards in the reward cycle following `start-burn-ht`. +;; Importantly, `start-burn-ht` may not be further into the future than the next reward cycle, +;; and in most cases should be set to the current burn block height. +;; +;; The tokens will unlock and be returned to the Stacker (tx-sender) automatically. 
+(define-public (stack-stx (amount-ustx uint) + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (start-burn-ht uint) + (lock-period uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to "post-date" their `stack-stx` transaction + (asserts! (is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender principal must not be stacking + (asserts! (is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance tx-sender) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; ensure that stacking can be performed + (try! (can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked + (let ((reward-set-indexes (try! (add-pox-addr-to-reward-cycles pox-addr first-reward-cycle lock-period amount-ustx tx-sender)))) + ;; add stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return the lock-up information, so the node can actually carry out the lock. 
+ (ok { stacker: tx-sender, lock-amount: amount-ustx, unlock-burn-height: (reward-cycle-to-burn-height (+ first-reward-cycle lock-period)) })))) + +(define-public (revoke-delegate-stx) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-delete delegation-state { stacker: tx-sender })))) + +;; Delegate to `delegate-to` the ability to stack from a given address. +;; This method _does not_ lock the funds, rather, it allows the delegate +;; to issue the stacking lock. +;; The caller specifies: +;; * amount-ustx: the total amount of ustx the delegate may be allowed to lock +;; * until-burn-ht: an optional burn height at which this delegation expires +;; * pox-addr: an optional address to which any rewards *must* be sent +(define-public (delegate-stx (amount-ustx uint) + (delegate-to principal) + (until-burn-ht (optional uint)) + (pox-addr (optional { version: (buff 1), + hashbytes: (buff 32) }))) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; delegate-stx no longer requires the delegator to not currently + ;; be stacking. + ;; delegate-stack-* functions assert that + ;; 1. users can't swim in two pools at the same time. + ;; 2. users can't switch pools without cool down cycle. + ;; Other pool admins can't increase or extend. + ;; 3. users can't join a pool while already directly stacking. + + ;; pox-addr, if given, must be valid + (match pox-addr + address + (asserts! (check-pox-addr-version (get version address)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + true) + + ;; tx-sender must not be delegating + (asserts! 
(is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; add delegation record + (map-set delegation-state + { stacker: tx-sender } + { amount-ustx: amount-ustx, + delegated-to: delegate-to, + until-burn-ht: until-burn-ht, + pox-addr: pox-addr }) + + (ok true))) + +;; Commit partially stacked STX and allocate a new PoX reward address slot. +;; This allows a stacker/delegate to lock fewer STX than the minimal threshold in multiple transactions, +;; so long as: 1. The pox-addr is the same. +;; 2. This "commit" transaction is called _before_ the PoX anchor block. +;; This ensures that each entry in the reward set returned to the stacks-node is greater than the threshold, +;; but does not require it be all locked up within a single transaction +;; +;; Returns (ok uint) on success, where the given uint is the reward address's index in the list of reward +;; addresses allocated in this reward cycle. This index can then be passed to `stack-aggregation-increase` +;; to later increment the STX this PoX address represents, in amounts less than the stacking minimum. +;; +;; *New in Stacks 2.1.* +(define-private (inner-stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (let ((amount-ustx (get stacked-amount partial-stacked))) + (try! (can-stack-stx pox-addr amount-ustx reward-cycle u1)) + ;; Add the pox addr to the reward cycle, and extract the index of the PoX address + ;; so the delegator can later use it to call stack-aggregation-increase. 
+ (let ((add-pox-addr-info + (add-pox-addr-to-ith-reward-cycle + u0 + { pox-addr: pox-addr, + first-reward-cycle: reward-cycle, + num-cycles: u1, + reward-set-indexes: (list), + stacker: none, + amount-ustx: amount-ustx, + i: u0 })) + (pox-addr-index (unwrap-panic + (element-at (get reward-set-indexes add-pox-addr-info) u0)))) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok pox-addr-index))))) + +;; Legacy interface for stack-aggregation-commit. +;; Wraps inner-stack-aggregation-commit. See its docstring for details. +;; Returns (ok true) on success +;; Returns (err ...) on failure. +(define-public (stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (match (inner-stack-aggregation-commit pox-addr reward-cycle) + pox-addr-index (ok true) + commit-err (err commit-err))) + +;; Public interface to `inner-stack-aggregation-commit`. See its documentation for details. +;; *New in Stacks 2.1.* +(define-public (stack-aggregation-commit-indexed (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (inner-stack-aggregation-commit pox-addr reward-cycle)) + +;; Commit partially stacked STX to a PoX address which has already received some STX (more than the Stacking min). +;; This allows a delegator to lock up marginally more STX from new delegates, even if they collectively do not +;; exceed the Stacking minimum, so long as the target PoX address already represents at least as many STX as the +;; Stacking minimum. 
+;; +;; The `reward-cycle-index` is emitted as a contract event from `stack-aggregation-commit` when the initial STX are +;; locked up by this delegator. It must be passed here to add more STX behind this PoX address. If the delegator +;; called `stack-aggregation-commit` multiple times for the same PoX address, then any such `reward-cycle-index` will +;; work here. +;; +;; *New in Stacks 2.1* +;; +(define-public (stack-aggregation-increase (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (reward-cycle-index uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; reward-cycle must be in the future + (asserts! (> reward-cycle (current-pox-reward-cycle)) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((amount-ustx (get stacked-amount partial-stacked)) + ;; reward-cycle must point to an existing record in reward-cycle-total-stacked + ;; infallible; getting something from partial-stacked-by-cycle succeeded so this must succeed + (existing-total (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + ;; reward-cycle and reward-cycle-index must point to an existing record in reward-cycle-pox-address-list + (existing-entry (unwrap! (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }) + (err ERR_DELEGATION_NO_REWARD_SLOT))) + (increased-ustx (+ (get total-ustx existing-entry) amount-ustx)) + (total-ustx (+ (get total-ustx existing-total) amount-ustx))) + + ;; must be stackable + (try! (minimal-can-stack-stx pox-addr total-ustx reward-cycle u1)) + + ;; new total must exceed the stacking minimum + (asserts! 
(<= (get-stacking-minimum) total-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + ;; there must *not* be a stacker entry (since this is a delegator) + (asserts! (is-none (get stacker existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; the given PoX address must match the one on record + (asserts! (is-eq pox-addr (get pox-addr existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; update the pox-address list -- bump the total-ustx + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: pox-addr, + total-ustx: increased-ustx, + stacker: none }) + + ;; update the total ustx in this cycle + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: total-ustx }) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok true)))) + +;; As a delegate, stack the given principal's STX using partial-stacked-by-cycle +;; Once the delegate has stacked > minimum, the delegate should call stack-aggregation-commit +(define-public (delegate-stack-stx (stacker principal) + (amount-ustx uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (start-burn-ht uint) + (lock-period uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht))) + (unlock-burn-height (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 lock-period)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to 
"post-date" their `stack-stx` transaction + (asserts! (is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + unlock-burn-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; stacker principal must not be stacking + (asserts! (is-none (get-stacker-info stacker)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance stacker) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; ensure that stacking can be performed + (try! (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-reward-cycle lock-period amount-ustx) + + ;; add stacker record + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + first-reward-cycle: first-reward-cycle, + reward-set-indexes: (list), + lock-period: lock-period, + delegated-to: (some tx-sender) }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + lock-amount: amount-ustx, + unlock-burn-height: unlock-burn-height }))) + +;; Reject Stacking for this reward cycle. +;; tx-sender votes all its uSTX for rejection. +;; Note that unlike PoX, rejecting PoX does not lock the tx-sender's +;; tokens. PoX rejection acts like a coin vote. +(define-public (reject-pox) + (let ( + (balance (stx-get-balance tx-sender)) + (vote-reward-cycle (+ u1 (current-pox-reward-cycle))) + ) + + ;; tx-sender principal must not have rejected in this upcoming reward cycle + (asserts! (is-none (get-pox-rejection tx-sender vote-reward-cycle)) + (err ERR_STACKING_ALREADY_REJECTED)) + + ;; tx-sender can't be a stacker + (asserts! 
(is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; vote for rejection + (map-set stacking-rejection + { reward-cycle: vote-reward-cycle } + { amount: (+ (next-cycle-rejection-votes) balance) } + ) + + ;; mark voted + (map-set stacking-rejectors + { stacker: tx-sender, reward-cycle: vote-reward-cycle } + { amount: balance } + ) + + (ok true)) +) + +;; Used for PoX parameters discovery +(define-read-only (get-pox-info) + (ok { + min-amount-ustx: (get-stacking-minimum), + reward-cycle-id: (current-pox-reward-cycle), + prepare-cycle-length: (var-get pox-prepare-cycle-length), + first-burnchain-block-height: (var-get first-burnchain-block-height), + reward-cycle-length: (var-get pox-reward-cycle-length), + rejection-fraction: (var-get pox-rejection-fraction), + current-rejection-votes: (next-cycle-rejection-votes), + total-liquid-supply-ustx: stx-liquid-supply, + }) +) + +;; Update the number of stacked STX in a given reward cycle entry. +;; `reward-cycle-index` is the index into the `reward-cycle-pox-address-list` map for a given reward cycle number. +;; `updates`, if `(some ..)`, encodes which PoX reward cycle entry (if any) gets updated. In particular, it must have +;; `(some stacker)` as the listed stacker, and must be an upcoming reward cycle. +(define-private (increase-reward-cycle-entry + (reward-cycle-index uint) + (updates (optional { first-cycle: uint, reward-cycle: uint, stacker: principal, add-amount: uint }))) + (let ((data (try! updates)) + (first-cycle (get first-cycle data)) + (reward-cycle (get reward-cycle data))) + (if (> first-cycle reward-cycle) + ;; not at first cycle to process yet + (some { first-cycle: first-cycle, reward-cycle: (+ u1 reward-cycle), stacker: (get stacker data), add-amount: (get add-amount data) }) + (let ((existing-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }))) + (existing-total (unwrap-panic (map-get? 
reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + (add-amount (get add-amount data)) + (total-ustx (+ (get total-ustx existing-total) add-amount))) + ;; stacker must match + (asserts! (is-eq (get stacker existing-entry) (some (get stacker data))) none) + ;; update the pox-address list + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: (get pox-addr existing-entry), + ;; This addresses the bug in pox-2 (see SIP-022) + total-ustx: (+ (get total-ustx existing-entry) add-amount), + stacker: (some (get stacker data)) }) + ;; update the total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: total-ustx }) + (some { first-cycle: first-cycle, + reward-cycle: (+ u1 reward-cycle), + stacker: (get stacker data), + add-amount: (get add-amount data) }))))) + +;; Increase the number of STX locked. +;; *New in Stacks 2.1* +;; This method locks up an additional amount of STX from `tx-sender`'s, indicated +;; by `increase-by`. The `tx-sender` must already be Stacking. +(define-public (stack-increase (increase-by uint)) + (let ((stacker-info (stx-account tx-sender)) + (amount-stacked (get locked stacker-info)) + (amount-unlocked (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (cur-cycle (current-pox-reward-cycle)) + (first-increased-cycle (+ cur-cycle u1)) + (stacker-state (unwrap! (map-get? stacking-state + { stacker: tx-sender }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + ;; tx-sender must be currently locked + (asserts! (> amount-stacked u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + ;; stacker must have enough stx to lock + (asserts! (>= amount-unlocked increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! 
(check-caller-allowed)
+ (err ERR_STACKING_PERMISSION_DENIED))
+ ;; stacker must be directly stacking
+ (asserts! (> (len (get reward-set-indexes stacker-state)) u0)
+ (err ERR_STACKING_IS_DELEGATED))
+ ;; stacker must not be delegating
+ (asserts! (is-none (get delegated-to stacker-state))
+ (err ERR_STACKING_IS_DELEGATED))
+ ;; update reward cycle amounts
+ (asserts! (is-some (fold increase-reward-cycle-entry
+ (get reward-set-indexes stacker-state)
+ (some { first-cycle: first-increased-cycle,
+ reward-cycle: (get first-reward-cycle stacker-state),
+ stacker: tx-sender,
+ add-amount: increase-by })))
+ (err ERR_STACKING_UNREACHABLE))
+ ;; NOTE: stacking-state map is unchanged: it does not track amount-stacked in PoX-3
+ (ok { stacker: tx-sender, total-locked: (+ amount-stacked increase-by)})))
+
+;; Extend an active Stacking lock.
+;; *New in Stacks 2.1*
+;; This method extends the `tx-sender`'s current lockup for an additional `extend-count`
+;; and associates `pox-addr` with the rewards
+(define-public (stack-extend (extend-count uint)
+ (pox-addr { version: (buff 1), hashbytes: (buff 32) }))
+ (let ((stacker-info (stx-account tx-sender))
+ ;; to extend, there must already be an entry in the stacking-state
+ (stacker-state (unwrap! (get-stacker-info tx-sender) (err ERR_STACK_EXTEND_NOT_LOCKED)))
+ (amount-ustx (get locked stacker-info))
+ (unlock-height (get unlock-height stacker-info))
+ (cur-cycle (current-pox-reward-cycle))
+ ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked
+ (first-extend-cycle (burn-height-to-reward-cycle unlock-height))
+ ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle)
+ (cur-first-reward-cycle (get first-reward-cycle stacker-state))
+ (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle)))
+
+ ;; must be called with positive extend-count
+ (asserts! 
(>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be directly stacking + (asserts! (> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_IS_DELEGATED)) + + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) + + ;; TODO: add more assertions to sanity check the `stacker-info` values with + ;; the `stacker-state` values + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! (> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender must be locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; standard can-stack-stx checks + (try! (can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked + ;; for the new cycles + (let ((extended-reward-set-indexes (try! (add-pox-addr-to-reward-cycles pox-addr first-extend-cycle extend-count amount-ustx tx-sender))) + (reward-set-indexes + ;; use the active stacker state and extend the existing reward-set-indexes + (let ((cur-cycle-index (- first-reward-cycle (get first-reward-cycle stacker-state))) + (old-indexes (get reward-set-indexes stacker-state)) + ;; build index list by taking the old-indexes starting from cur cycle + ;; and adding the new indexes to it. 
this way, the index is valid starting from the current cycle + (new-list (concat (default-to (list) (slice? old-indexes cur-cycle-index (len old-indexes))) + extended-reward-set-indexes))) + (unwrap-panic (as-max-len? new-list u12))))) + ;; update stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return lock-up information + (ok { stacker: tx-sender, unlock-burn-height: new-unlock-ht }))))) + +;; As a delegator, increase an active Stacking lock, issuing a "partial commitment" for the +;; increased cycles. +;; *New in Stacks 2.1* +;; This method increases `stacker`'s current lockup and partially commits the additional +;; STX to `pox-addr` +(define-public (delegate-stack-increase + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (increase-by uint)) + (let ((stacker-info (stx-account stacker)) + (existing-lock (get locked stacker-info)) + (available-stx (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info))) + + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + + (let ((unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + (first-increase-cycle (+ cur-cycle u1)) + (last-increase-cycle (- unlock-in-cycle u1)) + (cycle-count (try! (if (<= first-increase-cycle last-increase-cycle) + (ok (+ u1 (- last-increase-cycle first-increase-cycle))) + (err ERR_STACKING_INVALID_LOCK_PERIOD)))) + (new-total-locked (+ increase-by existing-lock)) + (stacker-state + (unwrap! (map-get? stacking-state { stacker: stacker }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! 
(check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must be currently locked + (asserts! (> existing-lock u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + + ;; stacker must have enough stx to lock + (asserts! (>= available-stx increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED))) + (delegated-to (get delegated-to delegation-info)) + (delegated-amount (get amount-ustx delegation-info)) + (delegated-pox-addr (get pox-addr delegation-info)) + (delegated-until (get until-burn-ht delegation-info))) + ;; must have delegated to tx-sender + (asserts! (is-eq delegated-to tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= delegated-amount new-total-locked) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match delegated-pox-addr + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match delegated-until + until-burn-ht + (>= until-burn-ht unlock-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr new-total-locked first-increase-cycle (+ u1 (- last-increase-cycle first-increase-cycle)))) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-increase-cycle cycle-count increase-by) + + ;; stacking-state is unchanged, so no need to update + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, total-locked: new-total-locked})))) + +;; As a delegator, extend an active stacking lock, issuing a "partial commitment" for the +;; extended-to cycles. +;; *New in Stacks 2.1* +;; This method extends `stacker`'s current lockup for an additional `extend-count` +;; and partially commits those new cycles to `pox-addr` +(define-public (delegate-stack-extend + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (extend-count uint)) + (let ((stacker-info (stx-account stacker)) + ;; to extend, there must already be an entry in the stacking-state + (stacker-state (unwrap! (get-stacker-info stacker) (err ERR_STACK_EXTEND_NOT_LOCKED))) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) + + ;; must be called with positive extend-count + (asserts! (>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! 
(> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; check valid lock period + (asserts! (check-pox-lock-period lock-period) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be currently locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + new-unlock-ht) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-extend-cycle extend-count amount-ustx) + + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + reward-set-indexes: (list), + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: (some tx-sender) }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + unlock-burn-height: new-unlock-ht })))) + +;; Get the _current_ PoX stacking delegation information for a stacker. If the information +;; is expired, or if there's never been such a stacker, then returns none. +;; *New in Stacks 2.1* +(define-read-only (get-delegation-info (stacker principal)) + (get-check-delegation stacker) +) + +;; Get the burn height at which a particular contract is allowed to stack for a particular principal. +;; *New in Stacks 2.1* +;; Returns (some (some X)) if X is the burn height at which the allowance terminates +;; Returns (some none) if the caller is allowed indefinitely +;; Returns none if there is no allowance record +(define-read-only (get-allowance-contract-callers (sender principal) (calling-contract principal)) + (map-get? allowance-contract-callers { sender: sender, contract-caller: calling-contract }) +) + +;; How many PoX addresses in this reward cycle? +;; *New in Stacks 2.1* +(define-read-only (get-num-reward-set-pox-addresses (reward-cycle uint)) + (match (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle }) + num-addrs + (get len num-addrs) + u0 + ) +) + +;; How many uSTX have been locked up for this address so far, before the delegator commits them? +;; *New in Stacks 2.1* +(define-read-only (get-partial-stacked-by-cycle (pox-addr { version: (buff 1), hashbytes: (buff 32) }) (reward-cycle uint) (sender principal)) + (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, reward-cycle: reward-cycle, sender: sender }) +) + +;; How many uSTX have voted to reject PoX in a given reward cycle? 
+;; *New in Stacks 2.1* +(define-read-only (get-total-pox-rejection (reward-cycle uint)) + (match (map-get? stacking-rejection { reward-cycle: reward-cycle }) + rejected + (get amount rejected) + u0 + ) +) From d4640a8b85d3538e0a9583e7475927e85fe1041f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:17:26 -0400 Subject: [PATCH 073/122] feat: plumb through nakamoto block-mined event handler --- testnet/stacks-node/src/event_dispatcher.rs | 68 +++++++++++++++++++++ 1 file changed, 68 insertions(+) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 6d3f216ea4..80b803d185 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -12,6 +12,7 @@ use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::burn::operations::BlockstackOperationType; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::BlockEventDispatcher; +use stacks::chainstate::nakamoto::NakamotoBlock; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use stacks::chainstate::stacks::db::StacksHeaderInfo; @@ -66,6 +67,7 @@ pub const PATH_MEMPOOL_TX_SUBMIT: &str = "new_mempool_tx"; pub const PATH_MEMPOOL_TX_DROP: &str = "drop_mempool_tx"; pub const PATH_MINED_BLOCK: &str = "mined_block"; pub const PATH_MINED_MICROBLOCK: &str = "mined_microblock"; +pub const PATH_MINED_NAKAMOTO_BLOCK: &str = "mined_nakamoto_block"; pub const PATH_STACKERDB_CHUNKS: &str = "stackerdb_chunks"; pub const PATH_BURN_BLOCK_SUBMIT: &str = "new_burn_block"; pub const PATH_BLOCK_PROCESSED: &str = "new_block"; @@ -91,6 +93,17 @@ pub struct MinedMicroblockEvent { pub anchor_block: BlockHeaderHash, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct MinedNakamotoBlockEvent { + pub target_burn_height: u64, + pub block_hash: String, + pub block_id: String, + pub stacks_height: u64, + 
pub block_size: u64, + pub cost: ExecutionCost, + pub tx_events: Vec, +} + impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { let body = match serde_json::to_vec(&payload) { @@ -342,6 +355,10 @@ impl EventObserver { fn send_mined_microblock(&self, payload: &serde_json::Value) { self.send_payload(payload, PATH_MINED_MICROBLOCK); } + + fn send_mined_nakamoto_block(&self, payload: &serde_json::Value) { + self.send_payload(payload, PATH_MINED_NAKAMOTO_BLOCK); + } fn send_stackerdb_chunks(&self, payload: &serde_json::Value) { self.send_payload(payload, PATH_STACKERDB_CHUNKS); @@ -465,6 +482,23 @@ impl MemPoolEventDispatcher for EventDispatcher { anchor_block, ); } + + fn mined_nakamoto_block_event( + &self, + target_burn_height: u64, + block: &NakamotoBlock, + block_size_bytes: u64, + consumed: &ExecutionCost, + tx_events: Vec, + ) { + self.process_mined_nakamoto_block_event( + target_burn_height, + block, + block_size_bytes, + consumed, + tx_events, + ) + } } impl StackerDBEventDispatcher for EventDispatcher { @@ -903,6 +937,40 @@ impl EventDispatcher { observer.send_mined_microblock(&payload); } } + + pub fn process_mined_nakamoto_block_event( + &self, + target_burn_height: u64, + block: &NakamotoBlock, + block_size_bytes: u64, + consumed: &ExecutionCost, + tx_events: Vec, + ) { + let interested_observers: Vec<_> = self + .registered_observers + .iter() + .enumerate() + .filter(|(obs_id, _observer)| self.miner_observers_lookup.contains(&(*obs_id as u16))) + .collect(); + if interested_observers.len() < 1 { + return; + } + + let payload = serde_json::to_value(MinedNakamotoBlockEvent { + target_burn_height, + block_hash: block.header.block_hash().to_string(), + block_id: block.header.block_id().to_string(), + stacks_height: block.header.chain_length, + block_size: block_size_bytes, + cost: consumed.clone(), + tx_events, + }) + .unwrap(); + + for (_, observer) in interested_observers.iter() { + 
observer.send_mined_nakamoto_block(&payload); + } + } /// Forward newly-accepted StackerDB chunk metadata to downstream `stackerdb` observers. /// Infallible. From 8341cf4906c63af8bb3dfcd64bb77be848e4cd47 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:17:43 -0400 Subject: [PATCH 074/122] chore: new coinbase variant --- testnet/stacks-node/src/neon_node.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index aa0c31c9a1..55177a23a0 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -1269,7 +1269,7 @@ impl BlockMinerThread { let mut tx = StacksTransaction::new( version, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), recipient_opt), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), recipient_opt, None), ); tx.chain_id = chain_id; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; From 5f3431f09ddf1d07252dd6ac46f4519b238cda98 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:17:55 -0400 Subject: [PATCH 075/122] chore: new coinbase variant --- testnet/stacks-node/src/node.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 933e8d232a..063a7f5f9b 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -1066,7 +1066,7 @@ impl Node { let mut tx = StacksTransaction::new( version, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx.chain_id = self.config.burnchain.chain_id; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; From 4fcd30aceee844adbd6315e3176282a65587d1f9 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:18:10 -0400 Subject: [PATCH 076/122] chore: mock v3 unlock and pox-4 activation --- 
testnet/stacks-node/src/tests/epoch_21.rs | 30 +++++++++++++++++++++++ testnet/stacks-node/src/tests/epoch_22.rs | 6 +++++ testnet/stacks-node/src/tests/epoch_23.rs | 2 ++ testnet/stacks-node/src/tests/epoch_24.rs | 4 +++ 4 files changed, 42 insertions(+) diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 0659ca391e..0b2c9267ee 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -120,6 +120,8 @@ fn advance_to_2_1( u32::max_value(), u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); burnchain_config.pox_constants = pox_constants.clone(); @@ -619,6 +621,8 @@ fn transition_fixes_bitcoin_rigidity() { u32::max_value(), u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1063,6 +1067,8 @@ fn transition_adds_get_pox_addr_recipients() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1369,6 +1375,8 @@ fn transition_adds_mining_from_segwit() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1534,6 +1542,8 @@ fn transition_removes_pox_sunset() { (epoch_21 as u32) + 1, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1816,6 +1826,8 @@ fn transition_empty_blocks() { (epoch_2_1 + 1) as u32, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -2174,6 +2186,8 @@ fn test_pox_reorgs_three_flaps() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -2710,6 +2724,8 @@ fn test_pox_reorg_one_flap() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3134,6 +3150,8 @@ fn test_pox_reorg_flap_duel() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); 
burnchain_config.pox_constants = pox_constants.clone(); @@ -3568,6 +3586,8 @@ fn test_pox_reorg_flap_reward_cycles() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3996,6 +4016,8 @@ fn test_pox_missing_five_anchor_blocks() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4396,6 +4418,8 @@ fn test_sortition_divergence_pre_21() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4760,6 +4784,8 @@ fn trait_invocation_cross_epoch() { u32::max_value(), u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5006,6 +5032,8 @@ fn test_v1_unlock_height_with_current_stackers() { v1_unlock_height as u32, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5268,6 +5296,8 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { v1_unlock_height as u32, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 1ed40ef9f9..f20f59af46 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -176,6 +176,8 @@ fn disable_pox() { v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -706,6 +708,8 @@ fn pox_2_unlock_all() { v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1398,6 +1402,8 @@ fn test_pox_reorg_one_flap() { v1_unlock_height, v2_unlock_height.try_into().unwrap(), u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); 
diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index ac9524f07c..f9f2090c64 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -141,6 +141,8 @@ fn trait_invocation_behavior() { v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 97f9744223..a374c945a9 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -187,7 +187,9 @@ fn fix_to_pox_contract() { u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, + u32::MAX, pox_3_activation_height as u32, + u32::MAX ); burnchain_config.pox_constants = pox_constants.clone(); @@ -823,7 +825,9 @@ fn verify_auto_unlock_behavior() { u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, + u32::MAX, pox_3_activation_height as u32, + u32::MAX ); burnchain_config.pox_constants = pox_constants.clone(); From ee535b552b26945c359c6634b80db2bbc167481b Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:18:27 -0400 Subject: [PATCH 077/122] chore: new coinbase variant --- testnet/stacks-node/src/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index f4b136e54b..106dd31877 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -355,7 +355,7 @@ pub fn make_poison( } pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64) -> Vec { - let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None); + let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) } From 
fd2d51dfedb8253a407ec9b96b44390a3d5eb093 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 2 Nov 2023 14:18:41 -0400 Subject: [PATCH 078/122] feat: event observer for mined nakamoto blocks --- .../src/tests/neon_integrations.rs | 51 ++++++++++++++++++- 1 file changed, 49 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 116e2954da..8dd46f905d 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -212,7 +212,7 @@ pub mod test_observer { use warp; use warp::Filter; - use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, StackerDBChunksEvent}; + use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, MinedNakamotoBlockEvent, StackerDBChunksEvent}; pub const EVENT_OBSERVER_PORT: u16 = 50303; @@ -220,6 +220,7 @@ pub mod test_observer { pub static ref NEW_BLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref MINED_BLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref MINED_MICROBLOCKS: Mutex> = Mutex::new(Vec::new()); + pub static ref MINED_NAKAMOTO_BLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref NEW_MICROBLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref NEW_STACKERDB_CHUNKS: Mutex> = Mutex::new(Vec::new()); @@ -250,7 +251,7 @@ pub mod test_observer { microblock_events.push(microblocks); Ok(warp::http::StatusCode::OK) } - + async fn handle_stackerdb_chunks( chunks: serde_json::Value, ) -> Result { @@ -309,6 +310,41 @@ pub mod test_observer { mined_txs.push(serde_json::from_value(tx_event).unwrap()); Ok(warp::http::StatusCode::OK) } + + async fn handle_mined_nakamoto_block(block: serde_json::Value) -> Result { + let mut mined_blocks = MINED_NAKAMOTO_BLOCKS.lock().unwrap(); + // assert that the mined transaction events have string-y txids + block + .as_object() + .expect("Expected JSON object for mined nakamoto block event") + .get("tx_events") + 
.expect("Expected tx_events key in mined nakamoto block event") + .as_array() + .expect("Expected tx_events key to be an array in mined nakamoto block event") + .iter() + .for_each(|txevent| { + let txevent_obj = txevent.as_object().expect("TransactionEvent should be object"); + let inner_obj = if let Some(inner_obj) = txevent_obj.get("Success") { + inner_obj + } else if let Some(inner_obj) = txevent_obj.get("ProcessingError") { + inner_obj + } else if let Some(inner_obj) = txevent_obj.get("Skipped") { + inner_obj + } else { + panic!("TransactionEvent object should have one of Success, ProcessingError, or Skipped") + }; + inner_obj + .as_object() + .expect("TransactionEvent should be an object") + .get("txid") + .expect("Should have txid key") + .as_str() + .expect("Expected txid to be a string"); + }); + + mined_blocks.push(serde_json::from_value(block).unwrap()); + Ok(warp::http::StatusCode::OK) + } async fn handle_mempool_txs(txs: serde_json::Value) -> Result { let new_rawtxs = txs @@ -419,6 +455,10 @@ pub mod test_observer { .and(warp::post()) .and(warp::body::json()) .and_then(handle_mined_block); + let mined_nakamoto_blocks = warp::path!("mined_nakamoto_block") + .and(warp::post()) + .and(warp::body::json()) + .and_then(handle_mined_nakamoto_block); let mined_microblocks = warp::path!("mined_microblock") .and(warp::post()) .and(warp::body::json()) @@ -438,6 +478,7 @@ pub mod test_observer { .or(new_microblocks) .or(mined_blocks) .or(mined_microblocks) + .or(mined_nakamoto_blocks) .or(new_stackerdb_chunks), ) .run(([127, 0, 0, 1], EVENT_OBSERVER_PORT)) @@ -1883,6 +1924,8 @@ fn stx_delegate_btc_integration_test() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5909,6 +5952,8 @@ fn pox_integration_test() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -10617,6 +10662,8 @@ fn test_competing_miners_build_on_same_chain( 
u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); From 1a773bd93a508cc128ba19eb21689a81b762516a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:40:46 -0500 Subject: [PATCH 079/122] chore: cargo fmt --- clarity/src/vm/database/clarity_db.rs | 12 +++--- clarity/src/vm/database/structures.rs | 22 ++++++++--- clarity/src/vm/docs/mod.rs | 4 +- clarity/src/vm/functions/assets.rs | 6 ++- clarity/src/vm/test_util/mod.rs | 4 +- stackslib/src/chainstate/stacks/block.rs | 6 +-- .../chainstate/stacks/boot/contract_tests.rs | 4 +- stackslib/src/chainstate/stacks/boot/mod.rs | 27 +++++++++++--- .../src/chainstate/stacks/boot/pox_2_tests.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 16 ++++---- stackslib/src/chainstate/stacks/miner.rs | 10 ++--- stackslib/src/chainstate/stacks/tests/mod.rs | 12 +++--- .../src/chainstate/stacks/transaction.rs | 37 ++++++++++++------- stackslib/src/clarity_vm/clarity.rs | 12 +++--- stackslib/src/clarity_vm/database/mod.rs | 8 ++-- stackslib/src/core/mod.rs | 5 +-- stackslib/src/net/rpc.rs | 2 +- testnet/stacks-node/src/event_dispatcher.rs | 6 +-- testnet/stacks-node/src/tests/epoch_24.rs | 4 +- .../src/tests/neon_integrations.rs | 15 +++++--- 20 files changed, 128 insertions(+), 86 deletions(-) diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index c7c058175c..116b3ba328 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -202,7 +202,7 @@ impl BurnStateDB for &dyn BurnStateDB { fn get_v2_unlock_height(&self) -> u32 { (*self).get_v2_unlock_height() } - + fn get_v3_unlock_height(&self) -> u32 { (*self).get_v3_unlock_height() } @@ -210,7 +210,7 @@ impl BurnStateDB for &dyn BurnStateDB { fn get_pox_3_activation_height(&self) -> u32 { (*self).get_pox_3_activation_height() } - + fn get_pox_4_activation_height(&self) -> u32 { (*self).get_pox_4_activation_height() } @@ -388,7 
+388,7 @@ impl BurnStateDB for NullBurnStateDB { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } - + fn get_v3_unlock_height(&self) -> u32 { u32::MAX } @@ -396,7 +396,7 @@ impl BurnStateDB for NullBurnStateDB { fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } - + fn get_pox_4_activation_height(&self) -> u32 { u32::MAX } @@ -837,7 +837,7 @@ impl<'a> ClarityDatabase<'a> { pub fn get_pox_3_activation_height(&self) -> u32 { self.burn_state_db.get_pox_3_activation_height() } - + /// Return the height for PoX 4 activation from the burn state db pub fn get_pox_4_activation_height(&self) -> u32 { self.burn_state_db.get_pox_4_activation_height() @@ -852,7 +852,7 @@ impl<'a> ClarityDatabase<'a> { u32::MAX } } - + /// Return the height for PoX v3 -> v4 auto unlocks /// from the burn state db pub fn get_v3_unlock_height(&mut self) -> u32 { diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 35ed8b9469..393838ad7a 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -149,7 +149,7 @@ pub enum STXBalance { LockedPoxFour { amount_unlocked: u128, amount_locked: u128, - unlock_height: u64 + unlock_height: u64, }, } @@ -392,7 +392,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { self.burn_block_height, v1_unlock_height, v2_unlock_height, - v3_unlock_height + v3_unlock_height, ) } @@ -401,7 +401,12 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { let v2_unlock_height = self.db_ref.get_v2_unlock_height(); let v3_unlock_height = self.db_ref.get_v3_unlock_height(); self.balance - .canonical_repr_at_block(self.burn_block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height) + .canonical_repr_at_block( + self.burn_block_height, + v1_unlock_height, + v2_unlock_height, + v3_unlock_height, + ) .0 } @@ -413,7 +418,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { self.burn_block_height, v1_unlock_height, v2_unlock_height, - v3_unlock_height + v3_unlock_height, ) } @@ 
-719,7 +724,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { STXBalance::LockedPoxThree { .. } ) } - + //////////////// Pox-4 ////////////////// /// Lock `amount_to_lock` tokens on this account until `unlock_burn_height`. @@ -926,7 +931,12 @@ impl STXBalance { /// *while* factoring in the PoX 2 early unlock for PoX 1 and PoX 3 early unlock for PoX 2. /// This value is still lazy: this unlock height may be less than the current /// burn block height, if so it will be updated in a canonicalized view. - pub fn effective_unlock_height(&self, v1_unlock_height: u32, v2_unlock_height: u32, v3_unlock_height: u32) -> u64 { + pub fn effective_unlock_height( + &self, + v1_unlock_height: u32, + v2_unlock_height: u32, + v3_unlock_height: u32, + ) -> u64 { match self { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { unlock_height, .. } => { diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index 8f1dafc660..2444bfc5fc 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2772,7 +2772,7 @@ mod test { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } - + fn get_v3_unlock_height(&self) -> u32 { u32::MAX } @@ -2780,7 +2780,7 @@ mod test { fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } - + fn get_pox_4_activation_height(&self) -> u32 { u32::MAX } diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index 5653d7cbb3..fa5086639f 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -249,7 +249,11 @@ pub fn special_stx_account( ), ( "unlock-height".try_into().unwrap(), - Value::UInt(stx_balance.effective_unlock_height(v1_unlock_ht, v2_unlock_ht, v3_unlock_ht) as u128), + Value::UInt(stx_balance.effective_unlock_height( + v1_unlock_ht, + v2_unlock_ht, + v3_unlock_ht, + ) as u128), ), ]) .map(Value::Tuple) diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index ffe9db8de0..957c65e6eb 100644 --- 
a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -232,7 +232,7 @@ impl BurnStateDB for UnitTestBurnStateDB { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } - + fn get_v3_unlock_height(&self) -> u32 { u32::MAX } @@ -240,7 +240,7 @@ impl BurnStateDB for UnitTestBurnStateDB { fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } - + fn get_pox_4_activation_height(&self) -> u32 { u32::MAX } diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 2cb7eb98ce..2485a771e9 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -1733,7 +1733,7 @@ mod test { None, ), ); - + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); let tx_coinbase_proof = StacksTransaction::new( @@ -1804,12 +1804,12 @@ mod test { cause: TenureChangeCause::BlockFound, pubkey_hash: Hash160([0x00; 20]), signature: SchnorrThresholdSignature::empty(), - signers: vec![] + signers: vec![], }; let tx_tenure_change = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), - TransactionPayload::TenureChange(tenure_change_payload) + TransactionPayload::TenureChange(tenure_change_payload), ); let dup_txs = vec![ diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index a8af6a7b92..b6ecd2a5a5 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -407,7 +407,7 @@ impl BurnStateDB for TestSimBurnStateDB { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } - + fn get_v3_unlock_height(&self) -> u32 { u32::MAX } @@ -415,7 +415,7 @@ impl BurnStateDB for TestSimBurnStateDB { fn 
get_pox_3_activation_height(&self) -> u32 { u32::MAX } - + fn get_pox_4_activation_height(&self) -> u32 { u32::MAX } diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 33ef391ce5..8bba21285f 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -356,7 +356,7 @@ impl StacksChainState { ) -> Result, Error> { Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_3_NAME) } - + /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. /// @@ -987,7 +987,7 @@ impl StacksChainState { Ok(ret) } - + /// Get all PoX reward addresses from .pox-4 /// TODO: also return their stacker signer keys (as part of `RawRewardSetEntry` fn get_reward_addresses_pox_4( @@ -1099,7 +1099,10 @@ impl StacksChainState { .pox_constants .active_pox_contract(reward_cycle_start_height); - debug!("Active PoX contract at {} (burn height {}): {}", block_id, current_burn_height, &pox_contract_name); + debug!( + "Active PoX contract at {} (burn height {}): {}", + block_id, current_burn_height, &pox_contract_name + ); let result = match pox_contract_name { x if x == POX_1_NAME => self.get_reward_addresses_pox_1(sortdb, block_id, reward_cycle), x if x == POX_2_NAME => self.get_reward_addresses_pox_2(sortdb, block_id, reward_cycle), @@ -1222,8 +1225,20 @@ pub mod test { #[test] fn get_reward_threshold_units() { - let test_pox_constants = - PoxConstants::new(501, 1, 1, 1, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX, u32::MAX, u32::MAX); + let test_pox_constants = PoxConstants::new( + 501, + 1, + 1, + 1, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); // when the liquid amount = the threshold step, // the threshold should always be the step size. 
let liquid = POX_THRESHOLD_STEPS_USTX; @@ -1603,7 +1618,7 @@ pub mod test { ) -> StacksTransaction { make_pox_2_or_3_lockup(key, nonce, amount, addr, lock_period, burn_ht, POX_3_NAME) } - + /// TODO: add signer key pub fn make_pox_4_lockup( key: &StacksPrivateKey, diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index d6ef06c15a..51e203a014 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -528,7 +528,7 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c // the invariant checks will not make sense for the same reasons as above continue; } - + if tip_epoch.epoch_id >= StacksEpochId::Epoch25 && current_burn_height <= peer.config.burnchain.pox_constants.pox_4_activation_height diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 1fe1a07dbd..eba256bde1 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -5298,13 +5298,11 @@ impl StacksChainState { pox_start_cycle_info, ) } - StacksEpochId::Epoch24 => { - Self::handle_pox_cycle_start_pox_3( - clarity_tx, - pox_reward_cycle, - pox_start_cycle_info, - ) - } + StacksEpochId::Epoch24 => Self::handle_pox_cycle_start_pox_3( + clarity_tx, + pox_reward_cycle, + pox_start_cycle_info, + ), StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { Self::handle_pox_cycle_start_pox_4( clarity_tx, @@ -6912,8 +6910,8 @@ impl StacksChainState { return Err(MemPoolRejection::BadAddressVersionByte); } - let (block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height) = clarity_connection - .with_clarity_db_readonly(|ref mut db| { + let (block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height) = + clarity_connection.with_clarity_db_readonly(|ref mut db| { ( db.get_current_burnchain_block_height() as u64, db.get_v1_unlock_height(), diff 
--git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index ea18d7f9bf..8fe4996c40 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -619,7 +619,7 @@ pub trait BlockBuilder { limit_behavior: &BlockLimitFunction, ast_rules: ASTRules, ) -> TransactionResult; - + /// Append a transaction if doing so won't exceed the epoch data size. /// Errors out if we fail to mine the tx (exceed budget, or the transaction is invalid). fn try_mine_tx( @@ -2152,7 +2152,7 @@ impl StacksBlockBuilder { coinbase_tx: Option<&StacksTransaction>, settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, - ast_rules: ASTRules + ast_rules: ASTRules, ) -> Result<(bool, Vec), Error> { let max_miner_time_ms = settings.max_miner_time_ms; let mempool_settings = settings.mempool_settings.clone(); @@ -2193,7 +2193,7 @@ impl StacksBlockBuilder { &parent_stacks_header.anchored_header.block_hash() ); let result = { - let mut intermediate_result : Result<_, Error> = Ok(0); + let mut intermediate_result: Result<_, Error> = Ok(0); while block_limit_hit != BlockLimitFunction::LIMIT_REACHED { let mut num_considered = 0; intermediate_result = mempool.iterate_candidates( @@ -2442,7 +2442,7 @@ impl StacksBlockBuilder { let block_limit = epoch_tx .block_limit() .expect("Failed to obtain block limit from miner's block connection"); - + let (blocked, tx_events) = match Self::select_and_apply_transactions( &mut epoch_tx, &mut builder, @@ -2451,7 +2451,7 @@ impl StacksBlockBuilder { Some(coinbase_tx), settings, event_observer, - ast_rules + ast_rules, ) { Ok(x) => x, Err(e) => { diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 7d72951114..5c0ab78446 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -427,7 +427,7 @@ impl TestStacksNode { }; block_commit_op } - + pub fn 
get_last_key(&self, miner: &TestMiner) -> LeaderKeyRegisterOp { let last_vrf_pubkey = miner.last_VRF_public_key().unwrap(); let idx = *self.key_ops.get(&last_vrf_pubkey).unwrap(); @@ -445,7 +445,7 @@ impl TestStacksNode { } } } - + pub fn get_last_accepted_anchored_block( &self, sortdb: &SortitionDB, @@ -490,7 +490,7 @@ impl TestStacksNode { } return None; } - + pub fn get_microblock_stream( &self, miner: &TestMiner, @@ -508,7 +508,7 @@ impl TestStacksNode { Some(idx) => Some(self.anchored_blocks[*idx].clone()), } } - + pub fn get_last_winning_snapshot( ic: &SortitionDBConn, fork_tip: &BlockSnapshot, @@ -584,7 +584,7 @@ impl TestStacksNode { ); block_commit_op } - + /// Mine a single Stacks block and a microblock stream. /// Produce its block-commit. pub fn mine_stacks_block( @@ -1035,7 +1035,7 @@ pub fn make_coinbase_with_nonce( TransactionPayload::Coinbase( CoinbasePayload([(burnchain_height % 256) as u8; 32]), recipient, - None + None, ), ); tx_coinbase.chain_id = 0x80000000; diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 796140767c..8e02812240 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -31,8 +31,8 @@ use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use crate::burnchains::Txid; -use crate::chainstate::stacks::*; use crate::chainstate::stacks::TransactionPayloadID; +use crate::chainstate::stacks::*; use crate::core::*; use crate::net::Error as net_error; use stacks_common::codec::{read_next, write_next, Error as codec_error, StacksMessageCodec}; @@ -243,7 +243,12 @@ impl StacksMessageCodec for TransactionPayload { (Some(recipient), Some(vrf_proof)) => { write_next(fd, &(TransactionPayloadID::NakamotoCoinbase as u8))?; write_next(fd, buf)?; - write_next(fd, &Value::some(Value::Principal(recipient.clone())).expect("FATAL: failed to encode recipient principal as `optional`"))?; 
+ write_next( + fd, + &Value::some(Value::Principal(recipient.clone())).expect( + "FATAL: failed to encode recipient principal as `optional`", + ), + )?; write_next(fd, &vrf_proof.to_bytes().to_vec())?; } } @@ -321,7 +326,7 @@ impl StacksMessageCodec for TransactionPayload { TransactionPayload::Coinbase(payload, Some(recipient), None) } TransactionPayloadID::TenureChange => { - let payload : TenureChangePayload = read_next(fd)?; + let payload: TenureChangePayload = read_next(fd)?; TransactionPayload::TenureChange(payload) } // TODO: gate this! @@ -332,21 +337,20 @@ impl StacksMessageCodec for TransactionPayload { if let Some(principal_value) = optional_data.data { if let Value::Principal(recipient_principal) = *principal_value { Some(recipient_principal) - } - else { + } else { None } - } - else { + } else { None } - } - else { + } else { return Err(codec_error::DeserializeError("Failed to parse nakamoto coinbase transaction -- did not receive an optional recipient principal value".to_string())); }; let vrf_proof_bytes: Vec = read_next(fd)?; let Some(vrf_proof) = VRFProof::from_bytes(&vrf_proof_bytes) else { - return Err(codec_error::DeserializeError("Failed to decode coinbase VRF proof".to_string())); + return Err(codec_error::DeserializeError( + "Failed to decode coinbase VRF proof".to_string(), + )); }; TransactionPayload::Coinbase(payload, recipient_opt, Some(vrf_proof)) } @@ -677,7 +681,9 @@ impl StacksTransaction { } /// Try to convert to a coinbase payload - pub fn try_as_coinbase(&self) -> Option<(&CoinbasePayload, Option<&PrincipalData>, Option<&VRFProof>)> { + pub fn try_as_coinbase( + &self, + ) -> Option<(&CoinbasePayload, Option<&PrincipalData>, Option<&VRFProof>)> { match &self.payload { TransactionPayload::Coinbase(ref payload, ref recipient_opt, ref vrf_proof_opt) => { Some((payload, recipient_opt.as_ref(), vrf_proof_opt.as_ref())) @@ -1652,7 +1658,11 @@ mod test { corrupt_buf_bytes[0] = (((corrupt_buf_bytes[0] as u16) + 1) % 256) as u8; let 
corrupt_buf = CoinbasePayload(corrupt_buf_bytes); - TransactionPayload::Coinbase(corrupt_buf, recipient_opt.clone(), vrf_proof_opt.clone()) + TransactionPayload::Coinbase( + corrupt_buf, + recipient_opt.clone(), + vrf_proof_opt.clone(), + ) } TransactionPayload::TenureChange(_) => todo!(), }; @@ -1890,7 +1900,8 @@ mod test { #[test] fn tx_stacks_transaction_payload_coinbase() { - let coinbase_payload = TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None); + let coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None); let coinbase_payload_bytes = vec![ // payload type ID TransactionPayloadID::Coinbase as u8, diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 899c09cf29..97e1332010 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -1278,7 +1278,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { (old_cost_tracker, Ok(vec![pox_3_initialization_receipt])) }) } - + pub fn initialize_epoch_2_5(&mut self) -> Result, Error> { // use the `using!` statement to ensure that the old cost_tracker is placed // back in all branches after initialization @@ -1299,7 +1299,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // require 3.0 rules henceforth in this connection as well tx_conn.epoch = StacksEpochId::Epoch25; }); - + /////////////////// .pox-4 //////////////////////// let mainnet = self.mainnet; let first_block_height = self.burn_state_db.get_burn_start_height(); @@ -1414,7 +1414,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { debug!("Epoch 2.5 initialized"); (old_cost_tracker, Ok(vec![pox_4_initialization_receipt])) }) - } + } pub fn initialize_epoch_3_0(&mut self) -> Result, Error> { // use the `using!` statement to ensure that the old cost_tracker is placed @@ -1440,7 +1440,7 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { debug!("Epoch 3.0 initialized"); (old_cost_tracker, Ok(vec![])) }) - } + } pub fn 
start_transaction_processing<'c>(&'c mut self) -> ClarityTransactionConnection<'c, 'a> { let store = &mut self.datastore; @@ -2609,7 +2609,7 @@ mod tests { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } - + fn get_v3_unlock_height(&self) -> u32 { u32::MAX } @@ -2621,7 +2621,7 @@ mod tests { fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } - + fn get_pox_4_activation_height(&self) -> u32 { u32::MAX } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 86e0dd9ef2..bd0b914be7 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -501,7 +501,7 @@ impl BurnStateDB for SortitionHandleTx<'_> { fn get_v2_unlock_height(&self) -> u32 { self.context.pox_constants.v2_unlock_height } - + fn get_v3_unlock_height(&self) -> u32 { self.context.pox_constants.v3_unlock_height } @@ -509,7 +509,7 @@ impl BurnStateDB for SortitionHandleTx<'_> { fn get_pox_3_activation_height(&self) -> u32 { self.context.pox_constants.pox_3_activation_height } - + fn get_pox_4_activation_height(&self) -> u32 { self.context.pox_constants.pox_4_activation_height } @@ -628,7 +628,7 @@ impl BurnStateDB for SortitionDBConn<'_> { fn get_v2_unlock_height(&self) -> u32 { self.context.pox_constants.v2_unlock_height } - + fn get_v3_unlock_height(&self) -> u32 { self.context.pox_constants.v3_unlock_height } @@ -636,7 +636,7 @@ impl BurnStateDB for SortitionDBConn<'_> { fn get_pox_3_activation_height(&self) -> u32 { self.context.pox_constants.pox_3_activation_height } - + fn get_pox_4_activation_height(&self) -> u32 { self.context.pox_constants.pox_4_activation_height } diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 80e7806af7..91f8bdffbc 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -379,7 +379,6 @@ lazy_static! 
{ network_epoch: PEER_VERSION_EPOCH_2_4 }, StacksEpoch { - epoch_id: StacksEpochId::Epoch25, start_height: BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT, end_height: BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT, @@ -996,7 +995,7 @@ impl StacksEpochExtension for StacksEpoch { }, ] } - + #[cfg(test)] fn unit_test_2_5(first_burnchain_height: u64) -> Vec { info!( @@ -1266,7 +1265,7 @@ impl StacksEpochExtension for StacksEpoch { }, ] } - + #[cfg(test)] fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec { info!( diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index f1bac65427..c8ca454fd4 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -337,7 +337,7 @@ impl RPCPoxInfoData { "PoX-3 first reward cycle begins before first burn block height".to_string(), ))? + 1; - + let pox_4_first_cycle = burnchain .block_height_to_reward_cycle(burnchain.pox_constants.pox_4_activation_height as u64) .ok_or(net_error::ChainstateError( diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 80b803d185..ef9442898e 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -355,7 +355,7 @@ impl EventObserver { fn send_mined_microblock(&self, payload: &serde_json::Value) { self.send_payload(payload, PATH_MINED_MICROBLOCK); } - + fn send_mined_nakamoto_block(&self, payload: &serde_json::Value) { self.send_payload(payload, PATH_MINED_NAKAMOTO_BLOCK); } @@ -482,7 +482,7 @@ impl MemPoolEventDispatcher for EventDispatcher { anchor_block, ); } - + fn mined_nakamoto_block_event( &self, target_burn_height: u64, @@ -937,7 +937,7 @@ impl EventDispatcher { observer.send_mined_microblock(&payload); } } - + pub fn process_mined_nakamoto_block_event( &self, target_burn_height: u64, diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index a374c945a9..0a116353c2 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ 
b/testnet/stacks-node/src/tests/epoch_24.rs @@ -189,7 +189,7 @@ fn fix_to_pox_contract() { epoch_2_2 as u32 + 1, u32::MAX, pox_3_activation_height as u32, - u32::MAX + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -827,7 +827,7 @@ fn verify_auto_unlock_behavior() { epoch_2_2 as u32 + 1, u32::MAX, pox_3_activation_height as u32, - u32::MAX + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 8dd46f905d..39b0b4be32 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -212,7 +212,9 @@ pub mod test_observer { use warp; use warp::Filter; - use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, MinedNakamotoBlockEvent, StackerDBChunksEvent}; + use crate::event_dispatcher::{ + MinedBlockEvent, MinedMicroblockEvent, MinedNakamotoBlockEvent, StackerDBChunksEvent, + }; pub const EVENT_OBSERVER_PORT: u16 = 50303; @@ -220,7 +222,8 @@ pub mod test_observer { pub static ref NEW_BLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref MINED_BLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref MINED_MICROBLOCKS: Mutex> = Mutex::new(Vec::new()); - pub static ref MINED_NAKAMOTO_BLOCKS: Mutex> = Mutex::new(Vec::new()); + pub static ref MINED_NAKAMOTO_BLOCKS: Mutex> = + Mutex::new(Vec::new()); pub static ref NEW_MICROBLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref NEW_STACKERDB_CHUNKS: Mutex> = Mutex::new(Vec::new()); @@ -251,7 +254,7 @@ pub mod test_observer { microblock_events.push(microblocks); Ok(warp::http::StatusCode::OK) } - + async fn handle_stackerdb_chunks( chunks: serde_json::Value, ) -> Result { @@ -310,8 +313,10 @@ pub mod test_observer { mined_txs.push(serde_json::from_value(tx_event).unwrap()); Ok(warp::http::StatusCode::OK) } - - async fn handle_mined_nakamoto_block(block: serde_json::Value) -> Result { + + 
async fn handle_mined_nakamoto_block( + block: serde_json::Value, + ) -> Result { let mut mined_blocks = MINED_NAKAMOTO_BLOCKS.lock().unwrap(); // assert that the mined transaction events have string-y txids block From 9c9032c53dc82689d8366188913dd5a4019139ff Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:41:05 -0500 Subject: [PATCH 080/122] chore: cargo fmt --- stacks-common/src/types/mod.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index ee37690877..953668cbef 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -93,9 +93,7 @@ impl StacksEpochId { | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => false, - StacksEpochId::Epoch24 - | StacksEpochId::Epoch25 - | StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch24 | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, } } } From 18214251b8b37585fab84547d64ce36773b0ac13 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:41:21 -0500 Subject: [PATCH 081/122] chore: cargo fmt --- stackslib/src/burnchains/mod.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 2d5df93a57..0596582ae7 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -362,7 +362,20 @@ impl PoxConstants { #[cfg(test)] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots - PoxConstants::new(10, 5, 3, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX, u32::MAX, u32::MAX) + PoxConstants::new( + 10, + 5, + 3, + 25, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ) } /// Returns the PoX contract that is "active" at the given burn block height From 64c9451073043f16f21d1d6cc957697238658d3a Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:41:38 -0500 
Subject: [PATCH 082/122] refactor: when generating a block-commit, take an optionally-given VRF seed. This is necessary for Nakamoto because the VRF seed is generated differently from epoch 2.x --- stackslib/src/burnchains/tests/mod.rs | 42 ++++++++++++++++++--------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 32c4e444b4..4030a53815 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -396,7 +396,8 @@ impl TestBurnchainBlock { leader_key: &LeaderKeyRegisterOp, fork_snapshot: Option<&BlockSnapshot>, parent_block_snapshot: Option<&BlockSnapshot>, - epoch_marker: u8 + new_seed: Option, + epoch_marker: u8, ) -> LeaderBlockCommitOp { let input = (Txid([0; 32]), 0); let pubks = miner @@ -417,15 +418,20 @@ impl TestBurnchainBlock { None => SortitionDB::get_first_block_snapshot(ic).unwrap(), }; - // prove on the last-ever sortition's hash to produce the new seed - let proof = miner - .make_proof(&leader_key.public_key, &last_snapshot.sortition_hash) - .expect(&format!( - "FATAL: no private key for {}", - leader_key.public_key.to_hex() - )); - - let new_seed = VRFSeed::from_proof(&proof); + let new_seed = if let Some(new_seed) = new_seed { + new_seed + } else { + // prove on the last-ever sortition's hash to produce the new seed + let proof = miner + .make_proof(&leader_key.public_key, &last_snapshot.sortition_hash) + .expect(&format!( + "FATAL: no private key for {}", + leader_key.public_key.to_hex() + )); + + let new_seed = VRFSeed::from_proof(&proof); + new_seed + }; let get_commit_res = SortitionDB::get_block_commit( ic.conn(), @@ -480,7 +486,7 @@ impl TestBurnchainBlock { miner.block_commits.push(txop.clone()); txop } - + /// Add an epoch 2.x block-commit pub fn add_leader_block_commit( &mut self, @@ -492,9 +498,19 @@ impl TestBurnchainBlock { fork_snapshot: Option<&BlockSnapshot>, parent_block_snapshot: 
Option<&BlockSnapshot>, ) -> LeaderBlockCommitOp { - self.inner_add_block_commit(ic, miner, block_hash, burn_fee, leader_key, fork_snapshot, parent_block_snapshot, STACKS_EPOCH_2_4_MARKER) + self.inner_add_block_commit( + ic, + miner, + block_hash, + burn_fee, + leader_key, + fork_snapshot, + parent_block_snapshot, + None, + STACKS_EPOCH_2_4_MARKER, + ) } - + pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) -> () { assert_eq!(parent_snapshot.block_height + 1, self.block_height); From 53688a4dc4150848c7287d93f01dd9bbf8f6b688 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:42:21 -0500 Subject: [PATCH 083/122] fix: track the canonical Stacks chain tips in a dedicated DB table, which avoids mucking up the epoch 2.x means of tracking the canonical Stacks tip but allows it to keep working alongside Nakamoto (which uses this new table exclusively) --- stackslib/src/chainstate/burn/db/sortdb.rs | 178 ++++++++++++++++++--- 1 file changed, 154 insertions(+), 24 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 4e42adc9c1..b72e541622 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -31,6 +31,7 @@ use clarity::vm::types::Value; use rand; use rand::RngCore; use rusqlite::types::ToSql; +use rusqlite::Error as sqlite_error; use rusqlite::Row; use rusqlite::Transaction; use rusqlite::TransactionBehavior; @@ -874,17 +875,22 @@ const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ PRIMARY KEY(txid, burn_header_hash) );"#, r#"ALTER TABLE snapshots ADD miner_pk_hash TEXT DEFAULT NULL"#, -]; - -const SORTITION_DB_SCHEMA_9: &'static [&'static str] = &[ - r#"ALTER TABLE snapshots ADD miner_pk_hash TEXT DEFAULT NULL"#, r#" -- eagerly-processed reward sets, before they're applied to the start of the next reward cycle CREATE TABLE preprocessed_reward_sets ( sortition_id TEXT PRIMARY KEY, reward_set TEXT NOT NULL - );"#
-]; + );"#, + r#" + -- canonical chain tip at each sortition ID. + -- This is updated in both 2.x and Nakamoto, but Nakamoto relies on this exclusively + CREATE TABLE stacks_chain_tips ( + sortition_id TEXT PRIMARY KEY, + consensus_hash TEXT NOT NULL, + block_hash TEXT NOT NULL, + block_height INTEGER NOT NULL + );"#, +]; const SORTITION_DB_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS snapshots_block_hashes ON snapshots(block_height,index_root,winning_stacks_block_hash);", @@ -968,7 +974,7 @@ impl SortitionContext for SortitionDBTxContext { } } -fn get_block_commit_by_txid( +pub fn get_block_commit_by_txid( conn: &Connection, sort_id: &SortitionId, txid: &Txid, @@ -1596,6 +1602,15 @@ impl<'a> SortitionHandleTx<'a> { stacks_block_height, )?; + #[cfg(test)] + { + let (ch, bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self).unwrap(); + debug!( + "Memoized canonical Stacks chain tip is now {}/{}", + &ch, &bhh + ); + } + Ok(()) } @@ -1796,6 +1811,25 @@ impl<'a> SortitionHandleTx<'a> { Ok(anchor_block_txid) } + /// Update the canonical Stacks tip + fn update_canonical_stacks_tip( + &mut self, + sort_id: &SortitionId, + consensus_hash: &ConsensusHash, + stacks_block_hash: &BlockHeaderHash, + stacks_block_height: u64, + ) -> Result<(), db_error> { + let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; + let args: &[&dyn ToSql] = &[ + sort_id, + consensus_hash, + stacks_block_hash, + &u64_to_sql(stacks_block_height)?, + ]; + self.execute(sql, args)?; + Ok(()) + } + /// Mark an existing snapshot's stacks block as accepted at a particular burn chain tip within a PoX fork (identified by the consensus hash), /// and calculate and store its arrival index. 
/// If this Stacks block extends the canonical stacks chain tip, then also update the memoized canonical @@ -1812,6 +1846,27 @@ impl<'a> SortitionHandleTx<'a> { stacks_block_hash: &BlockHeaderHash, stacks_block_height: u64, ) -> Result<(), db_error> { + let block_sn = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? + .ok_or(db_error::NotFoundError)?; + + let cur_epoch = + SortitionDB::get_stacks_epoch(self, block_sn.block_height)?.expect(&format!( + "FATAL: no epoch defined for burn height {}", + block_sn.block_height + )); + + if cur_epoch.epoch_id >= StacksEpochId::Epoch30 { + // Nakamoto blocks are always processed in order since the chain can't fork + self.update_canonical_stacks_tip( + &burn_tip.sortition_id, + consensus_hash, + stacks_block_hash, + stacks_block_height, + )?; + return Ok(()); + } + + // in epoch 2.x, where we track canonical stacks tip via the sortition DB let arrival_index = SortitionDB::get_max_arrival_index(self)?; let args: &[&dyn ToSql] = &[ &u64_to_sql(stacks_block_height)?, @@ -1838,6 +1893,7 @@ impl<'a> SortitionHandleTx<'a> { // update arrival data across all Stacks forks let (best_ch, best_bhh, best_height) = self.find_new_block_arrivals(burn_tip)?; + self.update_canonical_stacks_tip(&burn_tip.sortition_id, &best_ch, &best_bhh, best_height)?; self.update_new_block_arrivals(burn_tip, best_ch, best_bhh, best_height)?; Ok(()) @@ -3058,8 +3114,6 @@ impl SortitionDB { /// Get the Sortition ID for the burnchain block containing `txid`'s parent. /// `txid` is the burnchain txid of a block-commit. - /// Because the block_commit_parents table is not populated on schema migration, the returned - /// value may be NULL (and this is okay). pub fn get_block_commit_parent_sortition_id( conn: &Connection, txid: &Txid, @@ -3486,7 +3540,11 @@ impl SortitionDB { /// Store a pre-processed reward set. 
/// `sortition_id` is the first sortition ID of the prepare phase - pub fn store_preprocessed_reward_set(sort_tx: &mut DBTx, sortition_id: &SortitionId, rc_info: &RewardCycleInfo) -> Result<(), db_error> { + pub fn store_preprocessed_reward_set( + sort_tx: &mut DBTx, + sortition_id: &SortitionId, + rc_info: &RewardCycleInfo, + ) -> Result<(), db_error> { let sql = "INSERT INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; let args: &[&dyn ToSql] = &[sortition_id, &rc_json]; @@ -3496,21 +3554,25 @@ impl SortitionDB { /// Get a pre-processed reawrd set. /// `sortition_id` is the first sortition ID of the prepare phase. - pub fn get_preprocessed_reward_set(sortdb: &DBConn, sortition_id: &SortitionId) -> Result, db_error> { + pub fn get_preprocessed_reward_set( + sortdb: &DBConn, + sortition_id: &SortitionId, + ) -> Result, db_error> { let sql = "SELECT reward_set FROM preprocessed_reward_sets WHERE sortition_id = ?1"; let args: &[&dyn ToSql] = &[sortition_id]; - let reward_set_opt : Option = sortdb.query_row(sql, args, |row| row.get(0)) + let reward_set_opt: Option = sortdb + .query_row(sql, args, |row| row.get(0)) .optional() .map_err(db_error::from)?; if let Some(reward_set_str) = reward_set_opt { - let rc_info : RewardCycleInfo = serde_json::from_str(&reward_set_str).map_err(|_| db_error::ParseError)?; + let rc_info: RewardCycleInfo = + serde_json::from_str(&reward_set_str).map_err(|_| db_error::ParseError)?; Ok(Some(rc_info)) - } - else { + } else { Ok(None) } - } + } } impl<'a> SortitionDBTx<'a> { @@ -4488,7 +4550,22 @@ impl SortitionDB { conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash), db_error> { let sn = SortitionDB::get_canonical_burn_chain_tip(conn)?; + let cur_epoch = SortitionDB::get_stacks_epoch(conn, sn.block_height)?.expect(&format!( + "FATAL: no epoch defined for burn height {}", + sn.block_height + )); + if cur_epoch.epoch_id >= 
StacksEpochId::Epoch30 { + // nakamoto behavior -- look to the stacks_chain_tip table + let res: Result<_, db_error> = conn.query_row_and_then( + "SELECT consensus_hash,block_hash FROM stacks_chain_tips WHERE sortition_id = ?", + &[&sn.sortition_id], + |row| Ok((row.get_unwrap(0), row.get_unwrap(1))), + ); + return res; + } + + // epoch 2.x behavior -- look at the snapshot itself let stacks_block_hash = sn.canonical_stacks_tip_hash; let consensus_hash = sn.canonical_stacks_tip_consensus_hash; @@ -4857,8 +4934,8 @@ impl SortitionDB { } /// Get a block commit by its committed block. - /// For Nakamoto, `consensus_hash` and `block_hash` are the hashes that combine to form the - /// `last_tenure_id` (i.e. the index block hash of the first block in the last tenure) + /// For Stacks 2.x, `block_hash` is just the hash of the block + /// For Nakamoto, `block_hash` is the StacksBlockId of the last tenure's first block pub fn get_block_commit_for_stacks_block( conn: &Connection, consensus_hash: &ConsensusHash, @@ -4887,6 +4964,17 @@ impl SortitionDB { }) } + /// Get the block-commit for a Nakamoto block, given the block-commit's sortition's consensus + /// hash and its given last_tenure_id + pub fn get_block_commit_for_nakamoto_block( + conn: &Connection, + consensus_hash: &ConsensusHash, + last_tenure_id: &StacksBlockId, + ) -> Result, db_error> { + let bhh = BlockHeaderHash(last_tenure_id.0.clone()); + Self::get_block_commit_for_stacks_block(conn, consensus_hash, &bhh) + } + /// Get a block snapshot for a winning block hash in a given burn chain fork. pub fn get_block_snapshot_for_winning_stacks_block( ic: &SortitionDBConn, @@ -4902,7 +4990,7 @@ impl SortitionDB { None => Ok(None), } } - + /// Get a block snapshot for a winning Nakamoto tenure in a given burn chain fork. 
pub fn get_block_snapshot_for_winning_nakamoto_tenure( ic: &SortitionDBConn, @@ -5139,10 +5227,35 @@ impl<'a> SortitionHandleTx<'a> { let mut sn = snapshot.clone(); sn.index_root = root_hash.clone(); - // preserve memoized stacks chain tip from this burn chain fork - sn.canonical_stacks_tip_height = parent_sn.canonical_stacks_tip_height; - sn.canonical_stacks_tip_hash = parent_sn.canonical_stacks_tip_hash; - sn.canonical_stacks_tip_consensus_hash = parent_sn.canonical_stacks_tip_consensus_hash; + let cur_epoch = + SortitionDB::get_stacks_epoch(self, snapshot.block_height)?.expect(&format!( + "FATAL: no epoch defined for burn height {}", + snapshot.block_height + )); + + if cur_epoch.epoch_id >= StacksEpochId::Epoch30 { + // nakamoto behavior + // look at stacks_chain_tips table + let res: Result<_, db_error> = self.deref().query_row_and_then( + "SELECT consensus_hash,block_hash,block_height FROM stacks_chain_tips WHERE sortition_id = ?", + &[&parent_snapshot.sortition_id], + |row| Ok((row.get_unwrap(0), row.get_unwrap(1), (u64::try_from(row.get_unwrap::<_, i64>(2)).expect("FATAL: block height too high")))) + ); + let ( + canonical_stacks_tip_consensus_hash, + canonical_stacks_tip_block_hash, + canonical_stacks_tip_height, + ) = res?; + sn.canonical_stacks_tip_height = canonical_stacks_tip_height; + sn.canonical_stacks_tip_hash = canonical_stacks_tip_block_hash; + sn.canonical_stacks_tip_consensus_hash = canonical_stacks_tip_consensus_hash; + } else { + // epoch 2.x behavior + // preserve memoized stacks chain tip from this burn chain fork + sn.canonical_stacks_tip_height = parent_sn.canonical_stacks_tip_height; + sn.canonical_stacks_tip_hash = parent_sn.canonical_stacks_tip_hash; + sn.canonical_stacks_tip_consensus_hash = parent_sn.canonical_stacks_tip_consensus_hash; + } self.insert_block_snapshot(&sn, pox_payout)?; @@ -5154,6 +5267,23 @@ impl<'a> SortitionHandleTx<'a> { self.insert_missed_block_commit(missed_commit)?; } + self.update_canonical_stacks_tip( + 
&sn.sortition_id, + &sn.canonical_stacks_tip_consensus_hash, + &sn.canonical_stacks_tip_hash, + sn.canonical_stacks_tip_height, + )?; + + #[cfg(test)] + { + let (block_consensus_hash, block_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self).unwrap(); + + debug!( + "After sortition {}, canonical Stacks tip is {}/{}", + &snapshot.consensus_hash, &block_consensus_hash, &block_bhh + ); + } Ok(root_hash) } @@ -6241,6 +6371,7 @@ impl<'a> SortitionHandleTx<'a> { /// Update the given tip's canonical Stacks block pointer. /// Does so on all sortitions of the same height as tip. + /// Only used in Stacks 2.x fn update_new_block_arrivals( &mut self, tip: &BlockSnapshot, @@ -6262,7 +6393,6 @@ impl<'a> SortitionHandleTx<'a> { self.execute("UPDATE snapshots SET canonical_stacks_tip_consensus_hash = ?1, canonical_stacks_tip_hash = ?2, canonical_stacks_tip_height = ?3 WHERE block_height = ?4", args) .map_err(db_error::SqliteError)?; - Ok(()) } From 4de344001ba441dcb4dafe17b273d9d503492903 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:43:24 -0500 Subject: [PATCH 084/122] fix: off-by-one error in loading the preprocessed reward cycle info (also, cargo fmt) --- stackslib/src/chainstate/coordinator/mod.rs | 43 ++++++++++++++------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index ba1666bfca..706fbb6bb1 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -750,14 +750,24 @@ pub fn get_reward_cycle_info( .expect("FATAL: no reward cycle for burn height"); if prev_reward_cycle > 1 { - let prev_reward_cycle_start = burnchain.reward_cycle_to_block_height(prev_reward_cycle - 1); - let prepare_phase_start = prev_reward_cycle_start + u64::from(burnchain.pox_constants.reward_cycle_length) - u64::from(burnchain.pox_constants.prepare_length); - let first_prepare_sn = 
SortitionDB::get_ancestor_snapshot(&ic, prepare_phase_start, sortition_tip)? - .expect("FATAL: no start-of-prepare-phase sortition"); + let prev_reward_cycle_start = + burnchain.reward_cycle_to_block_height(prev_reward_cycle - 1); + let prepare_phase_start = prev_reward_cycle_start + + u64::from(burnchain.pox_constants.reward_cycle_length) + - u64::from(burnchain.pox_constants.prepare_length); + let first_prepare_sn = + SortitionDB::get_ancestor_snapshot(&ic, prepare_phase_start, sortition_tip)? + .expect("FATAL: no start-of-prepare-phase sortition"); let mut tx = sort_db.tx_begin()?; - if SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)?.is_none() { - SortitionDB::store_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id, &reward_cycle_info)?; + if SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)? + .is_none() + { + SortitionDB::store_preprocessed_reward_set( + &mut tx, + &first_prepare_sn.sortition_id, + &reward_cycle_info, + )?; } tx.commit()?; } @@ -2251,20 +2261,23 @@ impl< } /// Outermost call to process a burnchain block. - /// Will call the Stacks 2.x or Nakamoto handler, depending on whether or not + /// Will call the Stacks 2.x or Nakamoto handler, depending on whether or not /// Not called internally. /// NOTE: the 2.x and Nakamoto handlers return `Some(..)` in _different_ circumstances. If /// that matters to you, then you should call them directly. 
pub fn handle_new_burnchain_block(&mut self) -> Result, Error> { let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; let epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn())?; - let target_epoch_index = StacksEpoch::find_epoch(&epochs, canonical_burnchain_tip.block_height).expect("FATAL: epoch not defined for burnchain height"); - let target_epoch = epochs.get(target_epoch_index).expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); + let target_epoch_index = + StacksEpoch::find_epoch(&epochs, canonical_burnchain_tip.block_height) + .expect("FATAL: epoch not defined for burnchain height"); + let target_epoch = epochs + .get(target_epoch_index) + .expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); if target_epoch.epoch_id < StacksEpochId::Epoch30 { // burnchain has not yet advanced to epoch 3.0 self.handle_new_epoch2_burnchain_block(&mut HashSet::new()) - } - else { + } else { // burnchain has advanced to epoch 3.0, but has our sortition DB? let canonical_snapshot = match self.canonical_sortition_tip.as_ref() { Some(sn_tip) => SortitionDB::get_block_snapshot(self.sortition_db.conn(), sn_tip)? 
@@ -2274,8 +2287,12 @@ impl< )), None => SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())?, }; - let target_epoch_index = StacksEpoch::find_epoch(&epochs, canonical_snapshot.block_height).expect("FATAL: epoch not defined for BlockSnapshot height"); - let target_epoch = epochs.get(target_epoch_index).expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); + let target_epoch_index = + StacksEpoch::find_epoch(&epochs, canonical_snapshot.block_height) + .expect("FATAL: epoch not defined for BlockSnapshot height"); + let target_epoch = epochs + .get(target_epoch_index) + .expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); if target_epoch.epoch_id < StacksEpochId::Epoch30 { // need to catch the sortition DB up From 1acaa22d43bb7ee3c8c39296e64d5a23c95f8eb2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:43:47 -0500 Subject: [PATCH 085/122] chore: cargo fmt --- stackslib/src/chainstate/coordinator/tests.rs | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index eac7ed36a9..6a10ae0b75 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -4654,7 +4654,20 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let _r = std::fs::remove_dir_all(path); let sunset_ht = 8000; - let pox_consts = Some(PoxConstants::new(6, 3, 3, 25, 5, 10, sunset_ht, 10, 14, u32::MAX, 16, u32::MAX)); + let pox_consts = Some(PoxConstants::new( + 6, + 3, + 3, + 25, + 5, + 10, + sunset_ht, + 10, + 14, + u32::MAX, + 16, + u32::MAX, + )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..25).map(|_| VRFPrivateKey::new()).collect(); From 32f73770bbc4ef20c0e86f4ad83ba08ff4310bad Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:44:43 -0500 Subject: [PATCH 086/122] feat: the Nakamoto anchor block 
is the *tenure-start* block of the *last* tenure in the *reward phase*. This is because we can find it via block-commits (its index block hash will be in the block-commit for the first prepare-phase Nakamoto tenure), which allows the block-commit check_pox() code to work without modification --- .../chainstate/nakamoto/coordinator/mod.rs | 154 +++++++++++++----- 1 file changed, 113 insertions(+), 41 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 50bc7b4c6f..8b5b7c0589 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -26,6 +26,7 @@ use crate::burnchains::db::BurnchainHeaderReader; use crate::burnchains::Burnchain; use crate::burnchains::BurnchainBlockHeader; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::operations::leader_block_commit::RewardSetInfo; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::comm::{ CoordinatorChannels, CoordinatorCommunication, CoordinatorEvents, CoordinatorNotices, @@ -38,12 +39,13 @@ use crate::chainstate::coordinator::{ RewardSetProvider, }; use crate::chainstate::nakamoto::NakamotoChainState; -use crate::chainstate::stacks::Error as ChainstateError; use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksBlockHeaderTypes; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::miner::signal_mining_blocked; use crate::chainstate::stacks::miner::signal_mining_ready; use crate::chainstate::stacks::miner::MinerStatus; +use crate::chainstate::stacks::Error as ChainstateError; use crate::cost_estimates::CostEstimator; use crate::cost_estimates::FeeEstimator; @@ -80,7 +82,12 @@ impl OnChainRewardSetProvider { let liquid_ustx = chainstate.get_liquid_ustx(block_id); - debug!("PoX addrs at {} ({}): {:?}", block_id, registered_addrs.len(), 
®istered_addrs); + debug!( + "PoX addrs at {} ({}): {:?}", + block_id, + registered_addrs.len(), + ®istered_addrs + ); let (threshold, participation) = StacksChainState::get_reward_threshold_and_participation( &burnchain.pox_constants, @@ -152,12 +159,18 @@ fn find_prepare_phase_sortitions( } /// Try to get the reward cycle information for a Nakamoto reward cycle. -/// In Nakamoto, the PoX anchor block for reward cycle _R_ is the last Stacks block mined in the -/// _R - 1_'s reward phase phase (i.e. which takes place toward the end of reward cycle). +/// In Nakamoto, the PoX anchor block for reward cycle _R_ is the _first_ Stacks block mined in the +/// _last_ tenure of _R - 1_'s reward phase phase (i.e. which takes place toward the end of reward cycle). +/// The reason it must be this way is because its hash will be in the block-commit for the first +/// prepare-phase tenure of cycle _R_ (which is required for the PoX ancestry query in the +/// block-commit validation logic). /// /// If this method returns None, the caller should try again when there are more Stacks blocks. In /// Nakamoto, every reward cycle _must_ have a PoX anchor block; otherwise, the chain halts. /// +/// N.B. this method assumes that the prepare phase is comprised _solely_ of Nakamoto tenures. It +/// will not work if any of the prepare-phase tenures are from epoch 2.x. +/// /// Returns Ok(Some(reward-cycle-info)) if we found the first sortition in the prepare phase. /// Returns Ok(None) if we're still waiting for the PoX anchor block sortition /// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase @@ -193,24 +206,14 @@ pub fn get_nakamoto_reward_cycle_info( .expect("FATAL: no reward cycle for burn height") + 1; - // only proceed if we have not yet calculated the PoX reward info for this reward cycle. - let last_processed_reward_cycle = { - let ic = sort_db.index_handle(sortition_tip); - ic.get_last_processed_reward_cycle()? 
- }; - - if last_processed_reward_cycle >= reward_cycle { - return Err(Error::RewardSetAlreadyProcessed); - } - debug!("Processing reward set for Nakamoto reward cycle"; "burn_height" => burn_height, "reward_cycle" => reward_cycle, "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, "prepare_phase_length" => burnchain.pox_constants.prepare_length); - // find the last Stacks block processed in the preceeding prepare phase - // (i.e. the parent of the first Stacks block processed in the prepare phase). + // find the last tenure-start Stacks block processed in the preceeding prepare phase + // (i.e. the first block in the tenure of the parent of the first Stacks block processed in the prepare phase). // Note that we may not have processed it yet. But, if we do find it, then it's // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block // cannot change later). @@ -219,12 +222,13 @@ pub fn get_nakamoto_reward_cycle_info( // did we already calculate the reward cycle info? If so, then return it. let first_sortition_id = if let Some(first_sn) = prepare_phase_sortitions.first() { - if let Some(persisted_reward_cycle_info) = SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? { + if let Some(persisted_reward_cycle_info) = + SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? + { return Ok(Some(persisted_reward_cycle_info)); } first_sn.sortition_id.clone() - } - else { + } else { // can't do anything return Ok(None); }; @@ -251,32 +255,55 @@ pub fn get_nakamoto_reward_cycle_info( .expect("FATAL: queried non-Nakamoto tenure start header") .parent_block_id; - // find the parent of this Stacks block - let anchor_block_header = + // find the tenure-start block of the tenure of the parent of this Stacks block. 
+ // in epoch 2, this is the preceding anchor block + // in nakamoto, this is the tenure-start block of the preceding tenure + let parent_block_header = NakamotoChainState::get_block_header(chain_state.db(), &parent_block_id)? .expect("FATAL: no parent for processed Stacks block in prepare phase"); + let anchor_block_header = match &parent_block_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(..) => parent_block_header, + StacksBlockHeaderTypes::Nakamoto(..) => { + NakamotoChainState::get_nakamoto_tenure_start_block_header( + chain_state.db(), + &parent_block_header.consensus_hash, + )? + .expect("FATAL: no parent for processed Stacks block in prepare phase") + } + }; + let anchor_block_sn = SortitionDB::get_block_snapshot_consensus( sort_db.conn(), &anchor_block_header.consensus_hash, )? .expect("FATAL: no snapshot for winning PoX anchor block"); - let stacks_block_hash = anchor_block_header.anchored_header.block_hash(); + // make sure the `anchor_block` field is the same as whatever goes into the block-commit, + // or PoX ancestry queries won't work + let (block_id, stacks_block_hash) = match anchor_block_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(header) => ( + StacksBlockId::new(&anchor_block_header.consensus_hash, &header.block_hash()), + header.block_hash(), + ), + StacksBlockHeaderTypes::Nakamoto(header) => { + (header.block_id(), BlockHeaderHash(header.block_id().0)) + } + }; + let txid = anchor_block_sn.winning_block_txid; info!( - "Anchor block selected for cycle {}: {}/{}", - reward_cycle, &anchor_block_header.consensus_hash, &stacks_block_hash + "Anchor block selected for cycle {}: (ch {}) {}", + reward_cycle, &anchor_block_header.consensus_hash, &block_id ); - let block_id = StacksBlockId::new(&anchor_block_header.consensus_hash, &stacks_block_hash); let reward_set = provider.get_reward_set(burn_height, chain_state, burnchain, sort_db, &block_id)?; debug!( - "Stacks anchor block {}/{} cycle {} is processed", - 
&anchor_block_header.consensus_hash, &stacks_block_hash, reward_cycle + "Stacks anchor block (ch {}) {} cycle {} is processed", + &anchor_block_header.consensus_hash, &block_id, reward_cycle ); let anchor_status = PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set); @@ -299,6 +326,45 @@ pub fn get_nakamoto_reward_cycle_info( return Ok(None); } +/// Get the next PoX recipients in the Nakamoto epoch. +/// This is a little different than epoch 2.x: +/// * we're guaranteed to have an anchor block +/// * we pre-compute the reward set at the start of the prepare phase, so we only need to load it +/// up here at the start of the reward phase. +pub fn get_nakamoto_next_recipients( + sortition_tip: &BlockSnapshot, + sort_db: &mut SortitionDB, + burnchain: &Burnchain, +) -> Result, Error> { + let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height + 1) { + // load up new reward cycle info so we can start using *that* + let prepare_phase_sortitions = + find_prepare_phase_sortitions(sort_db, burnchain, &sortition_tip.parent_sortition_id)?; + + // NOTE: this must panic because Nakamoto's first reward cycle has stackers + let first_sn = prepare_phase_sortitions + .first() + .expect("FATAL: unreachable: no prepare-phase sortitions at start of reward cycle"); + + debug!("Get pre-processed reward set"; + "sortition_id" => %first_sn.sortition_id); + + // NOTE: if we don't panic here, we'll panic later in a more obscure way + Some( + SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? 
+ .expect(&format!( + "No reward set for start of reward cycle beginning with block {}", + &sortition_tip.block_height + )), + ) + } else { + None + }; + sort_db + .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) + .map_err(|e| Error::from(e)) +} + impl< 'a, T: BlockEventDispatcher, @@ -446,7 +512,7 @@ impl< Err(ChainstateError::NetError(NetError::DeserializeError(msg))) => { // happens if we load a zero-sized block (i.e. an invalid block) warn!("Encountered invalid block (codec error): {}", &msg); - + // try again self.notifier.notify_stacks_block_processed(); increment_stx_blocks_processed_counter(); @@ -579,7 +645,7 @@ impl< } /// Given a burnchain header, find the PoX reward cycle info - pub fn get_nakamoto_reward_cycle_info( + fn get_nakamoto_reward_cycle_info( &mut self, burn_header: &BurnchainBlockHeader, ) -> Result, Error> { @@ -707,21 +773,29 @@ impl< ); } } - + let reward_cycle_info = if self.burnchain.is_reward_cycle_start(header.block_height) { // we're at the end of the prepare phase, so we'd better have obtained the reward // cycle info of we must block. 
- let prepare_phase_sortitions = - find_prepare_phase_sortitions(&self.sortition_db, &self.burnchain, &last_processed_ancestor)?; + let prepare_phase_sortitions = find_prepare_phase_sortitions( + &self.sortition_db, + &self.burnchain, + &last_processed_ancestor, + )?; if let Some(first_sn) = prepare_phase_sortitions.first() { - let reward_cycle_info = SortitionDB::get_preprocessed_reward_set(&self.sortition_db.conn(), &first_sn.sortition_id)?; + let reward_cycle_info = SortitionDB::get_preprocessed_reward_set( + &self.sortition_db.conn(), + &first_sn.sortition_id, + )?; if let Some(rc_info) = reward_cycle_info.as_ref() { // we must have an anchor block - assert!(rc_info.known_selected_anchor_block().is_some(), "FATAL: do not know prior reward cycle anchor block"); - } - else { - // have to block -- we don't have the reward cycle information + assert!( + rc_info.known_selected_anchor_block().is_some(), + "FATAL: do not know prior reward cycle anchor block" + ); + } else { + // have to block -- we don't have the reward cycle information debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), "sortition_id" => %first_sn.sortition_id @@ -729,8 +803,7 @@ impl< return Ok(None); } reward_cycle_info - } - else { + } else { // have to block -- we don't have any sortitions in the preceding prepare // phase. // this is really unreachable, but don't panic just yet. @@ -739,8 +812,7 @@ impl< ); return Ok(None); } - } - else { + } else { // not starting a reward cycle anyway None }; From 3c4a201575f73c43876154d2081eed45df557181 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:45:51 -0500 Subject: [PATCH 087/122] chore: add/fix tenure tests -- in particular, test that we can produce 10 tenures in a row across two reward cycles (of 10 blocks each). This lets us verify that not only does the VRF work (i.e. 
PoX payouts work), but also that PoX anchor block selection works across the transition to Nakamoto and within Nakamoto --- .../chainstate/nakamoto/coordinator/tests.rs | 194 ++++++++++++++++-- 1 file changed, 181 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 6fc00d3320..e5a695f438 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -18,12 +18,18 @@ use crate::net::test::{TestPeer, TestPeerConfig}; use clarity::vm::types::PrincipalData; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::test::make_pox_4_lockup; +use crate::chainstate::stacks::db::StacksAccount; +use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::CoinbasePayload; use crate::chainstate::stacks::StacksTransaction; use crate::chainstate::stacks::StacksTransactionSigner; use crate::chainstate::stacks::TenureChangeCause; +use crate::chainstate::stacks::TokenTransferMemo; use crate::chainstate::stacks::TransactionAnchorMode; use crate::chainstate::stacks::TransactionAuth; use crate::chainstate::stacks::TransactionPayload; @@ -31,11 +37,12 @@ use crate::chainstate::stacks::TransactionVersion; use crate::clarity::vm::types::StacksAddressExtensions; -use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; use stacks_common::address::AddressHashMode; +use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; use stacks_common::types::chainstate::StacksAddress; -use stacks_common::types::chainstate::StacksPublicKey; use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::Address; use 
stacks_common::types::StacksEpoch; use stacks_common::util::vrf::VRFProof; @@ -58,7 +65,7 @@ fn boot_nakamoto(test_name: &str, mut initial_balances: Vec<(PrincipalData, u64) // first 25 blocks are boot-up // reward cycle 6 instantiates pox-3 // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(36)); + peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; peer_config.initial_balances.append(&mut initial_balances); peer_config.burnchain.pox_constants.v2_unlock_height = 21; @@ -75,16 +82,22 @@ fn boot_nakamoto(test_name: &str, mut initial_balances: Vec<(PrincipalData, u64) } // stack to pox-3 in cycle 7 - for sortition_height in 0..5 { + for sortition_height in 0..6 { let txs = if sortition_height == 0 { // stack them all - let stack_tx = make_pox_4_lockup(&private_key, 0, 1_000_000_000_000_000_000, PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()), 12, 34); + let stack_tx = make_pox_4_lockup( + &private_key, + 0, + 1_000_000_000_000_000_000, + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()), + 12, + 34, + ); vec![stack_tx] - } - else { + } else { vec![] }; - + peer.tenure_with_txs(&txs, &mut peer_nonce); } @@ -92,18 +105,173 @@ fn boot_nakamoto(test_name: &str, mut initial_balances: Vec<(PrincipalData, u64) peer } -/// Mine a single Nakamoto tenure +/// Make a token-transfer from a private key +fn make_token_transfer( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + private_key: &StacksPrivateKey, + nonce: u64, + amt: u128, + recipient_addr: &StacksAddress, +) -> StacksTransaction { + let mut stx_transfer = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(private_key).unwrap(), + TransactionPayload::TokenTransfer( + recipient_addr.clone().to_account_principal(), + 1, + 
TokenTransferMemo([0x00; 34]), + ), + ); + stx_transfer.chain_id = 0x80000000; + stx_transfer.anchor_mode = TransactionAnchorMode::OnChainOnly; + stx_transfer.auth.set_origin_nonce(nonce); + + let mut tx_signer = StacksTransactionSigner::new(&stx_transfer); + tx_signer.sign_origin(&private_key).unwrap(); + let stx_transfer_signed = tx_signer.get_tx().unwrap(); + + stx_transfer_signed +} + +/// Mine a single Nakamoto tenure with a single Nakamoto block #[test] fn test_simple_nakamoto_coordinator_bootup() { let mut peer = boot_nakamoto(function_name!(), vec![]); - let (burn_ops, tenure_change, vrf_proof) = peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (burn_ops, tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); - let blocks_and_sizes = peer.make_nakamoto_tenure(&consensus_hash, tenure_change, vrf_proof, |_miner, _chainstate, _sort_dbconn, _count| { vec![] }); - let blocks = blocks_and_sizes + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + let blocks_and_sizes = peer.make_nakamoto_tenure( + &consensus_hash, + tenure_change, + vrf_proof, + |_miner, _chainstate, _sort_dbconn, _count| vec![], + ); + let blocks: Vec<_> = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + // TODO: check chain tip +} + +/// Mine a single Nakamoto tenure with 10 Nakamoto blocks +#[test] +fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { + let mut peer = boot_nakamoto(function_name!(), vec![]); + let private_key = peer.config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let (burn_ops, tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); + let 
vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + let blocks_and_sizes = peer.make_nakamoto_tenure( + &consensus_hash, + tenure_change, + vrf_proof, + |miner, chainstate, sortdb, count| { + if count < 10 { + debug!("\n\nProduce block {}\n\n", count); + + let account = get_account(chainstate, sortdb, &addr); + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + ); + + let blocks: Vec<_> = blocks_and_sizes .into_iter() .map(|(block, _, _)| block) .collect(); - peer.process_nakamoto_tenure(blocks); + // TODO: check chain tip +} + +/// Mine a 10 Nakamoto tenures with 10 Nakamoto blocks +#[test] +fn test_simple_nakamoto_coordinator_10_tenures_10_blocks() { + let mut peer = boot_nakamoto(function_name!(), vec![]); + let private_key = peer.config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let mut all_blocks = vec![]; + + for i in 0..10 { + let (burn_ops, tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + debug!("Next burnchain block: {}", &consensus_hash); + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + let blocks_and_sizes = peer.make_nakamoto_tenure( + &consensus_hash, + tenure_change, + vrf_proof, + |miner, chainstate, sortdb, count| { + if count < 10 { + debug!("\n\nProduce block {}\n\n", all_blocks.len()); 
+ + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + ); + + let mut blocks = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + all_blocks.append(&mut blocks); + } + + // TODO: check chain tip } From 9cc8415296a7d5b21384f9d5610d232adc9ecdaa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:48:08 -0500 Subject: [PATCH 088/122] chore: cargo fmt --- stackslib/src/chainstate/nakamoto/miner.rs | 269 +++++++++++++-------- 1 file changed, 168 insertions(+), 101 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 2d12d48b76..4d58a2d0de 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -30,10 +30,10 @@ use clarity::vm::analysis::{CheckError, CheckErrors}; use clarity::vm::ast::errors::ParseErrors; use clarity::vm::ast::ASTRules; use clarity::vm::clarity::TransactionConnection; +use clarity::vm::costs::ExecutionCost; use clarity::vm::database::BurnStateDB; use clarity::vm::errors::Error as InterpreterError; use clarity::vm::types::TypeSignature; -use clarity::vm::costs::ExecutionCost; use serde::Deserialize; use stacks_common::util::get_epoch_time_ms; @@ -47,31 +47,31 @@ use crate::burnchains::PublicKey; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx}; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; -use crate::chainstate::stacks::address::StacksAddressExtensions; -use crate::chainstate::nakamoto::SetupBlockResult; use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::nakamoto::NakamotoBlockHeader; use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::nakamoto::SetupBlockResult; +use 
crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::transactions::{ handle_clarity_runtime_error, ClarityRuntimeTxError, }; +use crate::chainstate::stacks::db::StacksHeaderInfo; use crate::chainstate::stacks::db::{ blocks::MemPoolRejection, ChainstateTx, ClarityTx, MinerRewardInfo, StacksChainState, MINER_REWARD_MATURITY, }; -use crate::chainstate::stacks::StacksBlockHeader; -use crate::chainstate::stacks::db::StacksHeaderInfo; +use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt}; +use crate::chainstate::stacks::miner::BlockBuilder; use crate::chainstate::stacks::miner::BlockBuilderSettings; use crate::chainstate::stacks::miner::BlockLimitFunction; -use crate::chainstate::stacks::miner::TransactionResult; -use crate::chainstate::stacks::miner::TransactionSkipped; use crate::chainstate::stacks::miner::TransactionError; use crate::chainstate::stacks::miner::TransactionProblematic; -use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt}; +use crate::chainstate::stacks::miner::TransactionResult; +use crate::chainstate::stacks::miner::TransactionSkipped; use crate::chainstate::stacks::Error; +use crate::chainstate::stacks::StacksBlockHeader; use crate::chainstate::stacks::*; -use crate::chainstate::stacks::miner::BlockBuilder; use crate::clarity_vm::clarity::{ClarityConnection, ClarityInstance, Error as clarity_error}; use crate::core::mempool::*; use crate::core::*; @@ -89,7 +89,7 @@ use stacks_common::codec::{read_next, write_next, StacksMessageCodec}; use stacks_common::types::chainstate::BurnchainHeaderHash; use stacks_common::types::chainstate::StacksBlockId; use stacks_common::types::chainstate::TrieHash; -use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, ConsensusHash}; +use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksAddress}; use 
stacks_common::util::hash::Hash160; /// New tenure information @@ -97,7 +97,7 @@ pub struct NakamotoTenureStart { /// coinbase transaction for this miner pub coinbase_tx: StacksTransaction, /// VRF proof for this miner - pub vrf_proof: VRFProof + pub vrf_proof: VRFProof, } pub struct NakamotoBlockBuilder { @@ -145,7 +145,7 @@ impl NakamotoBlockBuilder { parent: &NakamotoBlockHeader, consensus_hash: &ConsensusHash, total_burn: u64, - proof: &VRFProof + proof: &VRFProof, ) -> NakamotoBlockBuilder { let parent_commit_hash_value = BlockHeaderHash(parent_tenure_id.0.clone()); NakamotoBlockBuilder { @@ -157,10 +157,15 @@ impl NakamotoBlockBuilder { matured_miner_rewards_opt: None, bytes_so_far: 0, txs: vec![], - header: NakamotoBlockHeader::from_parent_empty(parent.chain_length + 1, total_burn, consensus_hash.clone(), parent.block_id()) + header: NakamotoBlockHeader::from_parent_empty( + parent.chain_length + 1, + total_burn, + consensus_hash.clone(), + parent.block_id(), + ), } } - + /// Make a block builder atop a Nakamoto parent for a new block within a tenure pub fn continue_tenure_from_nakamoto_parent( parent: &NakamotoBlockHeader, @@ -177,17 +182,22 @@ impl NakamotoBlockBuilder { matured_miner_rewards_opt: None, bytes_so_far: 0, txs: vec![], - header: NakamotoBlockHeader::from_parent_empty(parent.chain_length + 1, total_burn, consensus_hash.clone(), parent.block_id()) + header: NakamotoBlockHeader::from_parent_empty( + parent.chain_length + 1, + total_burn, + consensus_hash.clone(), + parent.block_id(), + ), } } - + /// Make a block builder atop an epoch 2 parent for a new tenure pub fn new_tenure_from_epoch2_parent( parent: &StacksBlockHeader, parent_consensus_hash: &ConsensusHash, consensus_hash: &ConsensusHash, total_burn: u64, - proof: &VRFProof + proof: &VRFProof, ) -> NakamotoBlockBuilder { NakamotoBlockBuilder { epoch2_parent_header: Some((parent.clone(), parent_consensus_hash.clone())), @@ -198,14 +208,17 @@ impl NakamotoBlockBuilder { 
matured_miner_rewards_opt: None, bytes_so_far: 0, txs: vec![], - header: NakamotoBlockHeader::from_parent_empty(parent.total_work.work + 1, total_burn, consensus_hash.clone(), StacksBlockId::new(parent_consensus_hash, &parent.block_hash())) + header: NakamotoBlockHeader::from_parent_empty( + parent.total_work.work + 1, + total_burn, + consensus_hash.clone(), + StacksBlockId::new(parent_consensus_hash, &parent.block_hash()), + ), } } /// Make a block builder from genesis (testing only) - pub fn new_tenure_from_genesis( - proof: &VRFProof - ) -> NakamotoBlockBuilder { + pub fn new_tenure_from_genesis(proof: &VRFProof) -> NakamotoBlockBuilder { NakamotoBlockBuilder { epoch2_parent_header: None, nakamoto_parent_header: None, @@ -215,7 +228,7 @@ impl NakamotoBlockBuilder { matured_miner_rewards_opt: None, bytes_so_far: 0, txs: vec![], - header: NakamotoBlockHeader::genesis() + header: NakamotoBlockHeader::genesis(), } } @@ -234,37 +247,54 @@ impl NakamotoBlockBuilder { // VRF proof, if we're starting a _new_ tenure (instead of continuing an existing one) vrf_proof_opt: Option, ) -> Result { - let builder = if let Some(parent_nakamoto_header) = parent_stacks_header.anchored_header.as_stacks_nakamoto() { + let builder = if let Some(parent_nakamoto_header) = + parent_stacks_header.anchored_header.as_stacks_nakamoto() + { // building atop a nakamoto block // new tenure? 
if let Some(vrf_proof) = vrf_proof_opt.as_ref() { - NakamotoBlockBuilder::new_tenure_from_nakamoto_parent(parent_tenure_id, parent_nakamoto_header, consensus_hash, total_burn, vrf_proof) - } - else { - NakamotoBlockBuilder::continue_tenure_from_nakamoto_parent(parent_nakamoto_header, consensus_hash, total_burn) + NakamotoBlockBuilder::new_tenure_from_nakamoto_parent( + parent_tenure_id, + parent_nakamoto_header, + consensus_hash, + total_burn, + vrf_proof, + ) + } else { + NakamotoBlockBuilder::continue_tenure_from_nakamoto_parent( + parent_nakamoto_header, + consensus_hash, + total_burn, + ) } - } - else if let Some(parent_epoch2_header) = parent_stacks_header.anchored_header.as_stacks_epoch2() { + } else if let Some(parent_epoch2_header) = + parent_stacks_header.anchored_header.as_stacks_epoch2() + { // building atop a stacks 2.x block. // we are necessarily starting a new tenure if let Some(vrf_proof) = vrf_proof_opt.as_ref() { - NakamotoBlockBuilder::new_tenure_from_epoch2_parent(parent_epoch2_header, &parent_stacks_header.consensus_hash, consensus_hash, total_burn, vrf_proof) - } - else { + NakamotoBlockBuilder::new_tenure_from_epoch2_parent( + parent_epoch2_header, + &parent_stacks_header.consensus_hash, + consensus_hash, + total_burn, + vrf_proof, + ) + } else { // not allowed warn!("Failed to start a Nakamoto tenure atop a Stacks 2.x block -- missing a VRF proof"); return Err(Error::ExpectedTenureChange); } - } - else { + } else { // not reachable -- no other choices - return Err(Error::InvalidStacksBlock("Parent is neither a Nakamoto block nor a Stacks 2.x block".into())); + return Err(Error::InvalidStacksBlock( + "Parent is neither a Nakamoto block nor a Stacks 2.x block".into(), + )); }; Ok(builder) } - /// This function should be called before `tenure_begin`. 
/// It creates a MinerTenureInfo struct which owns connections to the chainstate and sortition /// DBs, so that block-processing is guaranteed to terminate before the lives of these handles @@ -273,11 +303,9 @@ impl NakamotoBlockBuilder { &self, chainstate: &'a mut StacksChainState, burn_dbconn: &'a SortitionDBConn, - tenure_start: bool + tenure_start: bool, ) -> Result, Error> { - debug!( - "Nakamoto miner tenure begin" - ); + debug!("Nakamoto miner tenure begin"); let burn_tip = SortitionDB::get_canonical_chain_tip_bhh(burn_dbconn.conn())?; let burn_tip_height = @@ -285,43 +313,73 @@ impl NakamotoBlockBuilder { let mainnet = chainstate.config().mainnet; - let (chain_tip, parent_consensus_hash, parent_header_hash) = if let Some(nakamoto_parent_header) = self.nakamoto_parent_header.as_ref() { - // parent is a nakamoto block - let parent_header_info = NakamotoChainState::get_block_header( - chainstate.db(), - &StacksBlockId::new(&nakamoto_parent_header.consensus_hash, &nakamoto_parent_header.block_hash()) - )? - .ok_or(Error::NoSuchBlockError) - .map_err(|e| { - warn!("No such Nakamoto parent block {}/{} ({})", &nakamoto_parent_header.consensus_hash, &nakamoto_parent_header.block_hash(), &nakamoto_parent_header.block_id()); - e - })?; - - (parent_header_info, nakamoto_parent_header.consensus_hash.clone(), nakamoto_parent_header.block_hash()) - } - else if let Some((stacks_header, consensus_hash)) = self.epoch2_parent_header.as_ref() { - // parent is a Stacks epoch2 block - let parent_header_info = NakamotoChainState::get_block_header( + let (chain_tip, parent_consensus_hash, parent_header_hash) = + if let Some(nakamoto_parent_header) = self.nakamoto_parent_header.as_ref() { + // parent is a nakamoto block + let parent_header_info = NakamotoChainState::get_block_header( + chainstate.db(), + &StacksBlockId::new( + &nakamoto_parent_header.consensus_hash, + &nakamoto_parent_header.block_hash(), + ), + )? 
+ .ok_or(Error::NoSuchBlockError) + .map_err(|e| { + warn!( + "No such Nakamoto parent block {}/{} ({})", + &nakamoto_parent_header.consensus_hash, + &nakamoto_parent_header.block_hash(), + &nakamoto_parent_header.block_id() + ); + e + })?; + + ( + parent_header_info, + nakamoto_parent_header.consensus_hash.clone(), + nakamoto_parent_header.block_hash(), + ) + } else if let Some((stacks_header, consensus_hash)) = self.epoch2_parent_header.as_ref() + { + // parent is a Stacks epoch2 block + let parent_header_info = NakamotoChainState::get_block_header( + chainstate.db(), + &StacksBlockId::new(consensus_hash, &stacks_header.block_hash()), + )? + .ok_or(Error::NoSuchBlockError) + .map_err(|e| { + warn!( + "No such Stacks 2.x parent block {}/{} ({})", + &consensus_hash, + &stacks_header.block_hash(), + &StacksBlockId::new(&consensus_hash, &stacks_header.block_hash()) + ); + e + })?; + + ( + parent_header_info, + consensus_hash.clone(), + stacks_header.block_hash(), + ) + } else { + // parent is genesis (testing only) + ( + StacksHeaderInfo::regtest_genesis(), + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ) + }; + + let tenure_height = if let Ok(Some(parent_tenure_height)) = + NakamotoChainState::get_tenure_height( chainstate.db(), - &StacksBlockId::new(consensus_hash, &stacks_header.block_hash()) - )? 
- .ok_or(Error::NoSuchBlockError) - .map_err(|e| { - warn!("No such Stacks 2.x parent block {}/{} ({})", &consensus_hash, &stacks_header.block_hash(), &StacksBlockId::new(&consensus_hash, &stacks_header.block_hash())); - e - })?; - - (parent_header_info, consensus_hash.clone(), stacks_header.block_hash()) - } - else { - // parent is genesis (testing only) - (StacksHeaderInfo::regtest_genesis(), FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), FIRST_STACKS_BLOCK_HASH.clone()) - }; - - let tenure_height = if let Ok(Some(parent_tenure_height)) = NakamotoChainState::get_tenure_height(chainstate.db(), &StacksBlockId::new(&parent_consensus_hash, &parent_header_hash)) { - parent_tenure_height.checked_add(1).expect("Blockchain overflow") - } - else { + &StacksBlockId::new(&parent_consensus_hash, &parent_header_hash), + ) { + parent_tenure_height + .checked_add(1) + .expect("Blockchain overflow") + } else { 0 }; @@ -339,7 +397,7 @@ impl NakamotoBlockBuilder { parent_stacks_block_height: chain_tip.stacks_block_height, parent_burn_block_height: chain_tip.burn_header_height, tenure_start, - tenure_height + tenure_height, }) } @@ -388,13 +446,11 @@ impl NakamotoBlockBuilder { // write out the trie... let consumed = tx.commit_mined_block(&index_block_hash); - test_debug!( - "\n\nFinished mining. Trie is in mined_blocks table.\n", - ); + test_debug!("\n\nFinished mining. Trie is in mined_blocks table.\n",); consumed } - + /// Finish constructing a Nakamoto block. /// The block will not be signed yet. 
/// Returns the unsigned Nakamoto block @@ -419,7 +475,7 @@ impl NakamotoBlockBuilder { }; test_debug!( - "\n\nMined Nakamoo block {}, {} transactions, state root is {}\n", + "\n\nMined Nakamoto block {}, {} transactions, state root is {}\n", block.header.block_hash(), block.txs.len(), state_root_hash @@ -427,6 +483,7 @@ impl NakamotoBlockBuilder { info!( "Miner: mined Nakamoto block"; + "consensus_hash" => %block.header.consensus_hash, "block_hash" => %block.header.block_hash(), "block_height" => block.header.chain_length, "num_txs" => block.txs.len(), @@ -439,11 +496,8 @@ impl NakamotoBlockBuilder { /// Finish building the Nakamoto block pub fn mine_nakamoto_block(&mut self, clarity_tx: &mut ClarityTx) -> NakamotoBlock { - NakamotoChainState::finish_block( - clarity_tx, - self.matured_miner_rewards_opt.as_ref(), - ) - .expect("FATAL: call to `finish_block` failed"); + NakamotoChainState::finish_block(clarity_tx, self.matured_miner_rewards_opt.as_ref()) + .expect("FATAL: call to `finish_block` failed"); self.finalize_block(clarity_tx) } @@ -485,12 +539,13 @@ impl NakamotoBlockBuilder { parent_stacks_header, consensus_hash, total_burn, - new_tenure_info.as_ref().map(|info| info.vrf_proof.clone()) + new_tenure_info.as_ref().map(|info| info.vrf_proof.clone()), )?; let ts_start = get_epoch_time_ms(); - let mut miner_tenure_info = builder.load_tenure_info(&mut chainstate, burn_dbconn, new_tenure_info.is_some())?; + let mut miner_tenure_info = + builder.load_tenure_info(&mut chainstate, burn_dbconn, new_tenure_info.is_some())?; let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; let block_limit = tenure_tx @@ -505,7 +560,7 @@ impl NakamotoBlockBuilder { new_tenure_info.as_ref().map(|info| &info.coinbase_tx), settings, event_observer, - ASTRules::PrecheckSize + ASTRules::PrecheckSize, ) { Ok(x) => x, Err(e) => { @@ -558,7 +613,7 @@ impl NakamotoBlockBuilder { Ok((block, consumed, size)) } - + #[cfg(test)] pub fn make_nakamoto_block_from_txs( 
mut self, @@ -568,21 +623,35 @@ impl NakamotoBlockBuilder { ) -> Result<(NakamotoBlock, u64, ExecutionCost), Error> { debug!("Build Nakamoto block from {} transactions", txs.len()); let (mut chainstate, _) = chainstate_handle.reopen()?; - - let new_tenure = txs.iter().find(|txn| if let TransactionPayload::TenureChange(..) = txn.payload { true } else { false }).is_some(); - let mut miner_tenure_info = self.load_tenure_info(&mut chainstate, burn_dbconn, new_tenure)?; + let new_tenure = txs + .iter() + .find(|txn| { + if let TransactionPayload::TenureChange(..) = txn.payload { + true + } else { + false + } + }) + .is_some(); + + let mut miner_tenure_info = + self.load_tenure_info(&mut chainstate, burn_dbconn, new_tenure)?; let mut tenure_tx = self.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; for tx in txs.drain(..) { let tx_len = tx.tx_len(); - match self.try_mine_tx_with_len(&mut tenure_tx, &tx, tx_len, &BlockLimitFunction::NO_LIMIT_HIT, ASTRules::PrecheckSize) { + match self.try_mine_tx_with_len( + &mut tenure_tx, + &tx, + tx_len, + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + ) { TransactionResult::Success(..) => { debug!("Included {}", &tx.txid()); } TransactionResult::Skipped(TransactionSkipped { error, .. }) - | TransactionResult::ProcessingError(TransactionError { - error, .. - }) => { + | TransactionResult::ProcessingError(TransactionError { error, .. }) => { match error { Error::BlockTooBigError => { // done mining -- our execution budget is exceeded. @@ -608,9 +677,7 @@ impl NakamotoBlockBuilder { } } } - TransactionResult::Problematic(TransactionProblematic { - tx, .. - }) => { + TransactionResult::Problematic(TransactionProblematic { tx, .. 
}) => { // drop from the mempool debug!("Encountered problematic transaction {}", &tx.txid()); return Err(Error::ProblematicTransaction(tx.txid())); From 80f7217ac8d7b3f2e5e0008e07edcc7fff62e3d2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:48:31 -0500 Subject: [PATCH 089/122] feat: verify the VRF of a Nakamoto tenure and verify Nakamoto block-commit VRF seeds against previously-processed Nakamoto tenures. Also, add some preliminary verification work for the coinbase and tenure-change transactions (enough that we can mock tenures with TestPeer while driving the VRF), and add acceptance logic to check that a Nakamoto block is consistent with the burnchain state in which its tenure resides --- stackslib/src/chainstate/nakamoto/mod.rs | 802 +++++++++++++++++++---- 1 file changed, 681 insertions(+), 121 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 605c324f9a..8779354501 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::collections::HashSet; use std::ops::DerefMut; use clarity::vm::ast::ASTRules; @@ -31,19 +32,21 @@ use stacks_common::codec::{ use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, MINER_REWARD_MATURITY, }; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::chainstate::VRFSeed; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksBlockId, TrieHash, }; +use stacks_common::types::PrivateKey; use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::hash::{Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; -use stacks_common::util::secp256k1::{MessageSignature}; -use stacks_common::types::chainstate::StacksPrivateKey; -use stacks_common::types::chainstate::StacksPublicKey; -use stacks_common::types::PrivateKey; +use stacks_common::util::secp256k1::MessageSignature; +use stacks_common::util::vrf::{VRFProof, VRF}; -use super::burn::db::sortdb::{SortitionHandleConn, SortitionHandleTx}; +use super::burn::db::sortdb::{get_block_commit_by_txid, SortitionHandleConn, SortitionHandleTx}; use super::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp}; use super::stacks::db::accounts::MinerReward; use super::stacks::db::blocks::StagingUserBurnSupport; @@ -57,14 +60,19 @@ use super::stacks::{ TenureChangeError, TenureChangePayload, TransactionPayload, }; use crate::burnchains::PoxConstants; +use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::operations::LeaderBlockCommitOp; +use crate::chainstate::burn::operations::LeaderKeyRegisterOp; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::stacks::db::DBConfig as 
ChainstateConfig; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{MINER_BLOCK_CONSENSUS_HASH, MINER_BLOCK_HEADER_HASH}; use crate::clarity_vm::clarity::{ClarityInstance, PreCommitClarityBlock}; use crate::clarity_vm::database::SortitionDBRef; use crate::monitoring; use crate::util_lib::db::{ - query_row_panic, query_rows, u64_to_sql, DBConn, Error as DBError, FromRow, + query_row, query_row_panic, query_rows, u64_to_sql, DBConn, Error as DBError, FromRow, }; use crate::core::BOOT_BLOCK_HASH; @@ -196,6 +204,8 @@ lazy_static! { tenure_changed INTEGER NOT NULL, -- this field tracks the total tx fees so far in this tenure. it is a text-serialized u128 tenure_tx_fees TEXT NOT NULL, + -- nakamoto block's VRF proof, if this is a tenure-start block + vrf_proof TEXT, PRIMARY KEY(consensus_hash,block_hash) ); "#.into(), @@ -346,8 +356,7 @@ impl NakamotoBlockHeader { pub fn recover_miner_pk(&self) -> Option { let signed_hash = self.signature_hash().ok()?; let recovered_pk = - StacksPublicKey::recover_to_pubkey(signed_hash.bits(), &self.miner_signature) - .ok()?; + StacksPublicKey::recover_to_pubkey(signed_hash.bits(), &self.miner_signature).ok()?; Some(recovered_pk) } @@ -392,7 +401,7 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - stacker_signature: MessageSignature::empty() + stacker_signature: MessageSignature::empty(), } } @@ -407,10 +416,10 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - stacker_signature: MessageSignature::empty() + stacker_signature: MessageSignature::empty(), } } - + /// Make a genesis header (testing only) pub fn genesis() -> NakamotoBlockHeader { NakamotoBlockHeader { @@ -422,27 +431,47 @@ impl NakamotoBlockHeader { tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), state_index_root: 
TrieHash([0u8; 32]), miner_signature: MessageSignature::empty(), - stacker_signature: MessageSignature::empty() + stacker_signature: MessageSignature::empty(), } } } impl NakamotoBlock { - /// Did the stacks tenure change on this nakamoto block? i.e., does this block - /// include a TenureChange transaction? - pub fn tenure_changed(&self, parent: &StacksBlockId) -> bool { + /// Find all positionally-valid tenure changes in this block. + /// They must be the first transactions. + /// Return their indexes into self.txs + fn find_tenure_changes(&self) -> Vec { + let mut ret = vec![]; + for (i, tx) in self.txs.iter().enumerate() { + if let TransactionPayload::TenureChange(..) = &tx.payload { + ret.push(i); + } else { + break; + } + } + ret + } + + /// Does this block contain one or more well-formed and valid tenure change transactions? + /// Return Some(true) if it does contain at least one, and they're all valid + /// Return Some(false) if it does contain at least one, but at least one is invalid + /// Return None if it contains none. 
+ pub fn tenure_changed(&self) -> Option { + let wellformed = self.is_wellformed_first_tenure_block(); + if wellformed.is_none() { + // block isn't a first-tenure block, so no valid tenure changes + return None; + } + // Find all txs that have TenureChange payload let tenure_changes = self - .txs + .find_tenure_changes() .iter() - .filter_map(|tx| match &tx.payload { - TransactionPayload::TenureChange(payload) => Some(payload), - _ => None, - }) + .map(|i| &self.txs[*i]) .collect::>(); if tenure_changes.len() > 1 { - warn!( + debug!( "Block contains multiple TenureChange transactions"; "tenure_change_txs" => tenure_changes.len(), "parent_block_id" => %self.header.parent_block_id, @@ -450,19 +479,26 @@ impl NakamotoBlock { ); } - let validate = |tc: &TenureChangePayload| -> Result<(), TenureChangeError> { - if tc.previous_tenure_end != *parent { - return Err(TenureChangeError::PreviousTenureInvalid); - } + let validate = |tc: &StacksTransaction| -> Result<(), TenureChangeError> { + if let TransactionPayload::TenureChange(tc) = &tc.payload { + if tc.previous_tenure_end != self.header.parent_block_id { + return Err(TenureChangeError::PreviousTenureInvalid); + } - tc.validate() + tc.validate() + } else { + // placeholder error + Err(TenureChangeError::NotNakamoto) + } }; // Return true if there is a valid TenureChange - tenure_changes - .iter() - .find(|tc| validate(tc).is_ok()) - .is_some() + Some( + tenure_changes + .iter() + .find(|tc| validate(tc).is_ok()) + .is_some(), + ) } pub fn is_first_mined(&self) -> bool { @@ -473,38 +509,319 @@ impl NakamotoBlock { /// It's the first non-TenureChange transaction /// (and, all preceding transactions _must_ be TenureChanges) pub fn get_coinbase_tx(&self) -> Option<&StacksTransaction> { - let mut tx_ref = None; - for tx in self.txs.iter() { - if let TransactionPayload::TenureChange(..) = &tx.payload { - if tx_ref.is_none() { - continue; - } - // non-TenureChange tx precedes a coinbase, so there's no valid coinbase. 
- // (a coinbase in any other position is invalid anyway). - return None; + let wellformed = self.is_wellformed_first_tenure_block(); + if wellformed.is_none() { + // block isn't a first-tenure block, so no coinbase + return None; + } + if let Some(false) = wellformed { + // block isn't well-formed + return None; + } + + // there is one coinbase. + // go find it. + self.txs.iter().find(|tx| { + if let TransactionPayload::Coinbase(..) = &tx.payload { + true + } else { + false } - else if let TransactionPayload::Coinbase(..) = &tx.payload { - if tx_ref.is_none() { - // contender - tx_ref = Some(tx); + }) + } + + /// Get the VRF proof from this block. + /// It's Some(..) only if there's a coinbase + pub fn get_vrf_proof(&self) -> Option<&VRFProof> { + self.get_coinbase_tx() + .map(|coinbase_tx| { + if let TransactionPayload::Coinbase(_, _, vrf_proof) = &coinbase_tx.payload { + vrf_proof.as_ref() + } else { + // actually unreachable + None } - else { - // multiple coinbases, so none of them are valid. - return None; + }) + .flatten() + } + + /// Determine if this is a well-formed first block in a tenure. + /// * It has one or more TenureChange transactions + /// * It then has a coinbase + /// * Coinbases and TenureChanges do not occur anywhere else + /// + /// Returns Some(true) if the above are true + /// Returns Some(false) if this block has at least one coinbase or TenureChange tx, but one of + /// the above checks are false + /// Returns None if this block has no coinbase or TenureChange txs + pub fn is_wellformed_first_tenure_block(&self) -> Option { + // sanity check -- this may contain no coinbases or tenure-changes + let coinbase_positions = self + .txs + .iter() + .enumerate() + .filter_map(|(i, tx)| { + if let TransactionPayload::Coinbase(..) = &tx.payload { + Some(i) + } else { + None } + }) + .collect::>(); + + let tenure_change_positions = self + .txs + .iter() + .enumerate() + .filter_map(|(i, tx)| { + if let TransactionPayload::TenureChange(..) 
= &tx.payload { + Some(i) + } else { + None + } + }) + .collect::>(); + + if coinbase_positions.len() == 0 && tenure_change_positions.len() == 0 { + // can't be a first block in a tenure + return None; + } + + if coinbase_positions.len() > 1 { + // has more than one coinbase + return Some(false); + } + + if coinbase_positions.len() == 1 && tenure_change_positions.len() == 0 { + // has a coinbase but no tenure change + return Some(false); + } + + if coinbase_positions.len() == 0 && tenure_change_positions.len() > 0 { + // has tenure-changes but no coinbase + return Some(false); + } + + // tenure-changes must all come first, and must be in order + for i in 0..tenure_change_positions.len() { + if i != tenure_change_positions[i] { + // tenure-change is out of place + return Some(false); } - else if tx_ref.is_none() { - // non-Coinbase and non-TenureChange tx, so there's no valid coinbase. - // (a coinbase in any other position is invalid anyway) - return None; - } } - tx_ref + + // coinbase must come next + if coinbase_positions + .first() + .expect("FATAL: coinbase_positions.len() == 1") + != &tenure_change_positions.len() + { + // coinbase is not the next transaction + return Some(false); + } + + return Some(true); + } + + /// Verify that the VRF seed of this block's block-commit is the hash of the parent tenure's + /// VRF seed. + pub fn validate_vrf_seed( + &self, + sortdb_conn: &Connection, + chainstate_conn: &Connection, + block_commit: &LeaderBlockCommitOp, + ) -> Result<(), ChainstateError> { + // the block-commit from the miner who created this coinbase must have a VRF seed that + // is the hash of the parent tenure's VRF proof. 
+        let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof(
+            chainstate_conn,
+            sortdb_conn,
+            &self.header.consensus_hash,
+            &block_commit.txid,
+        )?;
+        if !block_commit.new_seed.is_from_proof(&parent_vrf_proof) {
+            warn!("Invalid Nakamoto block-commit: seed does not match parent VRF proof";
+                "block_id" => %self.block_id(),
+                "commit_seed" => %block_commit.new_seed,
+                "proof_seed" => %VRFSeed::from_proof(&parent_vrf_proof),
+                "parent_vrf_proof" => %parent_vrf_proof.to_hex(),
+                "block_commit" => format!("{:?}", &block_commit)
+            );
+            return Err(ChainstateError::InvalidStacksBlock(
+                "Invalid Nakamoto block: bad VRF proof".into(),
+            ));
+        }
+        Ok(())
+    }
 
     pub fn block_id(&self) -> StacksBlockId {
         self.header.block_id()
     }
+
+    /// Validate this Nakamoto block header against burnchain state.
+    /// Used to determine whether or not we'll keep a block around (even if we don't yet have its parent).
+    ///
+    /// Arguments
+    /// -- `burn_chain_tip` is the BlockSnapshot containing the block-commit for this block's
+    /// tenure
+    /// -- `leader_key` is the miner's leader key registration transaction
+    /// -- `block_commit` is the block-commit for this tenure
+    ///
+    /// Verifies the following:
+    /// -- that this block falls into this block-commit's tenure
+    /// -- that this miner signed this block
+    /// -- if this block has a coinbase, then that its VRF proof was generated by this miner
+    /// -- that this block's burn total matches `burn_chain_tip`'s total burn
+    pub fn validate_against_burnchain(
+        &self,
+        burn_chain_tip: &BlockSnapshot,
+        leader_key: &LeaderKeyRegisterOp,
+    ) -> Result<(), ChainstateError> {
+        // this block's consensus hash must match the sortition that selected it
+        if burn_chain_tip.consensus_hash != self.header.consensus_hash {
+            warn!("Invalid Nakamoto block: consensus hash does not match sortition";
+                "consensus_hash" => %self.header.consensus_hash,
+                "sortition.consensus_hash" => %burn_chain_tip.consensus_hash
+            );
+            return 
Err(ChainstateError::InvalidStacksBlock(
+                "Invalid Nakamoto block: invalid consensus hash".into(),
+            ));
+        }
+
+        // miner must have signed this block
+        let miner_pubkey_hash160 = leader_key
+            .interpret_nakamoto_signing_key()
+            .ok_or(ChainstateError::NoSuchBlockError)
+            .map_err(|e| {
+                warn!(
+                    "Leader key did not contain a hash160 of the miner signing public key";
+                    "leader_key" => format!("{:?}", &leader_key),
+                );
+                e
+            })?;
+
+        let recovered_miner_pubk = self.header.recover_miner_pk().ok_or_else(|| {
+            warn!(
+                "Nakamoto Stacks block downloaded with unrecoverable miner public key";
+                "block_hash" => %self.header.block_hash(),
+                "block_id" => %self.header.block_id(),
+            );
+            return ChainstateError::InvalidStacksBlock("Unrecoverable miner public key".into());
+        })?;
+
+        let recovered_miner_hash160 = Hash160::from_node_public_key(&recovered_miner_pubk);
+        if recovered_miner_hash160 != miner_pubkey_hash160 {
+            warn!(
+                "Nakamoto Stacks block signature from {recovered_miner_pubk:?} mismatch: {recovered_miner_hash160} != {miner_pubkey_hash160} from leader-key";
+                "block_hash" => %self.header.block_hash(),
+                "block_id" => %self.header.block_id(),
+                "leader_key" => format!("{:?}", &leader_key),
+            );
+            return Err(ChainstateError::InvalidStacksBlock(
+                "Invalid miner signature".into(),
+            ));
+        }
+
+        // If this block has a coinbase, then verify that its VRF proof was generated by this
+        // block's miner. We'll verify that the seed of this block-commit was generated from the
+        // parent tenure's VRF proof via the `validate_vrf_seed()` method, which requires that we
+        // already have the parent block. 
+ if let Some(coinbase_tx) = self.get_coinbase_tx() { + let (_, _, vrf_proof_opt) = coinbase_tx + .try_as_coinbase() + .expect("FATAL: `get_coinbase_tx()` did not return a coinbase"); + let vrf_proof = vrf_proof_opt.ok_or(ChainstateError::InvalidStacksBlock( + "Nakamoto coinbase must have a VRF proof".into(), + ))?; + + // this block's VRF proof must have been generated from the last sortition's sortition + // hash (which includes the last commit's VRF seed) + let valid = match VRF::verify( + &leader_key.public_key, + vrf_proof, + burn_chain_tip.sortition_hash.as_bytes(), + ) { + Ok(v) => v, + Err(e) => { + warn!( + "Invalid Stacks block header {}: failed to verify VRF proof: {}", + self.header.block_hash(), + e + ); + false + } + }; + + if !valid { + warn!("Invalid Nakamoto block: leader VRF key did not produce a valid proof"; + "block_id" => %self.block_id(), + "leader_public_key" => %leader_key.public_key.to_hex(), + "sortition_hash" => %burn_chain_tip.sortition_hash + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: leader VRF key did not produce a valid proof".into(), + )); + } + } + + // this block must commit to all of the work seen so far + if self.header.burn_spent != burn_chain_tip.total_burn { + warn!("Invalid Nakamoto block header: invalid total burns"; + "header.burn_spent" => self.header.burn_spent, + "burn_chain_tip.total_burn" => burn_chain_tip.total_burn + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: invalid total burns".into(), + )); + } + // not verified by this method: + // * chain_length (need parent block header) + // * parent_block_id (need parent block header) + // * block-commit seed (need parent block) + // * tx_merkle_root (already verified; validated on deserialization) + // * state_index_root (validated on process_block()) + Ok(()) + } + + /// Static sanity checks on transactions. 
+ /// Verifies: + /// * that all txs are unique + /// * that all txs use the given network + /// * that all txs use the given chain ID + /// * if this is a tenure-start tx, that: + /// * it has a well-formed coinbase + /// * all TenureChange transactions are present and in the right order, starting with + /// `stacks_tip` and leading up to this block + /// * that only epoch-permitted transactions are present + pub fn validate_transactions_static( + &self, + mainnet: bool, + chain_id: u32, + epoch_id: StacksEpochId, + ) -> bool { + if !StacksBlock::validate_transactions_unique(&self.txs) { + return false; + } + if !StacksBlock::validate_transactions_network(&self.txs, mainnet) { + return false; + } + if !StacksBlock::validate_transactions_chain_id(&self.txs, chain_id) { + return false; + } + if let Some(valid) = self.tenure_changed() { + if !valid { + // bad tenure change + return false; + } + if self.get_coinbase_tx().is_none() { + return false; + } + } + if !StacksBlock::validate_transactions_static_epoch(&self.txs, epoch_id) { + return false; + } + return true; + } } impl NakamotoChainState { @@ -700,7 +1017,18 @@ impl NakamotoChainState { // find commit and sortition burns if this is a tenure-start block // TODO: store each *tenure* - let (commit_burn, sortition_burn) = if next_ready_block.tenure_changed(&parent_block_id) { + let tenure_changed = if let Some(tenure_valid) = next_ready_block.tenure_changed() { + if !tenure_valid { + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: invalid tenure change tx(s)".into(), + )); + } + true + } else { + false + }; + + let (commit_burn, sortition_burn) = if tenure_changed { // find block-commit to get commit-burn let block_commit = sort_tx .get_block_commit( @@ -804,6 +1132,87 @@ impl NakamotoChainState { Ok(Some(receipt)) } + /// Validate that a Nakamoto block attaches to the burn chain state. + /// Called before inserting the block into the staging DB. 
+ /// Wraps `NakamotoBlock::validate_against_burnchain()`, and + /// verifies that all transactions in the block are allowed in this epoch. + fn validate_nakamoto_block_burnchain( + db_handle: &SortitionHandleConn, + block: &NakamotoBlock, + mainnet: bool, + chain_id: u32, + ) -> Result<(), ChainstateError> { + // find the sortition-winning block commit for this block, as well as the block snapshot + // containing the parent block-commit + let block_hash = block.header.block_hash(); + let consensus_hash = &block.header.consensus_hash; + + // burn chain tip that selected this commit's block + let Some(burn_chain_tip) = + SortitionDB::get_block_snapshot_consensus(db_handle, &consensus_hash)? + else { + warn!("No sortition for {}", &consensus_hash); + return Err(ChainstateError::InvalidStacksBlock( + "No sortition for block's consensus hash".into(), + )); + }; + + // the block-commit itself + let Some(block_commit) = db_handle.get_block_commit_by_txid( + &burn_chain_tip.sortition_id, + &burn_chain_tip.winning_block_txid, + )? + else { + warn!( + "No block commit for {} in sortition for {}", + &burn_chain_tip.winning_block_txid, &consensus_hash + ); + return Err(ChainstateError::InvalidStacksBlock( + "No block-commit in sortition for block's consensus hash".into(), + )); + }; + + // key register of the winning miner + let leader_key = db_handle + .get_leader_key_at( + block_commit.key_block_ptr as u64, + block_commit.key_vtxindex as u32, + )? 
+ .expect("FATAL: have block commit but no leader key"); + + // attaches to burn chain + if let Err(e) = block.validate_against_burnchain(&burn_chain_tip, &leader_key) { + warn!( + "Invalid Nakamoto block, could not validate on burnchain"; + "consensus_hash" => %consensus_hash, + "block_hash" => %block_hash, + "error" => format!("{:?}", &e) + ); + + return Err(e); + } + + // check the _next_ block's tenure, since when Nakamoto's miner activates, the current chain tip + // will be in epoch 2.5 (the next block will be epoch 3.0) + let cur_epoch = + SortitionDB::get_stacks_epoch(db_handle.deref(), burn_chain_tip.block_height + 1)? + .expect("FATAL: no epoch defined for current Stacks block"); + + // static checks on transactions all pass + let valid = block.validate_transactions_static(mainnet, chain_id, cur_epoch.epoch_id); + if !valid { + warn!( + "Invalid Nakamoto block, transactions failed static checks: {}/{} (epoch {})", + consensus_hash, block_hash, cur_epoch.epoch_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: failed static transaction checks".into(), + )); + } + + Ok(()) + } + /// Accept a Nakamoto block into the staging blocks DB. /// Fails if: /// * the public key cannot be recovered from the miner's signature @@ -813,6 +1222,7 @@ impl NakamotoChainState { /// * we already have the block /// Returns true if we stored the block; false if not. pub fn accept_block( + config: &ChainstateConfig, block: NakamotoBlock, sortdb: &SortitionHandleConn, staging_db_tx: &rusqlite::Transaction, @@ -820,68 +1230,37 @@ impl NakamotoChainState { // do nothing if we already have this block if let Some(_) = Self::get_block_header(&staging_db_tx, &block.header.block_id())? { debug!("Already have block {}", &block.header.block_id()); - return Ok(false) + return Ok(false); } - // identify the winning block-commit - let sortition = SortitionDB::get_block_snapshot_consensus(sortdb, &block.header.consensus_hash)? 
- .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { - warn!("No block snapshot for {}", &block.header.consensus_hash); - e - })?; - - let block_commit = SortitionDB::get_block_commit(sortdb, &sortition.winning_block_txid, &sortition.sortition_id)? - .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { - warn!("No block commit {} off of sortition tip {}", &sortition.winning_block_txid, &sortition.sortition_id); - e - })?; - - // identify the leader key for this block-commit - let leader_key = SortitionDB::get_leader_key_at(sortdb, u64::from(block_commit.key_block_ptr), u32::from(block_commit.key_vtxindex), &sortition.sortition_id)? - .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { - warn!("No leader key at {},{} for block-commit {} off of sortition tip {}", block_commit.key_block_ptr, block_commit.key_vtxindex, &block_commit.txid, &sortition.sortition_id); - e - })?; - - let miner_pubkey_hash160 = leader_key.interpret_nakamoto_signing_key() - .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { - warn!( - "Leader key did not contain a hash160 of the miner signing public key"; - "leader_key" => format!("{:?}", &leader_key), - ); - e - })?; - - let recovered_miner_pubk = block.header.recover_miner_pk().ok_or_else(|| { + // if this is the first tenure block, then make sure it's well-formed + if let Some(false) = block.is_wellformed_first_tenure_block() { warn!( - "Nakamoto Stacks block downloaded with unrecoverable miner public key"; - "block_hash" => %block.header.block_hash(), - "block_id" => %block.header.block_id(), + "Block {} is not a well-formed first tenure block", + &block.block_id() ); - return ChainstateError::InvalidStacksBlock("Unrecoverable miner public key".into()); - })?; + return Err(ChainstateError::InvalidStacksBlock( + "Not a well-formed first block".into(), + )); + } - let recovered_miner_hash160 = Hash160::from_node_public_key(&recovered_miner_pubk); - if recovered_miner_hash160 != miner_pubkey_hash160 { - warn!( - 
"Nakamoto Stacks block signature from {recovered_miner_pubk:?} mismatch: {recovered_miner_hash160} != {miner_pubkey_hash160} from leader-key"; - "block_hash" => %block.header.block_hash(), - "block_id" => %block.header.block_id(), - "leader_key" => format!("{:?}", &leader_key), - "block_commit" => format!("{:?}", &block_commit) + // this block must be consistent with its miner's leader-key and block-commit, and must + // contain only transactions that are valid in this epoch. + if let Err(e) = + Self::validate_nakamoto_block_burnchain(sortdb, &block, config.mainnet, config.chain_id) + { + warn!("Unacceptable Nakamoto block; will not store"; + "block_id" => %block.block_id(), + "error" => format!("{:?}", &e) ); - return Err(ChainstateError::InvalidStacksBlock("Invalid miner signature".into())); - } + return Ok(false); + }; if !sortdb.expects_stacker_signature( &block.header.consensus_hash, &block.header.stacker_signature, )? { - let msg = format!("Received block, signed by {recovered_miner_pubk:?}, but the stacker signature does not match the active stacking cycle"); + let msg = format!("Received block, but the stacker signature does not match the active stacking cycle"); warn!("{}", msg); return Err(ChainstateError::InvalidStacksBlock(msg)); } @@ -1071,7 +1450,7 @@ impl NakamotoChainState { for candidate in candidate_headers.into_iter() { let Ok(Some(ancestor_at_height)) = - tx.get_ancestor_block_hash(tenure_height, tip_index_hash) + tx.get_ancestor_block_hash(candidate.stacks_block_height, tip_index_hash) else { // if there's an error or no result, this candidate doesn't match, so try next candidate continue; @@ -1137,13 +1516,110 @@ impl NakamotoChainState { /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) pub fn get_canonical_block_header( - conn: &Connection, + chainstate_conn: &Connection, sortdb: &SortitionDB, ) -> Result, ChainstateError> { - let (consensus_hash, block_bhh) = + let (consensus_hash, block_hash) = 
SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; - let index_block_hash = StacksBlockId::new(&consensus_hash, &block_bhh); - Self::get_block_header(conn, &index_block_hash) + Self::get_block_header( + chainstate_conn, + &StacksBlockId::new(&consensus_hash, &block_hash), + ) + } + + /// Get the parent header of a Nakamoto block. + /// It might be an epoch 2.x block header + pub fn get_block_header_by_consensus_hash( + chainstate_conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let nakamoto_header_info = + Self::get_nakamoto_tenure_start_block_header(chainstate_conn, consensus_hash)?; + if nakamoto_header_info.is_some() { + return Ok(nakamoto_header_info); + } + + // parent might be epoch 2 + let epoch2_header_info = StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chainstate_conn, + consensus_hash, + )?; + Ok(epoch2_header_info) + } + + /// Get the VRF proof for a Stacks block. + /// This works for either Nakamoto or epoch 2.x + pub fn get_block_vrf_proof( + chainstate_conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let Some(start_header) = NakamotoChainState::get_block_header_by_consensus_hash( + chainstate_conn, + consensus_hash, + )? + else { + return Ok(None); + }; + + let vrf_proof = match start_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(epoch2_header) => Some(epoch2_header.proof), + StacksBlockHeaderTypes::Nakamoto(..) => { + NakamotoChainState::get_nakamoto_tenure_vrf_proof(chainstate_conn, consensus_hash)? + } + }; + + Ok(vrf_proof) + } + + /// Get the VRF proof of the parent tenure (either Nakamoto or epoch 2.x) of the block + /// identified by the given consensus hash. + /// The parent must already have been processed. + /// + /// `consensus_hash` identifies the child block. + /// `block_commit_txid` identifies the child block's tenure's block-commit tx + /// + /// Returns the proof of this block's parent tenure on success. 
+ /// + /// Returns InvalidStacksBlock if the sortition for `consensus_hash` does not exist, or if its + /// parent sortition doesn't exist (i.e. the sortition DB is missing something) + /// + /// Returns NoSuchBlockError if the block header for `consensus_hash` does not exist, or if the + /// parent block header info does not exist (i.e. the chainstate DB is missing something) + pub fn get_parent_vrf_proof( + chainstate_conn: &Connection, + sortdb_conn: &Connection, + consensus_hash: &ConsensusHash, + block_commit_txid: &Txid, + ) -> Result { + let sn = SortitionDB::get_block_snapshot_consensus(sortdb_conn, consensus_hash)?.ok_or( + ChainstateError::InvalidStacksBlock("No sortition for consensus hash".into()), + )?; + + let parent_sortition_id = SortitionDB::get_block_commit_parent_sortition_id( + sortdb_conn, + &block_commit_txid, + &sn.sortition_id, + )? + .ok_or(ChainstateError::InvalidStacksBlock( + "Parent block-commit is not in this block's sortition history".into(), + ))?; + + let parent_sn = SortitionDB::get_block_snapshot(sortdb_conn, &parent_sortition_id)?.ok_or( + ChainstateError::InvalidStacksBlock( + "Parent block-commit does not have a sortition".into(), + ), + )?; + + let parent_vrf_proof = + Self::get_block_vrf_proof(chainstate_conn, &parent_sn.consensus_hash)? 
+ .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!("Nakamoto block has no parent"; + "block consensus_hash" => %consensus_hash); + e + })?; + + Ok(parent_vrf_proof) } /// Get the first block header in a Nakamoto tenure @@ -1177,7 +1653,7 @@ impl NakamotoChainState { pub fn get_nakamoto_block_status( conn: &Connection, consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash + block_hash: &BlockHeaderHash, ) -> Result, ChainstateError> { let sql = "SELECT (processed, orphaned) FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; let args: &[&dyn ToSql] = &[consensus_hash, block_hash]; @@ -1188,14 +1664,67 @@ impl NakamotoChainState { .map(|(processed, orphaned): (u32, u32)| (processed != 0, orphaned != 0))) } + /// Get the VRF proof for a Nakamoto block, if it exists. + /// Returns None if the Nakamoto block's VRF proof is not found (e.g. because there is no + /// Nakamoto block) + pub fn get_nakamoto_tenure_vrf_proof( + conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let sql = "SELECT vrf_proof FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND tenure_changed = 1"; + let args: &[&dyn ToSql] = &[consensus_hash]; + let proof_bytes: Option = query_row(conn, sql, args)?; + if let Some(bytes) = proof_bytes { + let proof = VRFProof::from_hex(&bytes) + .ok_or(DBError::Corruption) + .map_err(|e| { + warn!("Failed to load VRF proof: could not decode"; + "vrf_proof" => %bytes, + "consensus_hash" => %consensus_hash + ); + e + })?; + Ok(Some(proof)) + } else { + Ok(None) + } + } + + /// Verify that a nakamoto block's block-commit's VRF seed is consistent with the VRF proof + fn check_block_commit_vrf_seed( + chainstate_conn: &Connection, + sortdb_conn: &Connection, + block: &NakamotoBlock, + ) -> Result<(), ChainstateError> { + // get the block-commit for this block + let sn = + SortitionDB::get_block_snapshot_consensus(sortdb_conn, &block.header.consensus_hash)? 
+ .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!("No block-commit for block"; "block_id" => %block.block_id()); + e + })?; + + let block_commit = + get_block_commit_by_txid(sortdb_conn, &sn.sortition_id, &sn.winning_block_txid)? + .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!("No block-commit for block"; "block_id" => %block.block_id()); + e + })?; + + block.validate_vrf_seed(sortdb_conn, chainstate_conn, &block_commit) + } + /// Insert a nakamoto block header that is paired with an /// already-existing block commit and snapshot /// /// `header` should be a pointer to the header in `tip_info`. - pub fn insert_stacks_block_header( + fn insert_stacks_block_header( tx: &Connection, tip_info: &StacksHeaderInfo, header: &NakamotoBlockHeader, + vrf_proof: Option<&VRFProof>, block_cost: &ExecutionCost, total_tenure_cost: &ExecutionCost, tenure_height: u64, @@ -1224,6 +1753,8 @@ impl NakamotoChainState { assert!(*stacks_block_height < u64::try_from(i64::MAX).unwrap()); + let vrf_proof_bytes = vrf_proof.map(|proof| proof.to_hex()); + let args: &[&dyn ToSql] = &[ &u64_to_sql(*stacks_block_height)?, &index_root, @@ -1248,6 +1779,7 @@ impl NakamotoChainState { &header.parent_block_id, &u64_to_sql(tenure_height)?, if tenure_changed { &1i64 } else { &0 }, + &vrf_proof_bytes.as_ref(), ]; tx.execute( @@ -1267,8 +1799,9 @@ impl NakamotoChainState { tenure_tx_fees, parent_block_id, tenure_height, - tenure_changed) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23)", + tenure_changed, + vrf_proof) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)", args )?; @@ -1277,11 +1810,12 @@ impl NakamotoChainState { /// Append a Stacks block to an existing Stacks block, and grant the miner the block reward. /// Return the new Stacks header info. 
- pub fn advance_tip( + fn advance_tip( headers_tx: &mut StacksDBTx, parent_tip: &StacksBlockHeaderTypes, parent_consensus_hash: &ConsensusHash, new_tip: &NakamotoBlockHeader, + new_vrf_proof: Option<&VRFProof>, new_burn_header_hash: &BurnchainHeaderHash, new_burnchain_height: u32, new_burnchain_timestamp: u64, @@ -1299,10 +1833,7 @@ impl NakamotoChainState { block_fees: u128, ) -> Result { if new_tip.parent_block_id - != StacksBlockHeader::make_index_block_hash( - &FIRST_BURNCHAIN_CONSENSUS_HASH, - &FIRST_STACKS_BLOCK_HASH, - ) + != StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) { // not the first-ever block, so linkage must occur match parent_tip { @@ -1388,6 +1919,7 @@ impl NakamotoChainState { headers_tx.deref_mut(), &new_tip_info, &new_tip, + new_vrf_proof, anchor_block_cost, total_tenure_cost, tenure_height, @@ -1786,8 +2318,17 @@ impl NakamotoChainState { .block_height; let block_hash = block.header.block_hash(); + let tenure_changed = if let Some(tenures_valid) = block.tenure_changed() { + if !tenures_valid { + return Err(ChainstateError::InvalidStacksBlock( + "Invalid tenure changes in nakamoto block".into(), + )); + } + true + } else { + false + }; - let tenure_changed = block.tenure_changed(&parent_block_id); if !tenure_changed && (block.is_first_mined() || parent_ch != block.header.consensus_hash) { return Err(ChainstateError::ExpectedTenureChange); } @@ -1807,11 +2348,28 @@ impl NakamotoChainState { }; let tenure_height = if tenure_changed { + // TODO: this should be + ${num_tenures_passed_since_parent} parent_tenure_height + 1 } else { parent_tenure_height }; + // verify VRF proof, if present + // only need to do this once per tenure + // get the resulting vrf proof bytes + let vrf_proof_opt = if tenure_changed { + Self::check_block_commit_vrf_seed(chainstate_tx.deref(), burn_dbconn, block)?; + Some( + block + .get_vrf_proof() + .ok_or(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: has coinbase but no 
VRF proof".into(), + ))?, + ) + } else { + None + }; + let SetupBlockResult { mut clarity_tx, mut tx_receipts, @@ -1820,8 +2378,8 @@ impl NakamotoChainState { applied_epoch_transition, burn_stack_stx_ops, burn_transfer_stx_ops, - mut auto_unlock_events, burn_delegate_stx_ops, + mut auto_unlock_events, } = Self::setup_block( chainstate_tx, clarity_instance, @@ -1973,6 +2531,7 @@ impl NakamotoChainState { .ok_or_else(|| { warn!("While processing tenure change, failed to look up parent tenure"; "parent_tenure_height" => parent_tenure_height, + "parent_block_id" => %parent_block_id, "block_hash" => %block_hash, "block_consensus_hash" => %block.header.consensus_hash); ChainstateError::NoSuchBlockError @@ -2032,6 +2591,7 @@ impl NakamotoChainState { &parent_chain_tip.anchored_header, &parent_chain_tip.consensus_hash, &block.header, + vrf_proof_opt, chain_tip_burn_header_hash, chain_tip_burn_header_height, chain_tip_burn_header_timestamp, From c165f8084b7a4ef15e8182aa38181d0360a09159 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:50:41 -0500 Subject: [PATCH 090/122] refactor: move `get_account()` helper and API sync --- .../src/chainstate/nakamoto/tests/mod.rs | 46 ++++++++++++++++--- 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 97aa74fccd..e4b38bed06 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -19,13 +19,15 @@ use std::fs; use clarity::types::chainstate::{PoxId, SortitionId, StacksBlockId}; use clarity::vm::clarity::ClarityConnection; +use clarity::vm::types::StacksAddressExtensions; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; +use stacks_common::types::chainstate::StacksAddress; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksPrivateKey, StacksWorkScore, TrieHash, }; use 
stacks_common::types::{PrivateKey, StacksEpoch, StacksEpochId}; -use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum, hex_bytes}; +use stacks_common::util::hash::{hex_bytes, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof}; use stdext::prelude::Integer; @@ -41,7 +43,8 @@ use crate::chainstate::coordinator::tests::{ use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; use crate::chainstate::stacks::db::{ ChainStateBootData, ChainstateAccountBalance, ChainstateAccountLockup, ChainstateBNSName, - ChainstateBNSNamespace, StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo, + ChainstateBNSNamespace, StacksAccount, StacksBlockHeaderTypes, StacksChainState, + StacksHeaderInfo, }; use crate::chainstate::stacks::{ CoinbasePayload, SchnorrThresholdSignature, StacksBlockHeader, StacksTransaction, @@ -51,6 +54,34 @@ use crate::chainstate::stacks::{ use crate::core; use crate::core::StacksEpochExtension; +/// Get an address's account +pub fn get_account( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + addr: &StacksAddress, +) -> StacksAccount { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap() + .unwrap(); + debug!( + "Canonical block header is {}/{} ({}): {:?}", + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + &tip.index_block_hash(), + &tip + ); + + chainstate + .with_read_only_clarity_tx( + &sortdb.index_conn(), + &tip.index_block_hash(), + |clarity_conn| { + StacksChainState::get_account(clarity_conn, &addr.to_account_principal()) + }, + ) + .unwrap() +} + fn test_path(name: &str) -> String { format!("/tmp/stacks-node-tests/nakamoto-tests/{}", name) } @@ -99,10 +130,11 @@ pub fn nakamoto_advance_tip_simple() { let chain_tip_burn_header_hash = BurnchainHeaderHash([0; 32]); let chain_tip_burn_header_height = 1; let 
chain_tip_burn_header_timestamp = 100; - + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes).unwrap(); - let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, Some(proof)); + let coinbase_tx_payload = + TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, Some(proof)); let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&stacker_sk).unwrap(), @@ -308,10 +340,11 @@ pub fn staging_blocks() { block.header.miner_signature = miner_signature; + let config = chainstate.config(); let (chainstate_tx, _clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); let sortdb_conn = sort_db.index_handle_at_tip(); - NakamotoChainState::accept_block(block.clone(), &sortdb_conn, &chainstate_tx).unwrap(); + NakamotoChainState::accept_block(&config, block.clone(), &sortdb_conn, &chainstate_tx).unwrap(); chainstate_tx.commit().unwrap(); @@ -427,7 +460,8 @@ pub fn nakamoto_advance_tip_multiple() { let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); let proof = VRFProof::from_bytes(&proof_bytes).unwrap(); - let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([i; 32]), None, Some(proof)); + let coinbase_tx_payload = + TransactionPayload::Coinbase(CoinbasePayload([i; 32]), None, Some(proof)); let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&miner_sk).unwrap(), From 5030e9b0b0449f5103840767f620704d218cb50d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:51:02 -0500 Subject: [PATCH 091/122] fix: update the way we generate Nakamoto tenure information. 
In particular, we need to calculate the block-commit VRF seed from the parent tenure's VRF proof, and generate the new tenure's VRF proof _after_ we process the block-commit's sortition (since that contains sortition hash we prove on) --- .../src/chainstate/nakamoto/tests/node.rs | 458 +++++++++++++----- 1 file changed, 330 insertions(+), 128 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 67dde8a752..9c4f545fae 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -23,22 +23,24 @@ use std::io; use std::path::{Path, PathBuf}; use clarity::vm::clarity::ClarityConnection; -use clarity::vm::costs::LimitedCostTracker; use clarity::vm::costs::ExecutionCost; +use clarity::vm::costs::LimitedCostTracker; use clarity::vm::types::*; use rand::seq::SliceRandom; use rand::thread_rng; use rand::Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::SortitionId; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::VRFSeed; +use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::VRFProof; use stacks_common::util::vrf::VRFPublicKey; -use stacks_common::util::hash::Hash160; -use stacks_common::types::chainstate::SortitionId; -use stacks_common::types::chainstate::StacksBlockId; -use stacks_common::types::chainstate::BlockHeaderHash; +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::tests::*; use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::*; @@ -46,19 +48,21 @@ use crate::chainstate::burn::operations::{ BlockstackOperationType, LeaderBlockCommitOp, LeaderKeyRegisterOp, UserBurnSupportOp, }; use crate::chainstate::burn::*; +use 
crate::chainstate::coordinator::ChainsCoordinator; use crate::chainstate::coordinator::Error as CoordinatorError; -use crate::chainstate::coordinator::get_next_recipients; use crate::chainstate::coordinator::OnChainRewardSetProvider; +use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::nakamoto::tests::get_account; use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::nakamoto::NakamotoChainState; -use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::db::blocks::test::store_staging_block; use crate::chainstate::stacks::db::test::*; use crate::chainstate::stacks::db::*; use crate::chainstate::stacks::miner::*; -use crate::chainstate::stacks::StacksBlock; use crate::chainstate::stacks::Error as ChainstateError; +use crate::chainstate::stacks::StacksBlock; use crate::chainstate::stacks::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; use crate::chainstate::stacks::*; use crate::cost_estimates::metrics::UnitMetric; @@ -72,7 +76,7 @@ use crate::chainstate::stacks::tests::TestStacksNode; use crate::net::relay::Relayer; use crate::net::test::{TestPeer, TestPeerConfig}; -use crate::core::{STACKS_EPOCH_3_0_MARKER, BOOT_BLOCK_HASH}; +use crate::core::{BOOT_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER}; impl TestBurnchainBlock { pub fn add_nakamoto_tenure_commit( @@ -84,9 +88,20 @@ impl TestBurnchainBlock { leader_key: &LeaderKeyRegisterOp, fork_snapshot: Option<&BlockSnapshot>, parent_block_snapshot: Option<&BlockSnapshot>, + vrf_seed: VRFSeed, ) -> LeaderBlockCommitOp { let tenure_id_as_block_hash = BlockHeaderHash(last_tenure_id.0.clone()); - self.inner_add_block_commit(ic, miner, &tenure_id_as_block_hash, burn_fee, leader_key, fork_snapshot, parent_block_snapshot, STACKS_EPOCH_3_0_MARKER) + self.inner_add_block_commit( + ic, + miner, + &tenure_id_as_block_hash, + burn_fee, 
+ leader_key, + fork_snapshot, + parent_block_snapshot, + Some(vrf_seed), + STACKS_EPOCH_3_0_MARKER, + ) } } @@ -100,14 +115,18 @@ impl TestMiner { Hash160::from_node_public_key(&pubk) } - pub fn make_nakamoto_coinbase(&mut self, recipient: Option, vrf_proof: VRFProof) -> StacksTransaction { + pub fn make_nakamoto_coinbase( + &mut self, + recipient: Option, + vrf_proof: VRFProof, + ) -> StacksTransaction { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, self.as_transaction_auth().unwrap(), TransactionPayload::Coinbase( CoinbasePayload([(self.nonce % 256) as u8; 32]), recipient, - Some(vrf_proof) + Some(vrf_proof), ), ); tx_coinbase.chain_id = 0x80000000; @@ -119,13 +138,16 @@ impl TestMiner { let tx_coinbase_signed = tx_signer.get_tx().unwrap(); tx_coinbase_signed } - - pub fn make_nakamoto_tenure_change(&mut self, tenure_change: TenureChangePayload) -> StacksTransaction { + + pub fn make_nakamoto_tenure_change( + &mut self, + tenure_change: TenureChangePayload, + ) -> StacksTransaction { let mut tx_tenure_change = StacksTransaction::new( TransactionVersion::Testnet, // TODO: this needs to be a schnorr signature self.as_transaction_auth().unwrap(), - TransactionPayload::TenureChange(tenure_change) + TransactionPayload::TenureChange(tenure_change), ); tx_tenure_change.chain_id = 0x80000000; tx_tenure_change.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -143,7 +165,6 @@ impl TestMiner { } } - impl TestStacksNode { pub fn add_nakamoto_tenure_commit( sortdb: &SortitionDB, @@ -153,6 +174,7 @@ impl TestStacksNode { burn_amount: u64, key_op: &LeaderKeyRegisterOp, parent_block_snapshot: Option<&BlockSnapshot>, + vrf_seed: VRFSeed, ) -> LeaderBlockCommitOp { let block_commit_op = { let ic = sortdb.index_conn(); @@ -165,6 +187,7 @@ impl TestStacksNode { key_op, Some(&parent_snapshot), parent_block_snapshot, + vrf_seed, ) }; block_commit_op @@ -183,7 +206,10 @@ impl TestStacksNode { } } - pub fn get_nakamoto_tenure(&self, last_tenure_id: 
&StacksBlockId) -> Option> { + pub fn get_nakamoto_tenure( + &self, + last_tenure_id: &StacksBlockId, + ) -> Option> { match self.nakamoto_commit_ops.get(last_tenure_id) { None => None, Some(idx) => Some(self.nakamoto_blocks[*idx].clone()), @@ -209,6 +235,24 @@ impl TestStacksNode { &last_tenure_id, ); + let parent_block = + NakamotoChainState::get_block_header(self.chainstate.db(), last_tenure_id) + .unwrap() + .unwrap(); + let vrf_proof = NakamotoChainState::get_block_vrf_proof( + self.chainstate.db(), + &parent_block.consensus_hash, + ) + .unwrap() + .unwrap(); + + debug!( + "proof from parent in {} is {}", + &parent_block.consensus_hash, + &vrf_proof.to_hex() + ); + let vrf_seed = VRFSeed::from_proof(&vrf_proof); + // send block commit for this block let block_commit_op = TestStacksNode::add_nakamoto_tenure_commit( sortdb, @@ -218,6 +262,7 @@ impl TestStacksNode { burn_amount, miner_key, parent_block_snapshot_opt, + vrf_seed, ); test_debug!( @@ -231,10 +276,8 @@ impl TestStacksNode { // NOTE: self.nakamoto_commit_ops[block_header_hash] now contains an index into // self.nakamoto_blocks that doesn't exist. 
The caller needs to follow this call with a // call to self.add_nakamoto_tenure_blocks() - self.nakamoto_commit_ops.insert( - last_tenure_id.clone(), - self.nakamoto_blocks.len(), - ); + self.nakamoto_commit_ops + .insert(last_tenure_id.clone(), self.nakamoto_blocks.len()); block_commit_op } @@ -257,33 +300,28 @@ impl TestStacksNode { // parent Nakamoto blocks, if we're building atop a previous Nakamoto tenure parent_nakamoto_tenure: Option<&[NakamotoBlock]>, burn_amount: u64, - tenure_change_cause: TenureChangeCause - ) -> (LeaderBlockCommitOp, TenureChangePayload, VRFProof) { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let proof = miner - .make_proof( - &miner_key.public_key, - &burn_block.parent_snapshot.sortition_hash, - ) - .expect(&format!( - "FATAL: no private key for {}", - miner_key.public_key.to_hex() - )); - - let (last_tenure_id, previous_tenure_end, previous_tenure_blocks, parent_block_snapshot_opt) = if let Some(parent_blocks) = parent_nakamoto_tenure { + tenure_change_cause: TenureChangeCause, + ) -> (LeaderBlockCommitOp, TenureChangePayload) { + let ( + last_tenure_id, + previous_tenure_end, + previous_tenure_blocks, + parent_block_snapshot_opt, + ) = if let Some(parent_blocks) = parent_nakamoto_tenure { // parent is an epoch 3 nakamoto block let first_parent = parent_blocks.first().unwrap(); let last_parent = parent_blocks.last().unwrap(); - let parent_tenure_id = StacksBlockId::new(&first_parent.header.consensus_hash, &first_parent.header.block_hash()); - let ic = sortdb.index_conn(); - let parent_sortition = SortitionDB::get_block_snapshot_for_winning_nakamoto_tenure( - &ic, - &tip.sortition_id, - &parent_tenure_id, + let parent_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + let parent_sortition = SortitionDB::get_block_snapshot_consensus( + &sortdb.conn(), + &first_parent.header.consensus_hash, ) .unwrap() .unwrap(); - + test_debug!( "Work in 
{} {} for Nakamoto parent: {},{}", burn_block.block_height, @@ -292,9 +330,13 @@ impl TestStacksNode { last_parent.header.chain_length + 1, ); - (parent_tenure_id, last_parent.header.block_id(), parent_blocks.len(), Some(parent_sortition)) - } - else if let Some(parent_stacks_block) = parent_stacks_block { + ( + parent_tenure_id, + last_parent.header.block_id(), + parent_blocks.len(), + Some(parent_sortition), + ) + } else if let Some(parent_stacks_block) = parent_stacks_block { // building off an existing stacks block let parent_stacks_block_snapshot = { let ic = sortdb.index_conn(); @@ -327,22 +369,28 @@ impl TestStacksNode { parent_chain_tip.anchored_header.height(), ); - (parent_tenure_id.clone(), parent_tenure_id, 1, Some(parent_stacks_block_snapshot)) - } - else { + ( + parent_tenure_id.clone(), + parent_tenure_id, + 1, + Some(parent_stacks_block_snapshot), + ) + } else { // first epoch is a nakamoto epoch (testing only) - let parent_tenure_id = StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH); + let parent_tenure_id = + StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH); (parent_tenure_id.clone(), parent_tenure_id, 0, None) }; - - let previous_tenure_blocks = u32::try_from(previous_tenure_blocks).expect("FATAL: too many blocks from last miner"); + + let previous_tenure_blocks = + u32::try_from(previous_tenure_blocks).expect("FATAL: too many blocks from last miner"); let tenure_change_payload = TenureChangePayload { previous_tenure_end, previous_tenure_blocks, cause: tenure_change_cause, pubkey_hash: miner.nakamoto_miner_hash160(), signature: SchnorrThresholdSignature::empty(), - signers: vec![] + signers: vec![], }; let block_commit_op = self.make_nakamoto_tenure_commitment( @@ -355,25 +403,44 @@ impl TestStacksNode { parent_block_snapshot_opt.as_ref(), ); - (block_commit_op, tenure_change_payload, proof) + (block_commit_op, tenure_change_payload) } /// Construct a full Nakamoto tenure with the given 
block builder. - /// The first block will contain a coinbase and a tenure-change - pub fn make_nakamoto_tenure_blocks( - chainstate: &StacksChainState, + /// The first block will contain a coinbase and a tenure-change. + /// Process the blocks via the chains coordinator as we produce them. + pub fn make_nakamoto_tenure_blocks<'a, F>( + chainstate: &mut StacksChainState, sortdb: &SortitionDB, miner: &mut TestMiner, proof: VRFProof, tenure_change_payload: TenureChangePayload, - mut block_builder: F + coord: &mut ChainsCoordinator< + 'a, + TestEventObserver, + (), + OnChainRewardSetProvider, + (), + (), + BitcoinIndexer, + >, + mut block_builder: F, ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> where - F: FnMut(&mut TestMiner, &StacksChainState, &SortitionDBConn, usize) -> Vec + F: FnMut( + &mut TestMiner, + &mut StacksChainState, + &SortitionDB, + usize, + ) -> Vec, { + let miner_addr = miner.origin_address().unwrap(); + let miner_account = get_account(chainstate, sortdb, &miner_addr); + miner.set_nonce(miner_account.nonce); + let mut tenure_change = Some(miner.make_nakamoto_tenure_change(tenure_change_payload)); let mut coinbase = Some(miner.make_nakamoto_coinbase(None, proof.clone())); - + let mut blocks = vec![]; let mut block_count = 0; loop { @@ -384,16 +451,22 @@ impl TestStacksNode { if let Some(coinbase) = coinbase.take() { txs.push(coinbase); } - let mut next_block_txs = block_builder(miner, chainstate, &sortdb.index_conn(), block_count); + let mut next_block_txs = block_builder(miner, chainstate, sortdb, block_count); txs.append(&mut next_block_txs); if txs.len() == 0 { break; } - let parent_tip_opt = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap(); + let parent_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap(); let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + debug!( + "Build Nakamoto block in tenure {}", + &burn_tip.consensus_hash + ); + // 
make a block let builder = if let Some(parent_tip) = parent_tip_opt { NakamotoBlockBuilder::new_from_parent( @@ -401,15 +474,43 @@ impl TestStacksNode { &parent_tip, &burn_tip.consensus_hash, burn_tip.total_burn, - if block_count == 0 { Some(proof.clone()) } else { None } - ).unwrap() - } - else { + if block_count == 0 { + Some(proof.clone()) + } else { + None + }, + ) + .unwrap() + } else { NakamotoBlockBuilder::new_tenure_from_genesis(&proof) }; - let (mut nakamoto_block, size, cost) = builder.make_nakamoto_block_from_txs(chainstate, &sortdb.index_conn(), txs).unwrap(); + let (mut nakamoto_block, size, cost) = builder + .make_nakamoto_block_from_txs(chainstate, &sortdb.index_conn(), txs) + .unwrap(); miner.sign_nakamoto_block(&mut nakamoto_block); + + let block_id = nakamoto_block.block_id(); + debug!( + "Process Nakamoto block {} ({:?}", + &block_id, &nakamoto_block.header + ); + + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&sort_tip); + let accepted = Relayer::process_new_nakamoto_block( + &sort_handle, + chainstate, + nakamoto_block.clone(), + ) + .unwrap(); + if accepted { + test_debug!("Accepted Nakamoto block {}", &block_id); + coord.handle_new_nakamoto_stacks_block().unwrap(); + } else { + test_debug!("Did NOT accept Nakamoto block {}", &block_id); + } + blocks.push((nakamoto_block, size, cost)); block_count += 1; } @@ -421,12 +522,24 @@ impl<'a> TestPeer<'a> { /// Get the Nakamoto parent linkage data for building atop the last-produced tenure or /// Stacks 2.x block. 
/// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) - fn get_nakamoto_parent(miner: &TestMiner, stacks_node: &TestStacksNode, sortdb: &SortitionDB) -> (StacksBlockId, Option, Option>, Option) { + fn get_nakamoto_parent( + miner: &TestMiner, + stacks_node: &TestStacksNode, + sortdb: &SortitionDB, + ) -> ( + StacksBlockId, + Option, + Option>, + Option, + ) { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { // parent is an epoch 3 nakamoto block let first_parent = parent_blocks.first().unwrap(); - let parent_tenure_id = StacksBlockId::new(&first_parent.header.consensus_hash, &first_parent.header.block_hash()); + let parent_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); let ic = sortdb.index_conn(); let parent_sortition_opt = SortitionDB::get_block_snapshot_for_winning_nakamoto_tenure( &ic, @@ -434,58 +547,113 @@ impl<'a> TestPeer<'a> { &parent_tenure_id, ) .unwrap(); - let last_tenure_id = StacksBlockId::new(&first_parent.header.consensus_hash, &first_parent.header.block_hash()); - (last_tenure_id, None, Some(parent_blocks), parent_sortition_opt) - } - else { + let last_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + ( + last_tenure_id, + None, + Some(parent_blocks), + parent_sortition_opt, + ) + } else { // parent may be an epoch 2.x block - let (parent_opt, parent_sortition_opt) = if let Some(parent_block) = stacks_node.get_last_anchored_block(miner) { - let ic = sortdb.index_conn(); - let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &parent_block.block_hash(), - ) - .unwrap(); - (Some(parent_block), sort_opt) - } - else { - (None, None) - }; + let (parent_opt, parent_sortition_opt) = + if let Some(parent_block) = 
stacks_node.get_last_anchored_block(miner) { + let ic = sortdb.index_conn(); + let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &parent_block.block_hash(), + ) + .unwrap(); + (Some(parent_block), sort_opt) + } else { + (None, None) + }; let last_tenure_id = if let Some(last_epoch2_block) = parent_opt.as_ref() { let parent_sort = parent_sortition_opt.as_ref().unwrap(); - StacksBlockId::new(&parent_sort.consensus_hash, &last_epoch2_block.header.block_hash()) - } - else { + StacksBlockId::new( + &parent_sort.consensus_hash, + &last_epoch2_block.header.block_hash(), + ) + } else { // must be a genesis block (testing only!) StacksBlockId(BOOT_BLOCK_HASH.0.clone()) }; (last_tenure_id, parent_opt, None, parent_sortition_opt) } } - + /// Start the next Nakamoto tenure. /// This generates the VRF key and block-commit txs, as well as the TenureChange and - /// VRFProof + /// leader key this commit references pub fn begin_nakamoto_tenure( &mut self, - tenure_change_cause: TenureChangeCause - ) -> (Vec, TenureChangePayload, VRFProof) { + tenure_change_cause: TenureChangeCause, + ) -> ( + Vec, + TenureChangePayload, + LeaderKeyRegisterOp, + ) { let mut sortdb = self.sortdb.take().unwrap(); let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); let mut burn_block = TestBurnchainBlock::new(&tip, 0); let mut stacks_node = self.stacks_node.take().unwrap(); - let (last_tenure_id, parent_block_opt, parent_tenure_opt, parent_sortition_opt) = Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); - let last_key = stacks_node.get_last_key(&self.miner); + let (last_tenure_id, parent_block_opt, parent_tenure_opt, parent_sortition_opt) = + Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); + + // find the VRF leader key register tx to use. 
+ // it's the one pointed to by the parent tenure + let parent_consensus_hash_opt = if let Some(parent_tenure) = parent_tenure_opt.as_ref() { + let tenure_start_block = parent_tenure.first().unwrap(); + Some(tenure_start_block.header.consensus_hash) + } else if let Some(parent_block) = parent_block_opt.as_ref() { + let parent_header_info = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + stacks_node.chainstate.db(), + &last_tenure_id, + ) + .unwrap() + .unwrap(); + Some(parent_header_info.consensus_hash) + } else { + None + }; + + let last_key = if let Some(ch) = parent_consensus_hash_opt { + let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &ch) + .unwrap() + .unwrap(); + let tenure_block_commit = get_block_commit_by_txid( + sortdb.conn(), + &tenure_sn.sortition_id, + &tenure_sn.winning_block_txid, + ) + .unwrap() + .unwrap(); + let tenure_leader_key = SortitionDB::get_leader_key_at( + &sortdb.index_conn(), + tenure_block_commit.key_block_ptr.into(), + tenure_block_commit.key_vtxindex.into(), + &tenure_sn.sortition_id, + ) + .unwrap() + .unwrap(); + tenure_leader_key + } else { + panic!("No leader key"); + }; let network_id = self.config.network_id; let chainstate_path = self.chainstate_path.clone(); let burn_block_height = burn_block.block_height; - let (mut block_commit_op, tenure_change_payload, vrf_proof) = stacks_node.begin_nakamoto_tenure( + let (mut block_commit_op, tenure_change_payload) = stacks_node.begin_nakamoto_tenure( &sortdb, &mut self.miner, &mut burn_block, @@ -493,7 +661,7 @@ impl<'a> TestPeer<'a> { parent_block_opt.as_ref(), parent_tenure_opt.as_ref().map(|blocks| blocks.as_slice()), 1000, - tenure_change_cause + tenure_change_cause, ); // patch up block-commit -- these blocks all mine off of genesis @@ -502,17 +670,14 @@ impl<'a> TestPeer<'a> { block_commit_op.parent_vtxindex = 0; } - let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + let mut burn_ops = vec![]; + 
if self.miner.last_VRF_public_key().is_none() { + let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + burn_ops.push(BlockstackOperationType::LeaderKeyRegister(leader_key_op)); + } // patch in reward set info - match get_next_recipients( - &tip, - &mut stacks_node.chainstate, - &mut sortdb, - &self.config.burnchain, - &OnChainRewardSetProvider(), - true, - ) { + match get_nakamoto_next_recipients(&tip, &mut sortdb, &self.config.burnchain) { Ok(recipients) => { block_commit_op.commit_outs = match recipients { Some(info) => { @@ -553,36 +718,76 @@ impl<'a> TestPeer<'a> { } }; + burn_ops.push(BlockstackOperationType::LeaderBlockCommit(block_commit_op)); + self.stacks_node = Some(stacks_node); self.sortdb = Some(sortdb); - ( - vec![ - BlockstackOperationType::LeaderKeyRegister(leader_key_op), - BlockstackOperationType::LeaderBlockCommit(block_commit_op), - ], - tenure_change_payload, - vrf_proof - ) + (burn_ops, tenure_change_payload, last_key) } - /// Produce a Nakamoto tenure, after processing the block-commit from + /// Make the VRF proof for this tenure. + /// Call after processing the block-commit + pub fn make_nakamoto_vrf_proof(&mut self, miner_key: LeaderKeyRegisterOp) -> VRFProof { + let sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let proof = self + .miner + .make_proof(&miner_key.public_key, &tip.sortition_hash) + .expect(&format!( + "FATAL: no private key for {}", + miner_key.public_key.to_hex() + )); + self.sortdb = Some(sortdb); + debug!( + "VRF proof made from {} over {}: {}", + &miner_key.public_key.to_hex(), + &tip.sortition_hash, + &proof.to_hex() + ); + proof + } + + /// Produce and process a Nakamoto tenure, after processing the block-commit from /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), /// take the consensus hash, and feed it in here. 
+ /// + /// Returns the blocks, their sizes, and runtime costs pub fn make_nakamoto_tenure( &mut self, consensus_hash: &ConsensusHash, tenure_change_payload: TenureChangePayload, vrf_proof: VRFProof, - block_builder: F + block_builder: F, ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> where - F: FnMut(&mut TestMiner, &StacksChainState, &SortitionDBConn, usize) -> Vec + F: FnMut( + &mut TestMiner, + &mut StacksChainState, + &SortitionDB, + usize, + ) -> Vec, { - let stacks_node = self.stacks_node.take().unwrap(); + let mut stacks_node = self.stacks_node.take().unwrap(); let sortdb = self.sortdb.take().unwrap(); - let (last_tenure_id, parent_block_opt, _parent_tenure_opt, parent_sortition_opt) = Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); - let blocks = TestStacksNode::make_nakamoto_tenure_blocks(&stacks_node.chainstate, &sortdb, &mut self.miner, vrf_proof, tenure_change_payload, block_builder); + let (last_tenure_id, parent_block_opt, _parent_tenure_opt, parent_sortition_opt) = + Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); + let blocks = TestStacksNode::make_nakamoto_tenure_blocks( + &mut stacks_node.chainstate, + &sortdb, + &mut self.miner, + vrf_proof, + tenure_change_payload, + &mut self.coord, + block_builder, + ); + + let just_blocks = blocks + .clone() + .into_iter() + .map(|(block, _, _)| block) + .collect(); + stacks_node.add_nakamoto_tenure_blocks(just_blocks); self.stacks_node = Some(stacks_node); self.sortdb = Some(sortdb); @@ -591,11 +796,7 @@ impl<'a> TestPeer<'a> { } /// Accept a new Nakamoto tenure via the relayer, and then try to process them. 
- /// Call this after make_nakamoto_tenure() - pub fn process_nakamoto_tenure( - &mut self, - blocks: Vec - ) { + pub fn process_nakamoto_tenure(&mut self, blocks: Vec) { debug!("Peer will process {} Nakamoto blocks", blocks.len()); let sortdb = self.sortdb.take().unwrap(); @@ -608,16 +809,17 @@ impl<'a> TestPeer<'a> { for block in blocks.into_iter() { let block_id = block.block_id(); debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header); - let accepted = Relayer::process_new_nakamoto_block(&sort_handle, &mut node.chainstate, block).unwrap(); + let accepted = + Relayer::process_new_nakamoto_block(&sort_handle, &mut node.chainstate, block) + .unwrap(); if accepted { test_debug!("Accepted Nakamoto block {}", &block_id); self.coord.handle_new_nakamoto_stacks_block().unwrap(); - } - else { + } else { test_debug!("Did NOT accept Nakamoto block {}", &block_id); } } - + self.sortdb = Some(sortdb); self.stacks_node = Some(node); } From 5c389eb1b11a6e654e876c19fcf4d1f12f36af61 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:52:13 -0500 Subject: [PATCH 092/122] feat: get Stacks 2.x header by its sortition's consensus hash (which is guaranteed to uniquely identify the Stacks block) --- stackslib/src/chainstate/stacks/db/headers.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/stackslib/src/chainstate/stacks/db/headers.rs b/stackslib/src/chainstate/stacks/db/headers.rs index d8cc18e120..ebde32d91a 100644 --- a/stackslib/src/chainstate/stacks/db/headers.rs +++ b/stackslib/src/chainstate/stacks/db/headers.rs @@ -249,6 +249,21 @@ impl StacksChainState { .map_err(Error::DBError) } + /// Get a stacks header info by its sortition's consensus hash. + /// Because the consensus hash mixes in the burnchain header hash and the PoX bit vector, + /// it's guaranteed to be unique across all burnchain forks and all PoX forks, and thus all + /// Stacks forks. 
+ pub fn get_stacks_block_header_info_by_consensus_hash( + conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, Error> { + let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1"; + query_row_panic(conn, sql, &[&consensus_hash], || { + "FATAL: multiple rows for the same consensus hash".to_string() + }) + .map_err(Error::DBError) + } + /// Get an ancestor block header pub fn get_tip_ancestor( tx: &mut StacksDBTx, From 79f9c06b8b9dc2c2f3353fe28b2969e8288e8fa6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:52:43 -0500 Subject: [PATCH 093/122] fix: use NakamotoChainState::get_canonical_block_header() for performing Clarity VM queries (since we can query at any nakamoto block). Also, index block headers by consensus hash --- stackslib/src/chainstate/stacks/db/mod.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 1ae076a76c..ccd7e86e85 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -57,7 +57,8 @@ use crate::chainstate::burn::operations::{DelegateStxOp, StackStxOp, TransferStx use crate::chainstate::burn::ConsensusHash; use crate::chainstate::burn::ConsensusHashExtensions; use crate::chainstate::nakamoto::{ - HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NAKAMOTO_CHAINSTATE_SCHEMA_1, + HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, + NAKAMOTO_CHAINSTATE_SCHEMA_1, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -843,6 +844,7 @@ const CHAINSTATE_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_block_hash_tx_index ON transactions(index_block_hash);", "CREATE INDEX IF NOT EXISTS index_block_header_by_affirmation_weight ON block_headers(affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_block_header_by_height_and_affirmation_weight 
ON block_headers(block_height,affirmation_weight);", + "CREATE INDEX IF NOT EXISTS index_headers_by_consensus_hash ON block_headers(consensus_hash);", ]; pub use stacks_common::consts::MINER_REWARD_MATURITY; @@ -1976,9 +1978,9 @@ impl StacksChainState { where F: FnOnce(&mut ClarityReadOnlyConnection) -> R, { - match StacksChainState::has_stacks_block(self.db(), parent_tip) { - Ok(true) => {} - Ok(false) => { + match NakamotoChainState::get_block_header(self.db(), parent_tip) { + Ok(Some(_)) => {} + Ok(None) => { return None; } Err(e) => { From 5c3e24d8b32b6e43c4f0a1e612beff65abc5ce01 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:53:53 -0500 Subject: [PATCH 094/122] chore: use NakamotoChainState::get_canonical_block_header() wherever we can, and documented 2.x-only behavior wherever we can't --- stackslib/src/net/inv.rs | 2 ++ stackslib/src/net/p2p.rs | 17 ++++++++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/inv.rs b/stackslib/src/net/inv.rs index cce729d7ab..0b3c0b63ff 100644 --- a/stackslib/src/net/inv.rs +++ b/stackslib/src/net/inv.rs @@ -1779,6 +1779,8 @@ impl PeerNetwork { // affirmation maps are compatible, so just resume scanning off of wherever we are at the // tip. 
+ // NOTE: This code path only works in Stacks 2.x, but that's okay because this whole state + // machine is only used in Stacks 2.x let (consensus_hash, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) .unwrap_or((ConsensusHash::empty(), BlockHeaderHash([0u8; 32]))); diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 34c01502f7..b90d89340e 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -93,6 +93,8 @@ use crate::net::*; use crate::util_lib::db::DBConn; use crate::util_lib::db::DBTx; use crate::util_lib::db::Error as db_error; +use stacks_common::consts::FIRST_BURNCHAIN_CONSENSUS_HASH; +use stacks_common::consts::FIRST_STACKS_BLOCK_HASH; use stacks_common::types::chainstate::{PoxId, SortitionId}; /// inter-thread request to send a p2p message from another thread in this program. @@ -3978,6 +3980,8 @@ impl PeerNetwork { // hint to the downloader to start scanning at the sortition // height we just synchronized + // NOTE: this only works in Stacks 2.x. + // Nakamoto uses a different state machine let start_download_sortition = if let Some(ref inv_state) = self.inv_state { @@ -3986,7 +3990,6 @@ impl PeerNetwork { sortdb.conn(), ) .expect("FATAL: failed to load canonical stacks chain tip hash from sortition DB"); - let stacks_tip_sortition_height = SortitionDB::get_block_snapshot_consensus( sortdb.conn(), @@ -5616,8 +5619,16 @@ impl PeerNetwork { network_result: &mut NetworkResult, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<(), net_error> { - let (canonical_consensus_hash, canonical_block_hash) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; + let (canonical_consensus_hash, canonical_block_hash) = if let Some(header) = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? 
+ { + (header.consensus_hash, header.anchored_header.block_hash()) + } else { + ( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ) + }; let sn = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn())?; From 8e53e698e9eba02b3ae444a1ed105c42a309be09 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:54:22 -0500 Subject: [PATCH 095/122] fix: process affirmation maps in epoch 3 --- stackslib/src/net/mod.rs | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index f5e153ca81..85ba9b2a93 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3525,7 +3525,10 @@ pub mod test { let sortdb = self.sortdb.take().unwrap(); let (block_height, block_hash, epoch_id) = { let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); - let epoch_id = SortitionDB::get_stacks_epoch(&sortdb.conn(), tip.block_height + 1).unwrap().unwrap().epoch_id; + let epoch_id = SortitionDB::get_stacks_epoch(&sortdb.conn(), tip.block_height + 1) + .unwrap() + .unwrap() + .epoch_id; if set_consensus_hash { TestPeer::set_ops_consensus_hash(&mut blockstack_ops, &tip.consensus_hash); @@ -3582,25 +3585,22 @@ pub mod test { ) .unwrap(); - if epoch_id < StacksEpochId::Epoch30 { - Burnchain::process_affirmation_maps( - &self.config.burnchain, - &mut burnchain_db, - &indexer, - block_header.block_height, - ) - .unwrap(); - } + Burnchain::process_affirmation_maps( + &self.config.burnchain, + &mut burnchain_db, + &indexer, + block_header.block_height, + ) + .unwrap(); + (block_header.block_height, block_header_hash, epoch_id) }; - let missing_pox_anchor_block_hash_opt = - if epoch_id < StacksEpochId::Epoch30 { - self.coord.handle_new_burnchain_block().unwrap() - } - else { - self.coord.handle_new_nakamoto_burnchain_block().unwrap() - }; + let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 { + 
self.coord.handle_new_burnchain_block().unwrap() + } else { + self.coord.handle_new_nakamoto_burnchain_block().unwrap() + }; let pox_id = { let ic = sortdb.index_conn(); @@ -4272,7 +4272,7 @@ pub mod test { microblocks, ) } - + pub fn to_neighbor(&self) -> Neighbor { self.config.to_neighbor() } From cacf191372378f1ce0d42ee82865d5a0b660268e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:54:51 -0500 Subject: [PATCH 096/122] fix: off-by-one error when checking for Nakamoto block acceptance (also, cargo fmt) --- stackslib/src/net/relay.rs | 43 +++++++++++++++++++++++--------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 4f96f22aab..8fb5628352 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -43,8 +43,8 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::coordinator::comm::CoordinatorChannels; use crate::chainstate::coordinator::BlockEventDispatcher; -use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt, StacksHeaderInfo}; use crate::chainstate::stacks::events::StacksTransactionReceipt; @@ -661,7 +661,7 @@ impl Relayer { } Ok(res) } - + /// Insert a staging Nakamoto block that got relayed to us somehow -- e.g. uploaded via http, /// downloaded by us, or pushed via p2p. /// Return Ok(true) if we stored it, Ok(false) if we didn't @@ -675,27 +675,34 @@ impl Relayer { &block.header.consensus_hash, &block.header.block_hash() ); - + // do we have this block? don't lock the DB needlessly if so. - if let Some(_) = NakamotoChainState::get_block_header(chainstate.db(), &block.header.block_id())? 
{ + if let Some(_) = + NakamotoChainState::get_block_header(chainstate.db(), &block.header.block_id())? + { debug!("Already have Nakamoto block {}", &block.header.block_id()); return Ok(false); } - let block_sn = SortitionDB::get_block_snapshot_consensus(sort_handle, &block.header.consensus_hash)? - .ok_or(chainstate_error::DBError(db_error::NotFoundError))?; + let block_sn = + SortitionDB::get_block_snapshot_consensus(sort_handle, &block.header.consensus_hash)? + .ok_or(chainstate_error::DBError(db_error::NotFoundError))?; - // don't relay this block if it's using the wrong AST rules (this would render at least one of its - // txs problematic). - let epoch_id = SortitionDB::get_stacks_epoch(sort_handle, block_sn.block_height)? + // NOTE: it's `+ 1` because the first Nakamoto block is built atop the last epoch 2.x + // tenure, right after the last 2.x sortition + let epoch_id = SortitionDB::get_stacks_epoch(sort_handle, block_sn.block_height + 1)? .expect("FATAL: no epoch defined") .epoch_id; if epoch_id < StacksEpochId::Epoch30 { error!("Nakamoto blocks are not supported in this epoch"); - return Err(chainstate_error::InvalidStacksBlock("Nakamoto blocks are not supported in this epoch".into())); + return Err(chainstate_error::InvalidStacksBlock( + "Nakamoto blocks are not supported in this epoch".into(), + )); } + // don't relay this block if it's using the wrong AST rules (this would render at least one of its + // txs problematic). 
if !Relayer::static_check_problematic_relayed_nakamoto_block( chainstate.mainnet, epoch_id, @@ -718,12 +725,10 @@ impl Relayer { &block.header.block_hash() ); + let config = chainstate.config(); let staging_db_tx = chainstate.db_tx_begin()?; - let accepted = NakamotoChainState::accept_block( - block, - sort_handle, - &staging_db_tx - )?; + let accepted = + NakamotoChainState::accept_block(&config, block, sort_handle, &staging_db_tx)?; staging_db_tx.commit()?; if accepted { @@ -1384,7 +1389,7 @@ impl Relayer { } true } - + /// Verify that a relayed block is not problematic -- i.e. it doesn't contain any problematic /// transactions. This is a static check -- we only look at the block contents. /// @@ -1773,6 +1778,7 @@ impl Relayer { } /// Set up the unconfirmed chain state off of the canonical chain tip. + /// Only relevant in Stacks 2.x. Nakamoto nodes should not call this. pub fn setup_unconfirmed_state( chainstate: &mut StacksChainState, sortdb: &SortitionDB, @@ -1794,7 +1800,8 @@ impl Relayer { Ok(processed_unconfirmed_state) } - /// Set up unconfirmed chain state in a read-only fashion + /// Set up unconfirmed chain state in a read-only fashion. + /// Only relevant in Stacks 2.x. Nakamoto nodes should not call this. pub fn setup_unconfirmed_state_readonly( chainstate: &mut StacksChainState, sortdb: &SortitionDB, @@ -1815,6 +1822,8 @@ impl Relayer { Ok(()) } + /// Reload unconfirmed microblock stream. 
+ /// Only call if we're in Stacks 2.x pub fn refresh_unconfirmed( chainstate: &mut StacksChainState, sortdb: &mut SortitionDB, From 3acb3db4038c0f694e326b5bd384559d073c6a41 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 00:55:19 -0500 Subject: [PATCH 097/122] chore: API sync --- testnet/stacks-node/src/mockamoto.rs | 48 +++++++++++++++------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 9d8dd96e45..c87ec5b652 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -329,10 +329,7 @@ impl MockamotoNode { sortdb_tx.commit()?; let staging_db_tx = self.chainstate.db_tx_begin()?; - NakamotoChainState::set_burn_block_processed( - &staging_db_tx, - &new_snapshot.consensus_hash, - )?; + NakamotoChainState::set_burn_block_processed(&staging_db_tx, &new_snapshot.consensus_hash)?; staging_db_tx.commit()?; Ok(()) @@ -343,22 +340,24 @@ impl MockamotoNode { let chain_id = self.chainstate.chain_id; let (mut chainstate_tx, clarity_instance) = self.chainstate.chainstate_tx_begin().unwrap(); - let (is_genesis, chain_tip_bh, chain_tip_ch) = match NakamotoChainState::get_canonical_block_header( - &chainstate_tx, - &self.sortdb, - ) { - Ok(Some(chain_tip)) => (false, chain_tip.anchored_header.block_hash(), chain_tip.consensus_hash), - Ok(None) | Err(ChainstateError::NoSuchBlockError) => - // No stacks tip yet, parent should be genesis - { - ( - true, - FIRST_STACKS_BLOCK_HASH, - FIRST_BURNCHAIN_CONSENSUS_HASH, - ) - } - Err(e) => return Err(e), - }; + let (is_genesis, chain_tip_bh, chain_tip_ch) = + match NakamotoChainState::get_canonical_block_header(&chainstate_tx, &self.sortdb) { + Ok(Some(chain_tip)) => ( + false, + chain_tip.anchored_header.block_hash(), + chain_tip.consensus_hash, + ), + Ok(None) | Err(ChainstateError::NoSuchBlockError) => + // No stacks tip yet, parent should be genesis + { + ( + true, + 
FIRST_STACKS_BLOCK_HASH, + FIRST_BURNCHAIN_CONSENSUS_HASH, + ) + } + Err(e) => return Err(e), + }; let (parent_chain_length, parent_burn_height) = if is_genesis { (0, 0) @@ -372,7 +371,8 @@ impl MockamotoNode { let miner_nonce = 2 * parent_chain_length; // TODO: VRF proof cannot be None in Nakamoto rules - let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([1; 32]), None, None); + let coinbase_tx_payload = + TransactionPayload::Coinbase(CoinbasePayload([1; 32]), None, None); let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), @@ -503,9 +503,10 @@ impl MockamotoNode { fn mine_and_stage_block(&mut self) -> Result<(), ChainstateError> { let block = self.mine_stacks_block()?; + let config = self.chainstate.config(); let chainstate_tx = self.chainstate.db_tx_begin()?; let sortition_handle = self.sortdb.index_handle_at_tip(); - NakamotoChainState::accept_block(block, &sortition_handle, &chainstate_tx)?; + NakamotoChainState::accept_block(&config, block, &sortition_handle, &chainstate_tx)?; chainstate_tx.commit()?; Ok(()) } @@ -514,7 +515,8 @@ impl MockamotoNode { let (mut chainstate_tx, clarity_instance) = self.chainstate.chainstate_tx_begin()?; let pox_constants = self.sortdb.pox_constants.clone(); let mut sortdb_tx = self.sortdb.tx_begin_at_tip(); - let Some((next_block, _)) = NakamotoChainState::next_ready_nakamoto_block(&chainstate_tx)? else { + let Some((next_block, _)) = NakamotoChainState::next_ready_nakamoto_block(&chainstate_tx)? 
+ else { return Ok(false); }; From 96a9ca62b5986e66ecd1356d50a3863b92aef3ae Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 17:44:57 +0000 Subject: [PATCH 098/122] refactor: accept @kantai's change Co-authored-by: Aaron Blankstein --- stackslib/src/burnchains/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 0596582ae7..ef7986c625 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -518,7 +518,11 @@ impl PoxConstants { /// Is this burnchain block height the start of a prepare phase? pub fn is_prepare_phase_start(&self, first_block_height: u64, burn_height: u64) -> bool { - let effective_height = burn_height - first_block_height; + let Some(effective_height) = burn_height.checked_sub(first_block_height) + else { + // if `burn_height < first_block_height`, then return false: this isn't the start of a prepare phase. + return false; + }; (effective_height % u64::from(self.reward_cycle_length)) == u64::from((self.reward_cycle_length - self.prepare_length) + 1) } From e6df7313773170b57adbc2e78d10f28edda3574d Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Tue, 7 Nov 2023 19:46:26 +0000 Subject: [PATCH 099/122] fix: `processed_block` should check that the `consensus_hash` is in the sortition history Co-authored-by: Aaron Blankstein --- stackslib/src/chainstate/burn/db/sortdb.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index b72e541622..23a89816ad 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2073,12 +2073,16 @@ impl<'a> SortitionHandleConn<'a> { /// Has `consensus_hash` been processed in the current fork? 
pub fn processed_block(&self, consensus_hash: &ConsensusHash) -> Result { - let Some(bhh) = SortitionDB::get_burnchain_header_hash_by_consensus(self, consensus_hash)? + let Some(snapshot) = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? else { return Ok(false); }; - self.get_sortition_id_for_bhh(&bhh) - .map(|result| result.is_some()) + let Some(expected_sortition_id) = self.get_sortition_id_for_bhh(&snapshot.burn_header_hash)? + else { + return Ok(false); + }; + let matched_fork = expected_sortition_id == snapshot.sortition_id; + Ok(matched_fork) } pub fn get_tip_snapshot(&self) -> Result, db_error> { From e3208ac7bc27e4cab9e26f23fe474578df9b7e55 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:06:40 -0500 Subject: [PATCH 100/122] chore: add first_mined() helper --- stacks-common/src/types/chainstate.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index a348024c2d..67ccfca970 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -15,6 +15,7 @@ use sha2::Sha256; use sha2::{Digest as Sha2Digest, Sha512_256}; use crate::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; +use crate::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::deps_common::bitcoin::util::hash::Sha256dHash; use crate::util::hash::DoubleSha256; use crate::util::hash::{to_hex, Hash160, Sha512Trunc256Sum, HASH160_ENCODED_SIZE}; @@ -280,6 +281,10 @@ impl StacksBlockId { let h = Sha512Trunc256Sum::from_hasher(hasher); StacksBlockId(h.0) } + + pub fn first_mined() -> StacksBlockId { + StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) + } } impl StacksWorkScore { From 4da825a61a0ae4eb47618610c265b827c9204615 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:06:56 -0500 Subject: [PATCH 101/122] chore: remove dead code (which was buggy) --- 
stackslib/src/burnchains/burnchain.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 77910f5a08..c0ce9db054 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -511,12 +511,6 @@ impl Burnchain { .is_reward_cycle_start(self.first_block_height, burn_height) } - /// Is this burnchain block height the start of the prepare phase? - pub fn is_prepare_phase_start(&self, burn_height: u64) -> bool { - self.pox_constants - .is_prepare_phase_start(self.first_block_height, burn_height) - } - pub fn reward_cycle_to_block_height(&self, reward_cycle: u64) -> u64 { self.pox_constants .reward_cycle_to_block_height(self.first_block_height, reward_cycle) From 3a239190b35976cf7c11347305d013f7baa9216e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:07:19 -0500 Subject: [PATCH 102/122] feat: add prepare_phase_start() helper --- stackslib/src/burnchains/mod.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 0596582ae7..656259cb33 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -510,19 +510,21 @@ impl PoxConstants { } } + /// What's the first block in the prepare phase + pub fn prepare_phase_start(&self, first_block_height: u64, reward_cycle: u64) -> u64 { + let reward_cycle_start = + self.reward_cycle_to_block_height(first_block_height, reward_cycle); + let prepare_phase_start = reward_cycle_start + u64::from(self.reward_cycle_length) + - u64::from(self.prepare_length); + prepare_phase_start + } + pub fn is_reward_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { let effective_height = burn_height - first_block_height; // first block of the new reward cycle (effective_height % (self.reward_cycle_length as u64)) == 1 } - /// Is this burnchain block height the start of 
a prepare phase? - pub fn is_prepare_phase_start(&self, first_block_height: u64, burn_height: u64) -> bool { - let effective_height = burn_height - first_block_height; - (effective_height % u64::from(self.reward_cycle_length)) - == u64::from((self.reward_cycle_length - self.prepare_length) + 1) - } - pub fn reward_cycle_to_block_height(&self, first_block_height: u64, reward_cycle: u64) -> u64 { // NOTE: the `+ 1` is because the height of the first block of a reward cycle is mod 1, not // mod 0. From 8062ac4a1667c761853a1b2595e505a644ab5160 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:08:12 -0500 Subject: [PATCH 103/122] fix: only acknowledge the stackers' aggregate public key in a query if the given consensus hash is within one reward cycle of the tip (so we don't allow old stackers to submit ~infinite nakamoto blocks) --- stackslib/src/chainstate/burn/db/sortdb.rs | 59 +++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index b72e541622..2a759e153d 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -1928,11 +1928,66 @@ impl<'a> SortitionHandleConn<'a> { /// Does the sortition db expect to receive blocks /// signed by this stacker set? + /// + /// This only works if `consensus_hash` is within one reward cycle (2100 blocks) of the + /// sortition pointed to by this handle's sortiton tip. If it isn't, then this + /// method returns Ok(false). This is to prevent a DDoS vector whereby compromised stale + /// Stacker keys can be used to blast out lots of Nakamoto blocks that will be accepted + /// but never processed. So, `consensus_hash` can be in the same reward cycle as + /// `self.context.chain_tip`, or the previous, but no earlier. 
pub fn expects_stacker_signature( &self, consensus_hash: &ConsensusHash, _stacker_signature: &MessageSignature, ) -> Result { + let sn = SortitionDB::get_block_snapshot(self, &self.context.chain_tip)? + .ok_or(db_error::NotFoundError) + .map_err(|e| { + warn!("No sortition for tip: {:?}", &self.context.chain_tip); + e + })?; + + let ch_sn = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? + .ok_or(db_error::NotFoundError) + .map_err(|e| { + warn!("No sortition for consensus hash: {:?}", consensus_hash); + e + })?; + + if ch_sn.block_height + u64::from(self.context.pox_constants.reward_cycle_length) + < sn.block_height + { + // too far in the past + debug!("Block with consensus hash {} is too far in the past", consensus_hash; + "consensus_hash" => %consensus_hash, + "block_height" => ch_sn.block_height, + "tip_block_height" => sn.block_height + ); + return Ok(false); + } + + // this given consensus hash must be an ancestor of our chain tip + let ch_at = self + .get_consensus_at(ch_sn.block_height)? + .ok_or(db_error::NotFoundError) + .map_err(|e| { + warn!("No ancestor consensus hash"; + "tip" => %self.context.chain_tip, + "consensus_hash" => %consensus_hash, + "consensus_hash height" => %ch_sn.block_height + ); + e + })?; + + if ch_at != ch_sn.consensus_hash { + // not an ancestor + warn!("Consensus hash is not an ancestor of the sortition tip"; + "tip" => %self.context.chain_tip, + "consensus_hash" => %consensus_hash + ); + return Err(db_error::NotFoundError); + } + // is this consensus hash in this fork? let Some(bhh) = SortitionDB::get_burnchain_header_hash_by_consensus(self, consensus_hash)? else { @@ -4991,7 +5046,9 @@ impl SortitionDB { } } - /// Get a block snapshot for a winning Nakamoto tenure in a given burn chain fork. + /// Given the last_tenure_id (e.g. in a block-commit in Nakamoto), find its sortition in the + /// given sortition fork. 
+ #[cfg(test)] pub fn get_block_snapshot_for_winning_nakamoto_tenure( ic: &SortitionDBConn, tip: &SortitionId, From c091471228522def710a1536ad815713514eecd1 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:09:02 -0500 Subject: [PATCH 104/122] chore: clean up unneeded indentations --- stackslib/src/chainstate/coordinator/mod.rs | 70 +++++++++++---------- 1 file changed, 38 insertions(+), 32 deletions(-) diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index 706fbb6bb1..a8ef06b144 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -141,7 +141,7 @@ impl RewardCycleInfo { pub trait BlockEventDispatcher { fn announce_block( &self, - block: StacksBlockEventData, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent: &StacksBlockId, @@ -226,6 +226,7 @@ pub enum Error { NotPoXAnchorBlock, NotInPreparePhase, RewardSetAlreadyProcessed, + PoXAnchorBlockRequired, } impl From for Error { @@ -750,11 +751,9 @@ pub fn get_reward_cycle_info( .expect("FATAL: no reward cycle for burn height"); if prev_reward_cycle > 1 { - let prev_reward_cycle_start = - burnchain.reward_cycle_to_block_height(prev_reward_cycle - 1); - let prepare_phase_start = prev_reward_cycle_start - + u64::from(burnchain.pox_constants.reward_cycle_length) - - u64::from(burnchain.pox_constants.prepare_length); + let prepare_phase_start = burnchain + .pox_constants + .prepare_phase_start(burnchain.first_block_height, prev_reward_cycle - 1); let first_prepare_sn = SortitionDB::get_ancestor_snapshot(&ic, prepare_phase_start, sortition_tip)? .expect("FATAL: no start-of-prepare-phase sortition"); @@ -2263,8 +2262,8 @@ impl< /// Outermost call to process a burnchain block. /// Will call the Stacks 2.x or Nakamoto handler, depending on whether or not /// Not called internally. 
- /// NOTE: the 2.x and Nakamoto handlers return `Some(..)` in _different_ circumstances. If - /// that matters to you, then you should call them directly. + /// NOTE: in epoch 3.x, we can't determine the hash of the PoX anchor block directly if it's + /// missing. If it is missing, then this method would return Some(BlockHeaderHash("0000...0000")) pub fn handle_new_burnchain_block(&mut self) -> Result, Error> { let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; let epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn())?; @@ -2276,32 +2275,39 @@ impl< .expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); if target_epoch.epoch_id < StacksEpochId::Epoch30 { // burnchain has not yet advanced to epoch 3.0 - self.handle_new_epoch2_burnchain_block(&mut HashSet::new()) - } else { - // burnchain has advanced to epoch 3.0, but has our sortition DB? - let canonical_snapshot = match self.canonical_sortition_tip.as_ref() { - Some(sn_tip) => SortitionDB::get_block_snapshot(self.sortition_db.conn(), sn_tip)? - .expect(&format!( - "FATAL: do not have previously-calculated highest valid sortition tip {}", - sn_tip - )), - None => SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())?, - }; - let target_epoch_index = - StacksEpoch::find_epoch(&epochs, canonical_snapshot.block_height) - .expect("FATAL: epoch not defined for BlockSnapshot height"); - let target_epoch = epochs - .get(target_epoch_index) - .expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); - - if target_epoch.epoch_id < StacksEpochId::Epoch30 { - // need to catch the sortition DB up - self.handle_new_epoch2_burnchain_block(&mut HashSet::new())?; - } + return self.handle_new_epoch2_burnchain_block(&mut HashSet::new()); + } + + // burnchain has advanced to epoch 3.0, but has our sortition DB? 
+ let canonical_snapshot = match self.canonical_sortition_tip.as_ref() { + Some(sn_tip) => SortitionDB::get_block_snapshot(self.sortition_db.conn(), sn_tip)? + .expect(&format!( + "FATAL: do not have previously-calculated highest valid sortition tip {}", + sn_tip + )), + None => SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())?, + }; + let target_epoch_index = StacksEpoch::find_epoch(&epochs, canonical_snapshot.block_height) + .expect("FATAL: epoch not defined for BlockSnapshot height"); + let target_epoch = epochs + .get(target_epoch_index) + .expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); - // proceed to process sortitions in epoch 3.0 - self.handle_new_nakamoto_burnchain_block() + if target_epoch.epoch_id < StacksEpochId::Epoch30 { + // need to catch the sortition DB up + self.handle_new_epoch2_burnchain_block(&mut HashSet::new())?; } + + // proceed to process sortitions in epoch 3.0 + self.handle_new_nakamoto_burnchain_block() + .map(|can_proceed| { + if can_proceed { + None + } else { + // missing PoX anchor block, but unlike in 2.x, we don't know what it is! + Some(BlockHeaderHash([0x00; 32])) + } + }) } /// Are affirmation maps active during the epoch? 
From a9d0e24992e48cce8b01450cba24afc6333f48be Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:09:19 -0500 Subject: [PATCH 105/122] fix: take block event struct by reference --- stackslib/src/chainstate/coordinator/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 6a10ae0b75..bc9259061a 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -429,7 +429,7 @@ pub struct NullEventDispatcher; impl BlockEventDispatcher for NullEventDispatcher { fn announce_block( &self, - _block: StacksBlockEventData, + _block: &StacksBlockEventData, _metadata: &StacksHeaderInfo, _receipts: &[StacksTransactionReceipt], _parent: &StacksBlockId, From a51845b729afa887aa090603d43a22578eb507b2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:09:52 -0500 Subject: [PATCH 106/122] fix: address PR feedback and fix a couple bugs -- (1) process the next reward cycle's sortitions when we reach the anchor block when processing stacks blocks, and (2) always try to re-calculate the reward set info if we don't yet have it when processing prepare-phase sortitions --- .../chainstate/nakamoto/coordinator/mod.rs | 150 ++++++++---------- 1 file changed, 64 insertions(+), 86 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 8b5b7c0589..53bec549f4 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -82,13 +82,6 @@ impl OnChainRewardSetProvider { let liquid_ustx = chainstate.get_liquid_ustx(block_id); - debug!( - "PoX addrs at {} ({}): {:?}", - block_id, - registered_addrs.len(), - ®istered_addrs - ); - let (threshold, participation) = StacksChainState::get_reward_threshold_and_participation( &burnchain.pox_constants, 
®istered_addrs[..], @@ -102,9 +95,9 @@ impl OnChainRewardSetProvider { )); if cur_epoch.epoch_id >= StacksEpochId::Epoch30 && participation == 0 { - // no one is stacking. This is a fatal error. - error!("No PoX participation. Aborting."); - panic!(); + // no one is stacking + error!("No PoX participation"); + return Err(Error::PoXAnchorBlockRequired); } info!("PoX reward cycle threshold computed"; @@ -456,12 +449,9 @@ impl< signal_mining_blocked(miner_status.clone()); debug!("Received new burn block notice"); match self.handle_new_nakamoto_burnchain_block() { - Ok(missing_block_opt) => { - if missing_block_opt.is_some() { - debug!( - "Missing canonical anchor block {}", - &missing_block_opt.clone().unwrap() - ); + Ok(can_proceed) => { + if !can_proceed { + error!("Missing canonical anchor block",); } } Err(e) => { @@ -476,7 +466,7 @@ impl< return false; } - return true; + true } /// Handle one or more new Nakamoto Stacks blocks. @@ -528,20 +518,11 @@ impl< let Some(block_receipt) = processed_block_receipt.take() else { // out of blocks + debug!("No more blocks to process (no receipts)"); break; }; - // only bump the coordinator's state if the processed block - // is in our sortition fork let block_hash = block_receipt.header.anchored_header.block_hash(); - let in_sortition_set = self - .sortition_db - .is_stacks_block_in_sortition_set(&canonical_sortition_tip, &block_hash)?; - - if !in_sortition_set { - continue; - } - let ( canonical_stacks_block_id, canonical_stacks_block_height, @@ -637,7 +618,9 @@ impl< // This is the first Stacks block in the prepare phase for the next reward cycle. 
// Pause here and process the next sortitions - return Ok(Some(block_hash)); + debug!("Process next reward cycle's sortitions"); + self.handle_new_nakamoto_burnchain_block()?; + debug!("Processed next reward cycle's sortitions"); } // no PoX anchor block found @@ -647,7 +630,7 @@ impl< /// Given a burnchain header, find the PoX reward cycle info fn get_nakamoto_reward_cycle_info( &mut self, - burn_header: &BurnchainBlockHeader, + block_height: u64, ) -> Result, Error> { let sortition_tip_id = self .canonical_sortition_tip @@ -655,7 +638,7 @@ impl< .expect("FATAL: Processing anchor block, but no known sortition tip"); get_nakamoto_reward_cycle_info( - burn_header.block_height, + block_height, sortition_tip_id, &self.burnchain, &mut self.chain_state_db, @@ -664,29 +647,14 @@ impl< ) } - /// Process the next-available burnchain block, if possible. - /// Burnchain blocks can only be processed for the last-known PoX reward set, which is to say, - /// burnchain block processing can be blocked on the unavailability of the next PoX anchor - /// block. If the next PoX anchor block is not available, then no burnchain block processing - /// happens, and the hash of the PoX anchor block is returned instead. - /// - /// Returns Err(..) if an error occurred while processing (i.e. a DB error). - pub fn handle_new_nakamoto_burnchain_block( - &mut self, - ) -> Result, Error> { - // highest burnchain block we've downloaded - let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; - - debug!("Handle new canonical burnchain tip"; - "height" => %canonical_burnchain_tip.block_height, - "block_hash" => %canonical_burnchain_tip.block_hash.to_string()); - - // Retrieve all the direct ancestors of this block with an unprocessed sortition - let mut cursor = canonical_burnchain_tip.block_hash.clone(); + /// Find sortitions to process. 
+ /// Returns the last processed ancestor of `cursor`, and any unprocessed burnchain blocks + fn find_sortitions_to_process( + &self, + mut cursor: BurnchainHeaderHash, + ) -> Result<(SortitionId, VecDeque), Error> { let mut sortitions_to_process = VecDeque::new(); - - // We halt the ancestry research as soon as we find a processed parent - let mut last_processed_ancestor = loop { + let last_processed_ancestor = loop { if let Some(found_sortition) = self.sortition_db.is_sortition_processed(&cursor)? { debug!( "Ancestor sortition {} of block {} is processed", @@ -715,7 +683,27 @@ impl< sortitions_to_process.push_front(current_block); cursor = parent; }; + Ok((last_processed_ancestor, sortitions_to_process)) + } + + /// Process the next-available burnchain block, if possible. + /// Burnchain blocks can only be processed for the last-known PoX reward set, which is to say, + /// burnchain block processing can be blocked on the unavailability of the next PoX anchor + /// block. If the next PoX anchor block is not available, then no burnchain block processing + /// happens, and this function returns false. It returns true otherwise. + /// + /// Returns Err(..) if an error occurred while processing (i.e. a DB error). 
+ pub fn handle_new_nakamoto_burnchain_block(&mut self) -> Result { + // highest burnchain block we've downloaded + let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; + + debug!("Handle new canonical burnchain tip"; + "height" => %canonical_burnchain_tip.block_height, + "block_hash" => %canonical_burnchain_tip.block_hash.to_string()); + // Retrieve all the direct ancestors of this block with an unprocessed sortition + let (mut last_processed_ancestor, sortitions_to_process) = + self.find_sortitions_to_process(canonical_burnchain_tip.block_hash.clone())?; let dbg_burn_header_hashes: Vec<_> = sortitions_to_process .iter() .map(|block| { @@ -764,7 +752,7 @@ impl< // try to eagerly load up the reward cycle information, so we can persist it and // make it available to signers. If we're at the _end_ of the prepare phase, then // we have no choice but to block. - let reward_cycle_info = self.get_nakamoto_reward_cycle_info(&header)?; + let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; if let Some(rc_info) = reward_cycle_info { // in nakamoto, if we have any reward cycle info at all, it will be known. assert!( @@ -777,41 +765,26 @@ impl< let reward_cycle_info = if self.burnchain.is_reward_cycle_start(header.block_height) { // we're at the end of the prepare phase, so we'd better have obtained the reward // cycle info of we must block. 
- let prepare_phase_sortitions = find_prepare_phase_sortitions( - &self.sortition_db, - &self.burnchain, - &last_processed_ancestor, - )?; - - if let Some(first_sn) = prepare_phase_sortitions.first() { - let reward_cycle_info = SortitionDB::get_preprocessed_reward_set( - &self.sortition_db.conn(), - &first_sn.sortition_id, - )?; - if let Some(rc_info) = reward_cycle_info.as_ref() { - // we must have an anchor block - assert!( - rc_info.known_selected_anchor_block().is_some(), - "FATAL: do not know prior reward cycle anchor block" - ); - } else { - // have to block -- we don't have the reward cycle information - debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; - "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), - "sortition_id" => %first_sn.sortition_id - ); - return Ok(None); - } - reward_cycle_info + // N.B. it's `- 2` because `is_reward_cycle_start` implies that `block_height % reward_cycle_length == 1`, + // but this call needs `block_height % reward_cycle_length == reward_cycle_length - 1` -- i.e. `block_height` + // must be the last block height in the last reward cycle. + let reward_cycle_info = + self.get_nakamoto_reward_cycle_info(header.block_height - 2)?; + if let Some(rc_info) = reward_cycle_info.as_ref() { + // in nakamoto, if we have any reward cycle info at all, it will be known. + assert!( + rc_info.known_selected_anchor_block().is_some(), + "FATAL: unknown PoX anchor block in Nakamoto" + ); } else { - // have to block -- we don't have any sortitions in the preceding prepare - // phase. - // this is really unreachable, but don't panic just yet. 
- debug!("Do not yet have PoX anchor block for next reward cycle -- no prepare-phase sortitions"; - "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height) + // have to block -- we don't have the reward cycle information + debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; + "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), + "reward_cycle_end" => header.block_height - 2 ); - return Ok(None); + return Ok(false); } + reward_cycle_info } else { // not starting a reward cycle anyway None @@ -843,6 +816,11 @@ impl< Error::FailedToProcessSortition(e) })?; + // mark this burn block as processed in the nakamoto chainstate + let tx = self.chain_state_db.staging_db_tx_begin()?; + NakamotoChainState::set_burn_block_processed(&tx, &next_snapshot.consensus_hash)?; + tx.commit().map_err(DBError::SqliteError)?; + let sortition_id = next_snapshot.sortition_id; self.notifier.notify_sortition_processed(); @@ -861,6 +839,6 @@ impl< last_processed_ancestor = sortition_id; } - Ok(None) + Ok(true) } } From cb5fa064cc03e0a4a07a1675cef342f13460c3bf Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:10:48 -0500 Subject: [PATCH 107/122] feat: test each tenure runs' chain tips, and test that we can replay blocks in random order and still reach the same chain tip --- .../chainstate/nakamoto/coordinator/tests.rs | 349 ++++++++++++++++-- 1 file changed, 321 insertions(+), 28 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index e5a695f438..2c453c894f 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -16,10 +16,14 @@ use crate::net::test::{TestPeer, TestPeerConfig}; +use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::PrincipalData; use 
crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::coordinator::tests::p2pkh_from; use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::test::make_pox_4_lockup; @@ -35,11 +39,14 @@ use crate::chainstate::stacks::TransactionAuth; use crate::chainstate::stacks::TransactionPayload; use crate::chainstate::stacks::TransactionVersion; +use crate::net::relay::Relayer; + use crate::clarity::vm::types::StacksAddressExtensions; use stacks_common::address::AddressHashMode; use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksBlockId; use stacks_common::types::chainstate::StacksPrivateKey; use stacks_common::types::chainstate::StacksPublicKey; use stacks_common::types::Address; @@ -48,11 +55,14 @@ use stacks_common::util::vrf::VRFProof; use crate::core::StacksEpochExtension; -/// Make a peer and transition it into the Nakamoto epoch. -/// The node needs to be stacking; otherwise, Nakamoto won't activate. 
-fn boot_nakamoto(test_name: &str, mut initial_balances: Vec<(PrincipalData, u64)>) -> TestPeer { - let mut peer_config = TestPeerConfig::new(test_name, 0, 0); - let private_key = peer_config.private_key.clone(); +use rand::prelude::SliceRandom; +use rand::thread_rng; +use rand::RngCore; + +/// Bring a TestPeer into the Nakamoto Epoch +fn advance_to_nakamoto(peer: &mut TestPeer) { + let mut peer_nonce = 0; + let private_key = peer.config.private_key.clone(); let addr = StacksAddress::from_public_keys( C32_ADDRESS_VERSION_TESTNET_SINGLESIG, &AddressHashMode::SerializeP2PKH, @@ -61,21 +71,6 @@ fn boot_nakamoto(test_name: &str, mut initial_balances: Vec<(PrincipalData, u64) ) .unwrap(); - // reward cycles are 5 blocks long - // first 25 blocks are boot-up - // reward cycle 6 instantiates pox-3 - // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation - peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); - peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; - peer_config.initial_balances.append(&mut initial_balances); - peer_config.burnchain.pox_constants.v2_unlock_height = 21; - peer_config.burnchain.pox_constants.pox_3_activation_height = 26; - peer_config.burnchain.pox_constants.v3_unlock_height = 27; - peer_config.burnchain.pox_constants.pox_4_activation_height = 31; - - let mut peer = TestPeer::new(peer_config); - let mut peer_nonce = 0; - // advance through cycle 6 for _ in 0..5 { peer.tenure_with_txs(&[], &mut peer_nonce); @@ -102,16 +97,78 @@ fn boot_nakamoto(test_name: &str, mut initial_balances: Vec<(PrincipalData, u64) } // peer is at the start of cycle 8 +} + +/// Make a peer and transition it into the Nakamoto epoch. +/// The node needs to be stacking; otherwise, Nakamoto won't activate. 
+fn boot_nakamoto(test_name: &str, mut initial_balances: Vec<(PrincipalData, u64)>) -> TestPeer { + let mut peer_config = TestPeerConfig::new(test_name, 0, 0); + let private_key = peer_config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + // reward cycles are 5 blocks long + // first 25 blocks are boot-up + // reward cycle 6 instantiates pox-3 + // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation + peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.initial_balances.append(&mut initial_balances); + peer_config.burnchain.pox_constants.v2_unlock_height = 21; + peer_config.burnchain.pox_constants.pox_3_activation_height = 26; + peer_config.burnchain.pox_constants.v3_unlock_height = 27; + peer_config.burnchain.pox_constants.pox_4_activation_height = 31; + + let mut peer = TestPeer::new(peer_config); + advance_to_nakamoto(&mut peer); peer } +/// Make a replay peer, used for replaying the blockchain +fn make_replay_peer<'a>(peer: &'a mut TestPeer<'a>) -> TestPeer<'a> { + let mut replay_config = peer.config.clone(); + replay_config.test_name = format!("{}.replay", &peer.config.test_name); + + let mut replay_peer = TestPeer::new(replay_config); + advance_to_nakamoto(&mut replay_peer); + + // sanity check + let replay_tip = { + let sort_db = replay_peer.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + tip + }; + let tip = { + let sort_db = peer.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let sort_ic = sort_db.index_conn(); + let ancestor_tip = SortitionDB::get_ancestor_snapshot( + &sort_ic, + replay_tip.block_height, + 
&tip.sortition_id, + ) + .unwrap() + .unwrap(); + ancestor_tip + }; + + assert_eq!(tip, replay_tip); + replay_peer +} + /// Make a token-transfer from a private key fn make_token_transfer( chainstate: &mut StacksChainState, sortdb: &SortitionDB, private_key: &StacksPrivateKey, nonce: u64, - amt: u128, + amt: u64, + fee: u64, recipient_addr: &StacksAddress, ) -> StacksTransaction { let mut stx_transfer = StacksTransaction::new( @@ -119,12 +176,13 @@ fn make_token_transfer( TransactionAuth::from_p2pkh(private_key).unwrap(), TransactionPayload::TokenTransfer( recipient_addr.clone().to_account_principal(), - 1, + amt, TokenTransferMemo([0x00; 34]), ), ); stx_transfer.chain_id = 0x80000000; stx_transfer.anchor_mode = TransactionAnchorMode::OnChainOnly; + stx_transfer.set_tx_fee(fee); stx_transfer.auth.set_origin_nonce(nonce); let mut tx_signer = StacksTransactionSigner::new(&stx_transfer); @@ -134,6 +192,48 @@ fn make_token_transfer( stx_transfer_signed } +/// Given the blocks and block-commits for a reward cycle, replay the sortitions on the given +/// TestPeer but submit the blocks in random order. 
+fn replay_reward_cycle( + peer: &mut TestPeer, + burn_ops: &[Vec], + stacks_blocks: &[NakamotoBlock], +) { + eprintln!("\n\n=============================================\nBegin replay\n==============================================\n"); + + let mut indexes: Vec<_> = (0..stacks_blocks.len()).collect(); + indexes.shuffle(&mut thread_rng()); + + for burn_ops in burn_ops.iter() { + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + } + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&sort_tip); + + for i in indexes.into_iter() { + let block: &NakamotoBlock = &stacks_blocks[i]; + let block_id = block.block_id(); + debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header); + + let accepted = + Relayer::process_new_nakamoto_block(&sort_handle, &mut node.chainstate, block.clone()) + .unwrap(); + if accepted { + test_debug!("Accepted Nakamoto block {}", &block_id); + peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + } else { + test_debug!("Did NOT accept Nakamoto block {}", &block_id); + } + } + + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); +} + /// Mine a single Nakamoto tenure with a single Nakamoto block #[test] fn test_simple_nakamoto_coordinator_bootup() { @@ -154,7 +254,22 @@ fn test_simple_nakamoto_coordinator_bootup() { .map(|(block, _, _)| block) .collect(); - // TODO: check chain tip + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap(); + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 12 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &blocks.last().unwrap().header + ); } /// Mine a single Nakamoto 
tenure with 10 Nakamoto blocks @@ -172,7 +287,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { let (burn_ops, tenure_change, miner_key) = peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); // do a stx transfer in each block to a given recipient @@ -193,6 +308,7 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { sortdb, &private_key, account.nonce, + 100, 1, &recipient_addr, ); @@ -209,7 +325,50 @@ fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { .map(|(block, _, _)| block) .collect(); - // TODO: check chain tip + let tip = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 21 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &blocks.last().unwrap().header + ); + + // replay the blocks and sortitions in random order, and verify that we still reach the chain + // tip + let mut replay_peer = make_replay_peer(&mut peer); + replay_reward_cycle(&mut replay_peer, &[burn_ops], &blocks); + + let tip = { + let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 21 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &blocks.last().unwrap().header + ); } /// Mine a 10 Nakamoto tenures with 10 Nakamoto blocks @@ -226,11 +385,16 @@ fn 
test_simple_nakamoto_coordinator_10_tenures_10_blocks() { .unwrap(); let mut all_blocks = vec![]; + let mut all_burn_ops = vec![]; + let mut rc_blocks = vec![]; + let mut rc_burn_ops = vec![]; + let mut consensus_hashes = vec![]; + let stx_miner_key = peer.miner.nakamoto_miner_key(); for i in 0..10 { let (burn_ops, tenure_change, miner_key) = peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); - let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); debug!("Next burnchain block: {}", &consensus_hash); @@ -254,6 +418,7 @@ fn test_simple_nakamoto_coordinator_10_tenures_10_blocks() { sortdb, &private_key, account.nonce, + 100, 1, &recipient_addr, ); @@ -265,13 +430,141 @@ fn test_simple_nakamoto_coordinator_10_tenures_10_blocks() { }, ); - let mut blocks = blocks_and_sizes + consensus_hashes.push(consensus_hash); + let mut blocks: Vec = blocks_and_sizes .into_iter() .map(|(block, _, _)| block) .collect(); + // if we're starting a new reward cycle, then save the current one + let tip = { + let sort_db = peer.sortdb.as_mut().unwrap(); + SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() + }; + if peer + .config + .burnchain + .is_reward_cycle_start(tip.block_height) + { + rc_blocks.push(all_blocks.clone()); + rc_burn_ops.push(all_burn_ops.clone()); + + all_burn_ops.clear(); + all_blocks.clear(); + } + all_blocks.append(&mut blocks); + all_burn_ops.push(burn_ops); + } + + rc_blocks.push(all_blocks.clone()); + rc_burn_ops.push(all_burn_ops.clone()); + + all_burn_ops.clear(); + all_blocks.clear(); + + // in nakamoto, tx fees are rewarded by the next tenure, so the + // scheduled rewards come 1 tenure after the coinbase reward matures + let miner = p2pkh_from(&stx_miner_key); + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + + // this is 
sortition height 12, and this miner has earned all 12 of the coinbases + // plus the initial per-block mining bonus of 2600 STX, but minus the last three rewards (since + // the miner rewards take three sortitions to confirm). + // + // This is (1000 + 2600) * 10 + 1000 - (3600 * 2 + 1000) + // first 10 block unmatured rewards + // blocks 11 + let mut expected_coinbase_rewards: u128 = 28800000000; + for (i, ch) in consensus_hashes.into_iter().enumerate() { + let sn = SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &ch) + .unwrap() + .unwrap(); + + if !sn.sortition { + continue; + } + let block_id = StacksBlockId(sn.winning_stacks_block_hash.0); + + let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); + let sort_db_tx = sort_db.tx_begin_at_tip(); + + let stx_balance = clarity_instance + .read_only_connection(&block_id, &chainstate_tx, &sort_db_tx) + .with_clarity_db_readonly(|db| db.get_account_stx_balance(&miner.clone().into())); + + // it's 1 * 10 because it's 1 uSTX per token-transfer, and 10 per tenure + let expected_total_tx_fees = 1 * 10 * (i as u128).saturating_sub(3); + let expected_total_coinbase = expected_coinbase_rewards; + + if i == 0 { + // first tenure awards the last of the initial mining bonus + expected_coinbase_rewards += (1000 + 2600) * 1000000; + } else { + // subsequent tenures award normal coinbases + expected_coinbase_rewards += 1000 * 1000000; + } + + eprintln!( + "Checking block #{} ({},{}): {} =?= {} + {}", + i, + &ch, + &sn.block_height, + stx_balance.amount_unlocked(), + expected_total_coinbase, + expected_total_tx_fees + ); + assert_eq!( + stx_balance.amount_unlocked(), + expected_total_coinbase + expected_total_tx_fees + ); } - // TODO: check chain tip + let tip = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + 
assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 111 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &rc_blocks.last().unwrap().last().unwrap().header + ); + + // replay the blocks and sortitions in random order, and verify that we still reach the chain + // tip + let mut replay_peer = make_replay_peer(&mut peer); + for (burn_ops, blocks) in rc_burn_ops.iter().zip(rc_blocks.iter()) { + replay_reward_cycle(&mut replay_peer, burn_ops, blocks); + } + + let tip = { + let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 111 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &rc_blocks.last().unwrap().last().unwrap().header + ); } From a7c13ada15418cd939e4f1eb23cfe5c51bb508ae Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:11:28 -0500 Subject: [PATCH 108/122] chore: address PR feedback --- stackslib/src/chainstate/nakamoto/mod.rs | 336 +++++++++++++---------- 1 file changed, 193 insertions(+), 143 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 8779354501..5eeefc7fd5 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -269,7 +269,7 @@ pub struct NakamotoBlockHeader { pub stacker_signature: MessageSignature, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct NakamotoBlock { pub header: NakamotoBlockHeader, pub txs: Vec, @@ -371,7 +371,7 @@ impl NakamotoBlockHeader { } pub fn is_first_mined(&self) -> bool { - StacksBlockHeader::is_first_index_block_hash(&self.parent_block_id) + self.block_id() == StacksBlockId::first_mined() } /// Sign the block header by 
the miner
@@ -461,6 +461,11 @@ impl NakamotoBlock {
         if wellformed.is_none() {
             // block isn't a first-tenure block, so no valid tenure changes
             return None;
+        } else if let Some(false) = wellformed {
+            // this block is malformed
+            info!("Block is malformed";
+                  "block_id" => %self.block_id());
+            return Some(false);
         }
 
         // Find all txs that have TenureChange payload
@@ -492,12 +497,17 @@
             }
         };
 
-        // Return true if there is a valid TenureChange
+        // Return true if all of the following are true:
+        // (1) there is at least one tenure change
+        // (2) all tenure changes are valid
         Some(
-            tenure_changes
-                .iter()
-                .find(|tc| validate(tc).is_ok())
-                .is_some(),
+            tenure_changes.len() > 0
+                && tenure_changes.len()
+                    == tenure_changes
+                        .iter()
+                        .filter(|tc| validate(tc).is_ok())
+                        .collect::>()
+                        .len(),
         )
     }
 
@@ -610,13 +620,21 @@
         }
     }
 
-        // coinbase must come next
-        if coinbase_positions
+        let coinbase_idx = *coinbase_positions
             .first()
-            .expect("FATAL: coinbase_positions.len() == 1")
-            != &tenure_change_positions.len()
-        {
-            // coinbase is not the next transaction
+            .expect("FATAL: coinbase_positions.len() == 1");
+        if coinbase_idx != tenure_change_positions.len() {
+            // coinbase is not the next transaction after tenure changes
+            return Some(false);
+        }
+
+        let TransactionPayload::Coinbase(_, _, vrf_proof_opt) = &self.txs[coinbase_idx].payload
+        else {
+            // this transaction is not a coinbase (but this should be unreachable)
+            return Some(false);
+        };
+        if vrf_proof_opt.is_none() {
+            // not a Nakamoto coinbase
             return Some(false);
         }
 
@@ -785,6 +803,7 @@ impl NakamotoBlock {
     /// Static sanity checks on transactions.
/// Verifies: + /// * the block is non-empty /// * that all txs are unique /// * that all txs use the given network /// * that all txs use the given chain ID @@ -799,6 +818,9 @@ impl NakamotoBlock { chain_id: u32, epoch_id: StacksEpochId, ) -> bool { + if self.txs.len() == 0 { + return false; + } if !StacksBlock::validate_transactions_unique(&self.txs) { return false; } @@ -820,10 +842,36 @@ impl NakamotoBlock { if !StacksBlock::validate_transactions_static_epoch(&self.txs, epoch_id) { return false; } + match self.is_wellformed_first_tenure_block() { + Some(true) => match self.tenure_changed() { + Some(false) | None => { + // either the tenure_changed() check failed, or this is a tenure change that is + // not in a well-formed tenure block. Either way, this block is invalid. + return false; + } + _ => {} + }, + Some(false) => { + // tenure_change() check failed + return false; + } + None => {} + } return true; } } +impl StacksChainState { + /// Begin a transaction against the staging blocks DB. + /// Note that this DB is (or will eventually be) in a separate database from the headers. + pub fn staging_db_tx_begin<'a>( + &'a mut self, + ) -> Result, ChainstateError> { + // TODO: this should be against a separate DB! + self.db_tx_begin() + } +} + impl NakamotoChainState { /// Notify the staging database that a given stacks block has been processed. /// This will update the attachable status for children blocks, as well as marking the stacks @@ -913,7 +961,7 @@ impl NakamotoChainState { } /// Extract and parse a nakamoto block from the DB, and verify its integrity. 
- fn load_nakamoto_block( + pub fn load_nakamoto_block( staging_db_conn: &Connection, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, @@ -925,13 +973,15 @@ impl NakamotoChainState { rusqlite::params![consensus_hash, block_hash], |row| { let data: Vec = row.get("data")?; - let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?; + let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice()) + .map_err(|_| DBError::ParseError)?; if &block.header.block_hash() != block_hash { - panic!( + error!( "Staging DB corruption: expected {}, got {}", &block_hash, &block.header.block_hash() ); + return Err(DBError::Corruption.into()); } Ok(Some(block)) }, @@ -943,7 +993,7 @@ impl NakamotoChainState { { Ok(None) } else { - Err(e) + Err(e.into()) } }) } @@ -1088,8 +1138,6 @@ impl NakamotoChainState { next_ready_block.header.consensus_hash ); - NakamotoChainState::set_block_processed(&chainstate_tx.tx, &block_id)?; - // set stacks block accepted sort_tx.set_stacks_block_accepted( &next_ready_block.header.consensus_hash, @@ -1099,12 +1147,13 @@ impl NakamotoChainState { // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { + let block_event = ( + next_ready_block, + parent_header_info.anchored_header.block_hash(), + ) + .into(); dispatcher.announce_block( - ( - next_ready_block, - parent_header_info.anchored_header.block_hash(), - ) - .into(), + &block_event, &receipt.header.clone(), &receipt.tx_receipts, &parent_block_id, @@ -1136,7 +1185,7 @@ impl NakamotoChainState { /// Called before inserting the block into the staging DB. /// Wraps `NakamotoBlock::validate_against_burnchain()`, and /// verifies that all transactions in the block are allowed in this epoch. 
- fn validate_nakamoto_block_burnchain( + pub fn validate_nakamoto_block_burnchain( db_handle: &SortitionHandleConn, block: &NakamotoBlock, mainnet: bool, @@ -1213,6 +1262,50 @@ impl NakamotoChainState { Ok(()) } + /// Insert a Nakamoto block into the staging blocks DB + pub(crate) fn store_block( + staging_db_tx: &rusqlite::Transaction, + block: NakamotoBlock, + burn_attachable: bool, + stacks_attachable: bool, + ) -> Result<(), ChainstateError> { + let block_id = block.block_id(); + staging_db_tx.execute( + "INSERT INTO nakamoto_staging_blocks ( + block_hash, + consensus_hash, + parent_block_id, + burn_attachable, + stacks_attachable, + orphaned, + processed, + + height, + index_block_hash, + download_time, + arrival_time, + processed_time, + data + ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", + params![ + &block.header.block_hash(), + &block.header.consensus_hash, + &block.header.parent_block_id, + if burn_attachable { 1 } else { 0 }, + if stacks_attachable { 1 } else { 0 }, + 0, + 0, + u64_to_sql(block.header.chain_length)?, + &block_id, + 0, + 0, + 0, + block.serialize_to_vec(), + ], + )?; + Ok(()) + } + /// Accept a Nakamoto block into the staging blocks DB. 
/// Fails if: /// * the public key cannot be recovered from the miner's signature @@ -1293,41 +1386,7 @@ impl NakamotoChainState { ).optional()?.is_none() ); - let block_id = block.block_id(); - staging_db_tx.execute( - "INSERT INTO nakamoto_staging_blocks ( - block_hash, - consensus_hash, - parent_block_id, - burn_attachable, - stacks_attachable, - orphaned, - processed, - - height, - index_block_hash, - download_time, - arrival_time, - processed_time, - data - ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", - params![ - &block.header.block_hash(), - &block.header.consensus_hash, - &block.header.parent_block_id, - if burn_attachable { 1 } else { 0 }, - if stacks_attachable { 1 } else { 0 }, - 0, - 0, - u64_to_sql(block.header.chain_length)?, - &block_id, - 0, - 0, - 0, - block.serialize_to_vec(), - ], - )?; - + Self::store_block(staging_db_tx, block, burn_attachable, stacks_attachable)?; Ok(true) } @@ -1347,7 +1406,7 @@ impl NakamotoChainState { burnchain_commit_burn: u64, burnchain_sortition_burn: u64, coinbase_reward_ustx: u128, - ) -> Result { + ) -> MinerPaymentSchedule { let miner_auth = coinbase_tx.get_origin(); let miner_addr = miner_auth.get_address(mainnet); @@ -1383,17 +1442,18 @@ impl NakamotoChainState { vtxindex: 0, }; - Ok(miner_reward) + miner_reward } /// Return the total ExecutionCost consumed during the tenure up to and including /// `block` pub fn get_total_tenure_cost_at( - conn: &Connection, + chainstate_conn: &Connection, block: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT total_tenure_cost FROM nakamoto_block_headers WHERE index_block_hash = ?"; - conn.query_row(qry, &[block], |row| row.get(0)) + chainstate_conn + .query_row(qry, &[block], |row| row.get(0)) .optional() .map_err(ChainstateError::from) } @@ -1401,12 +1461,13 @@ impl NakamotoChainState { /// Return the total transactions fees during the tenure up to and including /// `block` pub fn get_total_tenure_tx_fees_at( - conn: &Connection, + 
chainstate_conn: &Connection, block: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT tenure_tx_fees FROM nakamoto_block_headers WHERE index_block_hash = ?"; - let tx_fees_str: Option = - conn.query_row(qry, &[block], |row| row.get(0)).optional()?; + let tx_fees_str: Option = chainstate_conn + .query_row(qry, &[block], |row| row.get(0)) + .optional()?; tx_fees_str .map(|x| x.parse()) .transpose() @@ -1470,11 +1531,11 @@ impl NakamotoChainState { /// in the single Bitcoin-anchored Stacks block they produce, as /// well as the microblock stream they append to it. pub fn get_tenure_height( - conn: &Connection, + chainstate_conn: &Connection, block: &StacksBlockId, ) -> Result, ChainstateError> { let nak_qry = "SELECT tenure_height FROM nakamoto_block_headers WHERE index_block_hash = ?"; - let opt_height: Option = conn + let opt_height: Option = chainstate_conn .query_row(nak_qry, &[block], |row| row.get(0)) .optional()?; if let Some(height) = opt_height { @@ -1484,7 +1545,7 @@ impl NakamotoChainState { } let epoch_2_qry = "SELECT block_height FROM block_headers WHERE index_block_hash = ?"; - let opt_height: Option = conn + let opt_height: Option = chainstate_conn .query_row(epoch_2_qry, &[block], |row| row.get(0)) .optional()?; opt_height @@ -1495,11 +1556,11 @@ impl NakamotoChainState { /// Load block header (either Epoch-2 rules or Nakamoto) by `index_block_hash` pub fn get_block_header( - conn: &Connection, + chainstate_conn: &Connection, index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_block_headers WHERE index_block_hash = ?1"; - let result = query_row_panic(conn, sql, &[&index_block_hash], || { + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { "FATAL: multiple rows for the same block hash".to_string() })?; if result.is_some() { @@ -1507,7 +1568,7 @@ impl NakamotoChainState { } let sql = "SELECT * FROM block_headers WHERE index_block_hash = ?1"; - let result = 
query_row_panic(conn, sql, &[&index_block_hash], || { + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { "FATAL: multiple rows for the same block hash".to_string() })?; @@ -1624,11 +1685,11 @@ impl NakamotoChainState { /// Get the first block header in a Nakamoto tenure pub fn get_nakamoto_tenure_start_block_header( - conn: &Connection, + chainstate_conn: &Connection, consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height ASC LIMIT 1"; - query_row_panic(conn, sql, &[&consensus_hash], || { + query_row_panic(chainstate_conn, sql, &[&consensus_hash], || { "FATAL: multiple rows for the same consensus hash".to_string() }) .map_err(ChainstateError::DBError) @@ -1636,11 +1697,11 @@ impl NakamotoChainState { /// Get the last block header in a Nakamoto tenure pub fn get_nakamoto_tenure_finish_block_header( - conn: &Connection, + chainstate_conn: &Connection, consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height DESC LIMIT 1"; - query_row_panic(conn, sql, &[&consensus_hash], || { + query_row_panic(chainstate_conn, sql, &[&consensus_hash], || { "FATAL: multiple rows for the same consensus hash".to_string() }) .map_err(ChainstateError::DBError) @@ -1651,13 +1712,13 @@ impl NakamotoChainState { /// Returns None if there's no such block /// Returns Err on DBError pub fn get_nakamoto_block_status( - conn: &Connection, + staging_blocks_conn: &Connection, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, ) -> Result, ChainstateError> { - let sql = "SELECT (processed, orphaned) FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND block_hash = ?2"; + let sql = "SELECT processed, orphaned FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; let args: &[&dyn ToSql] = &[consensus_hash, block_hash]; - 
Ok(query_row_panic(conn, sql, args, || { + Ok(query_row_panic(staging_blocks_conn, sql, args, || { "FATAL: multiple rows for the same consensus hash and block hash".to_string() }) .map_err(ChainstateError::DBError)? @@ -1668,12 +1729,12 @@ impl NakamotoChainState { /// Returns None if the Nakamoto block's VRF proof is not found (e.g. because there is no /// Nakamoto block) pub fn get_nakamoto_tenure_vrf_proof( - conn: &Connection, + chainstate_conn: &Connection, consensus_hash: &ConsensusHash, ) -> Result, ChainstateError> { let sql = "SELECT vrf_proof FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND tenure_changed = 1"; let args: &[&dyn ToSql] = &[consensus_hash]; - let proof_bytes: Option = query_row(conn, sql, args)?; + let proof_bytes: Option = query_row(chainstate_conn, sql, args)?; if let Some(bytes) = proof_bytes { let proof = VRFProof::from_hex(&bytes) .ok_or(DBError::Corruption) @@ -1720,8 +1781,8 @@ impl NakamotoChainState { /// already-existing block commit and snapshot /// /// `header` should be a pointer to the header in `tip_info`. 
- fn insert_stacks_block_header( - tx: &Connection, + pub(crate) fn insert_stacks_block_header( + chainstate_tx: &Connection, tip_info: &StacksHeaderInfo, header: &NakamotoBlockHeader, vrf_proof: Option<&VRFProof>, @@ -1748,8 +1809,7 @@ impl NakamotoChainState { let block_hash = header.block_hash(); - let index_block_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash); + let index_block_hash = StacksBlockId::new(&consensus_hash, &block_hash); assert!(*stacks_block_height < u64::try_from(i64::MAX).unwrap()); @@ -1782,7 +1842,7 @@ impl NakamotoChainState { &vrf_proof_bytes.as_ref(), ]; - tx.execute( + chainstate_tx.execute( "INSERT INTO nakamoto_block_headers (block_height, index_root, consensus_hash, burn_header_hash, burn_header_height, @@ -1837,26 +1897,15 @@ impl NakamotoChainState { { // not the first-ever block, so linkage must occur match parent_tip { - StacksBlockHeaderTypes::Epoch2(stacks_header) => { - // this is the first nakamoto block - assert_eq!(parent_tip.block_hash(), stacks_header.block_hash()); + StacksBlockHeaderTypes::Epoch2(..) 
=> { assert_eq!( new_tip.parent_block_id, - StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_tip.block_hash() - ) + StacksBlockId::new(&parent_consensus_hash, &parent_tip.block_hash()) ); } StacksBlockHeaderTypes::Nakamoto(nakamoto_header) => { // nakamoto blocks link to their parent via index block hashes - assert_eq!( - new_tip.parent_block_id, - StacksBlockHeader::make_index_block_hash( - &nakamoto_header.consensus_hash, - &parent_tip.block_hash() - ) - ); + assert_eq!(new_tip.parent_block_id, nakamoto_header.block_id()); } } } @@ -1869,19 +1918,9 @@ impl NakamotoChainState { new_tip.chain_length ); - let parent_hash = StacksBlockId::new(parent_consensus_hash, &parent_tip.block_hash()); - assert_eq!( - parent_hash, - new_tip.parent_block_id, - "FATAL: parent_consensus_hash/parent_block_hash ({}/{}) {} != {}", - parent_consensus_hash, - &parent_tip.block_hash(), - &parent_hash, - &new_tip.parent_block_id - ); - + let parent_hash = new_tip.parent_block_id.clone(); let new_block_hash = new_tip.block_hash(); - let index_block_hash = StacksBlockId::new(&new_tip.consensus_hash, &new_block_hash); + let index_block_hash = new_tip.block_id(); // store each indexed field test_debug!("Headers index_put_begin {parent_hash}-{index_block_hash}"); @@ -2009,34 +2048,47 @@ impl NakamotoChainState { Ok(lockup_events) } + /// Begin block-processing and return all of the pre-processed state within a + /// `SetupBlockResult`. + /// + /// * Find the matured miner rewards that must be applied in this block + /// * Begin the Clarity transaction + /// * Load up the tenure's execution cost thus far + /// * Apply an epoch transition, if necessary + /// * Handle auto-unlock for PoX + /// * Process any new Stacks-on-Bitcoin transactions + /// /// Called in both follower and miner block assembly paths. 
+    /// Arguments:
+    /// * chainstate_tx: transaction against the chainstate MARF
+    /// * clarity_instance: connection to the chainstate Clarity instance
+    /// * sortition_dbconn: connection to the sortition DB MARF
+    /// * pox_constants: PoX parameters
+    /// * parent_consensus_hash, parent_header_hash, parent_stacks_height, parent_burn_height:
+    /// pointer to the already-processed parent Stacks block
+    /// * burn_header_hash, burn_header_height: pointer to the Bitcoin block that identifies the
+    /// tenure of this block to be processed
+    /// * mainnet: whether or not we're in mainnet
+    /// * tenure_changed: whether or not this block represents a tenure change
+    /// * tenure_height: the number of tenures that this block confirms
     ///
     /// Returns clarity_tx, list of receipts, microblock execution cost,
     /// microblock fees, microblock burns, list of microblock tx receipts,
     /// miner rewards tuples, the stacks epoch id, and a boolean that
     /// represents whether the epoch transition has been applied.
     pub fn setup_block<'a, 'b>(
-        // Transaction against the chainstate
         chainstate_tx: &'b mut ChainstateTx,
-        // Clarity connection to the chainstate
         clarity_instance: &'a mut ClarityInstance,
-        // Reference to the sortition DB
         sortition_dbconn: &'b dyn SortitionDBRef,
-        // PoX constants for the system
         pox_constants: &PoxConstants,
-        // Stacks chain tip
         parent_consensus_hash: ConsensusHash,
         parent_header_hash: BlockHeaderHash,
         parent_stacks_height: u64,
         parent_burn_height: u32,
-        // Burnchain block hash and height of the tenure for this Stacks block
         burn_header_hash: BurnchainHeaderHash,
         burn_header_height: u32,
-        // are we in mainnet or testnet?
         mainnet: bool,
-        // is this the start of a new tenure?
         tenure_changed: bool,
-        // What tenure height are we in?
tenure_height: u64, ) -> Result, ChainstateError> { let parent_index_hash = StacksBlockId::new(&parent_consensus_hash, &parent_header_hash); @@ -2096,6 +2148,7 @@ impl NakamotoChainState { } }; + // TODO: only need to do this if this is a tenure-start block let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops) = StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( chainstate_tx, @@ -2559,27 +2612,24 @@ impl NakamotoChainState { 0 }; - Some( - Self::make_scheduled_miner_reward( - mainnet, - evaluated_epoch, - &parent_tenure_header.anchored_header.block_hash(), - &parent_tenure_header.consensus_hash, - &block_hash, - &block.header.consensus_hash, - next_block_height, - block - .get_coinbase_tx() - .ok_or(ChainstateError::InvalidStacksBlock( - "No coinbase transaction in tenure changing block".into(), - ))?, - parent_tenure_fees, - burnchain_commit_burn, - burnchain_sortition_burn, - total_coinbase, - ) - .expect("FATAL: parsed and processed a block without a coinbase"), - ) + Some(Self::make_scheduled_miner_reward( + mainnet, + evaluated_epoch, + &parent_tenure_header.anchored_header.block_hash(), + &parent_tenure_header.consensus_hash, + &block_hash, + &block.header.consensus_hash, + next_block_height, + block + .get_coinbase_tx() + .ok_or(ChainstateError::InvalidStacksBlock( + "No coinbase transaction in tenure changing block".into(), + ))?, + parent_tenure_fees, + burnchain_commit_burn, + burnchain_sortition_burn, + total_coinbase, + )) } else { None }; From 6a2b166b30a431ec948df8a63211cfd3c4cff0df Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:11:45 -0500 Subject: [PATCH 109/122] chore: add unit tests to most of the NakamotoChainState helpers and add a codec test for nakamoto blocks --- .../src/chainstate/nakamoto/tests/mod.rs | 1153 ++++++++++------- 1 file changed, 662 insertions(+), 491 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 
e4b38bed06..5ef40e1816 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -19,6 +19,7 @@ use std::fs; use clarity::types::chainstate::{PoxId, SortitionId, StacksBlockId}; use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::StacksAddressExtensions; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use stacks_common::types::chainstate::StacksAddress; @@ -27,7 +28,7 @@ use stacks_common::types::chainstate::{ TrieHash, }; use stacks_common::types::{PrivateKey, StacksEpoch, StacksEpochId}; -use stacks_common::util::hash::{hex_bytes, Hash160, Sha512Trunc256Sum}; +use stacks_common::util::hash::{hex_bytes, Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::{VRFPrivateKey, VRFProof}; use stdext::prelude::Integer; @@ -47,12 +48,13 @@ use crate::chainstate::stacks::db::{ StacksHeaderInfo, }; use crate::chainstate::stacks::{ - CoinbasePayload, SchnorrThresholdSignature, StacksBlockHeader, StacksTransaction, + CoinbasePayload, SchnorrThresholdSignature, StacksBlock, StacksBlockHeader, StacksTransaction, StacksTransactionSigner, TenureChangeCause, TenureChangePayload, TokenTransferMemo, - TransactionAuth, TransactionPayload, TransactionVersion, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionVersion, }; use crate::core; use crate::core::StacksEpochExtension; +use crate::net::codec::test::check_codec_and_corruption; /// Get an address's account pub fn get_account( @@ -89,564 +91,733 @@ fn test_path(name: &str) -> String { pub mod node; #[test] -pub fn nakamoto_advance_tip_simple() { - let path = test_path(function_name!()); - let _r = std::fs::remove_dir_all(&path); +fn codec_nakamoto_header() { + let header = NakamotoBlockHeader { + version: 1, + chain_length: 2, + burn_spent: 3, + consensus_hash: 
ConsensusHash([0x04; 20]), + parent_block_id: StacksBlockId([0x05; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), + state_index_root: TrieHash([0x07; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty(), + }; - let burnchain_conf = get_burnchain(&path, None); + let bytes = vec![ + // version + 0x01, // chain length + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // burn spent + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, // consensus hash + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, // parent block id + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, // tx merkle root + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, // state index root + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, // miner signature + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, // stacker signature + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + ]; - let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + check_codec_and_corruption(&header, &bytes); +} - let stacker_sk = StacksPrivateKey::from_seed(&[0]); - let stacker = p2pkh_from(&stacker_sk); - let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); - let initial_balances = vec![(stacker.clone().into(), balance)]; +#[test] +pub fn test_nakamoto_first_tenure_block_syntactic_validation() { + let private_key = StacksPrivateKey::new(); + let header = NakamotoBlockHeader { + version: 1, + chain_length: 2, + burn_spent: 3, + consensus_hash: ConsensusHash([0x04; 20]), + parent_block_id: StacksBlockId([0x05; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), + state_index_root: TrieHash([0x07; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty(), + }; - let pox_constants = PoxConstants::mainnet_default(); + let tenure_change_payload = TransactionPayload::TenureChange(TenureChangePayload { + previous_tenure_end: header.parent_block_id.clone(), + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x02; 20]), + signature: SchnorrThresholdSignature {}, + signers: vec![], + }); - setup_states_with_epochs( - &[&path], - &vrf_keys, - &committers, - None, - Some(initial_balances), - StacksEpochId::Epoch21, - Some(StacksEpoch::all(0, 0, 1000000)), - ); + let invalid_tenure_change_payload = TransactionPayload::TenureChange(TenureChangePayload { + // bad parent block ID + previous_tenure_end: StacksBlockId([0x00; 32]), + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x02; 20]), + signature: SchnorrThresholdSignature {}, + signers: 
vec![], + }); - let mut sort_db = get_rw_sortdb(&path, None); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); - let b = get_burnchain(&path, None); - let burnchain = get_burnchain_db(&path, None); - let mut chainstate = get_chainstate(&path); - let chainstate_chain_id = chainstate.chain_id; - let (mut chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); + let coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof.clone())); - let mut sortdb_tx = sort_db.tx_handle_begin(&tip.sortition_id).unwrap(); + // invalid coinbase payload -- needs a proof + let invalid_coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None); - let chain_tip_burn_header_hash = BurnchainHeaderHash([0; 32]); - let chain_tip_burn_header_height = 1; - let chain_tip_burn_header_timestamp = 100; + let mut tenure_change_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + tenure_change_payload.clone(), + ); + tenure_change_tx.chain_id = 0x80000000; + tenure_change_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let mut invalid_tenure_change_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + invalid_tenure_change_payload.clone(), + ); + invalid_tenure_change_tx.chain_id = 0x80000000; + invalid_tenure_change_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let 
proof = VRFProof::from_bytes(&proof_bytes).unwrap(); - let coinbase_tx_payload = - TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, Some(proof)); let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&stacker_sk).unwrap(), - coinbase_tx_payload, - ); - coinbase_tx.chain_id = chainstate_chain_id; - let txid = coinbase_tx.txid(); - coinbase_tx.sign_next_origin(&txid, &stacker_sk).unwrap(); - - let parent_block_id = - StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH); - let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload { - previous_tenure_end: parent_block_id, - previous_tenure_blocks: 0, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0; 20]), - signature: SchnorrThresholdSignature {}, - signers: vec![], - }); - let mut tenure_tx = StacksTransaction::new( + TransactionAuth::from_p2pkh(&private_key).unwrap(), + coinbase_payload.clone(), + ); + coinbase_tx.chain_id = 0x80000000; + coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let mut invalid_coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&stacker_sk).unwrap(), - tenure_change_tx_payload, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + invalid_coinbase_payload.clone(), ); - tenure_tx.chain_id = chainstate_chain_id; - tenure_tx.set_origin_nonce(1); - let txid = tenure_tx.txid(); - let mut tenure_tx_signer = StacksTransactionSigner::new(&tenure_tx); - tenure_tx_signer.sign_origin(&stacker_sk).unwrap(); - let tenure_tx = tenure_tx_signer.get_tx().unwrap(); + invalid_coinbase_tx.chain_id = 0x80000000; + invalid_coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + // no tenure change if the block doesn't have a tenure change let block = NakamotoBlock { - header: NakamotoBlockHeader { - version: 100, - chain_length: 1, - burn_spent: 5, - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: 
TrieHash::from_hex( - "9f283c59142dec747911897fc120f1d2af8c0384830a95e1847803ee31a70ab1", - ) - .unwrap(), - stacker_signature: MessageSignature([0; 65]), - miner_signature: MessageSignature([0; 65]), - consensus_hash: tip.consensus_hash.clone(), - parent_block_id: parent_block_id.clone(), - }, - txs: vec![coinbase_tx, tenure_tx], + header: header.clone(), + txs: vec![], }; - let block_size = 10; - let burnchain_commit_burn = 1; - let burnchain_sortition_burn = 5; - let parent_chain_tip = StacksHeaderInfo { - anchored_header: StacksBlockHeader { - version: 100, - total_work: StacksWorkScore::genesis(), - proof: VRFProof::empty(), - parent_block: BlockHeaderHash([0; 32]), - parent_microblock: BlockHeaderHash([0; 32]), - parent_microblock_sequence: 0, - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash([0; 32]), - microblock_pubkey_hash: Hash160([1; 20]), - } - .into(), - microblock_tail: None, - stacks_block_height: 0, - index_root: TrieHash([0; 32]), - consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - burn_header_hash: tip.burn_header_hash.clone(), - burn_header_height: 2, - burn_header_timestamp: 50, - anchored_block_size: 10, + assert_eq!(block.is_wellformed_first_tenure_block(), None); + assert_eq!(block.tenure_changed(), None); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); // empty blocks not allowed + + // syntactically invalid block if there's a tenure change but no coinbase + let block = NakamotoBlock { + header: header.clone(), + txs: vec![tenure_change_tx.clone()], }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); 
- NakamotoChainState::append_block( - &mut chainstate_tx, - clarity_instance, - &mut sortdb_tx, - &pox_constants, - &parent_chain_tip, - &chain_tip_burn_header_hash, - chain_tip_burn_header_height, - chain_tip_burn_header_timestamp, - &block, - block_size, - burnchain_commit_burn, - burnchain_sortition_burn, - ) - .unwrap(); -} + // syntactically invalid block if there's a coinbase but not tenure change + let block = NakamotoBlock { + header: header.clone(), + txs: vec![coinbase_tx.clone()], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); -#[test] -pub fn staging_blocks() { - let path = test_path(function_name!()); - let _r = std::fs::remove_dir_all(&path); + // syntactically invalid block if there's a coinbase and tenure change, but the coinbase is + // missing a proof + let block = NakamotoBlock { + header: header.clone(), + txs: vec![tenure_change_tx.clone(), invalid_coinbase_tx.clone()], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); - let burnchain_conf = get_burnchain(&path, None); + // syntactically invalid block if there is more than one coinbase transaction + let block = NakamotoBlock { + header: header.clone(), + txs: vec![ + tenure_change_tx.clone(), + coinbase_tx.clone(), + coinbase_tx.clone(), + ], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + 
assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); - let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + // syntactically invalid block if the coinbase comes before a tenure change + let block = NakamotoBlock { + header: header.clone(), + txs: vec![coinbase_tx.clone(), tenure_change_tx.clone()], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); - let miner_sks: Vec<_> = (0..10).map(|i| StacksPrivateKey::from_seed(&[i])).collect(); + // syntactically invalid block if there is a tenure change after the coinbase + let block = NakamotoBlock { + header: header.clone(), + txs: vec![ + tenure_change_tx.clone(), + coinbase_tx.clone(), + tenure_change_tx.clone(), + ], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); - let transacter_sk = StacksPrivateKey::from_seed(&[1]); - let transacter = p2pkh_from(&transacter_sk); + // syntatically invalid block if there's an invalid tenure change + let block = NakamotoBlock { + header: header.clone(), + txs: vec![ + tenure_change_tx.clone(), + invalid_tenure_change_tx.clone(), + coinbase_tx.clone(), + ], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(true)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), Some(&coinbase_tx)); + assert_eq!(block.get_vrf_proof(), Some(&proof)); + assert_eq!( + 
block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); - let recipient_sk = StacksPrivateKey::from_seed(&[2]); - let recipient = p2pkh_from(&recipient_sk); + // syntactically valid only if we have syntactically valid tenure changes and a syntactically + // valid coinbase + let block = NakamotoBlock { + header: header.clone(), + txs: vec![tenure_change_tx.clone(), coinbase_tx.clone()], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(true)); + assert_eq!(block.tenure_changed(), Some(true)); + assert_eq!(block.get_coinbase_tx(), Some(&coinbase_tx)); + assert_eq!(block.get_vrf_proof(), Some(&proof)); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + true + ); - let initial_balances = vec![(transacter.clone().into(), 100000)]; - let transacter_fee = 1000; - let transacter_send = 250; + // can have multiple valid tenure changes (but note that this block is syntactically invalid + // because duplicate txs are not allowed) + let block = NakamotoBlock { + header: header.clone(), + txs: vec![ + tenure_change_tx.clone(), + tenure_change_tx.clone(), + coinbase_tx.clone(), + ], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(true)); + assert_eq!(block.tenure_changed(), Some(true)); + assert_eq!(block.get_coinbase_tx(), Some(&coinbase_tx)); + assert_eq!(block.get_vrf_proof(), Some(&proof)); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); // duplicate transaction +} - let pox_constants = PoxConstants::mainnet_default(); +#[test] +pub fn test_load_store_update_nakamoto_blocks() { + let test_name = function_name!(); + let path = test_path(&test_name); + let pox_constants = PoxConstants::new(5, 3, 3, 25, 5, 0, 0, 0, 0, 0, 0, 0); + let epochs = StacksEpoch::unit_test_3_0_only(1); + let _ = std::fs::remove_dir_all(&path); + let burnchain_conf = get_burnchain(&path, Some(pox_constants.clone())); 
setup_states_with_epochs( &[&path], - &vrf_keys, - &committers, + &[], + &[], + Some(pox_constants.clone()), None, - Some(initial_balances), - StacksEpochId::Epoch21, - Some(StacksEpoch::all(0, 0, 1000000)), - ); - - let mut sort_db = get_rw_sortdb(&path, None); - - for i in 1..6u8 { - let parent_snapshot = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let miner_pk = Secp256k1PublicKey::from_private(&miner_sks[usize::from(i)]); - let miner_pk_hash = Hash160::from_node_public_key(&miner_pk); - eprintln!("Advance sortition: {i}. Miner PK = {miner_pk:?}"); - let new_bhh = BurnchainHeaderHash([i; 32]); - let new_ch = ConsensusHash([i; 20]); - let new_sh = SortitionHash([1; 32]); - - let new_snapshot = BlockSnapshot { - block_height: parent_snapshot.block_height + 1, - burn_header_timestamp: 100 * u64::from(i), - burn_header_hash: new_bhh.clone(), - parent_burn_header_hash: parent_snapshot.burn_header_hash.clone(), - consensus_hash: new_ch.clone(), - ops_hash: OpsHash([0; 32]), - total_burn: 10, - sortition: true, - sortition_hash: new_sh, - winning_block_txid: Txid([0; 32]), - winning_stacks_block_hash: BlockHeaderHash([0; 32]), - index_root: TrieHash([0; 32]), - num_sortitions: parent_snapshot.num_sortitions + 1, - stacks_block_accepted: true, - stacks_block_height: 1, - arrival_index: i.into(), - canonical_stacks_tip_height: i.into(), - canonical_stacks_tip_hash: BlockHeaderHash([0; 32]), - canonical_stacks_tip_consensus_hash: new_ch.clone(), - sortition_id: SortitionId::new(&new_bhh.clone(), &PoxId::new(vec![true])), - parent_sortition_id: parent_snapshot.sortition_id.clone(), - pox_valid: true, - accumulated_coinbase_ustx: 0, - miner_pk_hash: Some(miner_pk_hash), - }; - - let mut sortdb_tx = sort_db - .tx_handle_begin(&parent_snapshot.sortition_id) - .unwrap(); - - sortdb_tx - .append_chain_tip_snapshot( - &parent_snapshot, - &new_snapshot, - &vec![], - &vec![], - None, - None, - None, - ) - .unwrap(); + StacksEpochId::Epoch30, + 
Some(epochs), + ); - sortdb_tx.commit().unwrap(); - } + let private_key = StacksPrivateKey::new(); + let epoch2_proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let epoch2_proof = VRFProof::from_bytes(&epoch2_proof_bytes[..].to_vec()).unwrap(); - let mut chainstate = get_chainstate(&path); + let nakamoto_proof_bytes = hex_bytes("973c815ac3e81a4aff3243f3d8310d24ab9783acd6caa4dcfab20a3744584b2f966acf08140e1a7e1e685695d51b1b511f4f19260a21887244a6c47f7637b8bdeaf5eafe85c1975bab75bc0668fe8a0b").unwrap(); + let nakamoto_proof = VRFProof::from_bytes(&nakamoto_proof_bytes[..].to_vec()).unwrap(); - let mut block = NakamotoBlock { - header: NakamotoBlockHeader { - version: 100, - chain_length: 1, - burn_spent: 10, - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash([0; 32]), - stacker_signature: MessageSignature([0; 65]), - miner_signature: MessageSignature([0; 65]), - consensus_hash: ConsensusHash([2; 20]), - parent_block_id: StacksBlockId([1; 32]), - }, - txs: vec![], - }; + let coinbase_payload = TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + None, + Some(nakamoto_proof.clone()), + ); - let miner_signature = miner_sks[4] - .sign(block.header.signature_hash().unwrap().as_bytes()) - .unwrap(); + let mut coinbase_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + coinbase_payload.clone(), + ); + coinbase_tx.chain_id = 0x80000000; + coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - block.header.miner_signature = miner_signature; + let epoch2_txs = vec![coinbase_tx.clone()]; + let epoch2_tx_merkle_root = { + let txid_vecs = epoch2_txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); - let config = chainstate.config(); - let (chainstate_tx, _clarity_instance) = 
chainstate.chainstate_tx_begin().unwrap(); - let sortdb_conn = sort_db.index_handle_at_tip(); + MerkleTree::::new(&txid_vecs).root() + }; - NakamotoChainState::accept_block(&config, block.clone(), &sortdb_conn, &chainstate_tx).unwrap(); + let epoch2_header = StacksBlockHeader { + version: 0, + total_work: StacksWorkScore { + burn: 123, + work: 456, + }, + proof: epoch2_proof.clone(), + parent_block: BlockHeaderHash([0x11; 32]), + parent_microblock: BlockHeaderHash([0x00; 32]), + parent_microblock_sequence: 0, + tx_merkle_root: epoch2_tx_merkle_root, + state_index_root: TrieHash([0x55; 32]), + microblock_pubkey_hash: Hash160([0x66; 20]), + }; + let epoch2_consensus_hash = ConsensusHash([0x03; 20]); + let epoch2_parent_block_id = + StacksBlockId::new(&epoch2_consensus_hash, &epoch2_header.block_hash()); - chainstate_tx.commit().unwrap(); + let epoch2_header_info = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Epoch2(epoch2_header.clone()), + microblock_tail: None, + stacks_block_height: epoch2_header.total_work.work, + index_root: TrieHash([0x56; 32]), + consensus_hash: epoch2_consensus_hash.clone(), + burn_header_hash: BurnchainHeaderHash([0x77; 32]), + burn_header_height: 100, + burn_header_timestamp: 1000, + anchored_block_size: 12345, + }; - let (chainstate_tx, _clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let sortdb_conn = sort_db.index_handle_at_tip(); + let epoch2_execution_cost = ExecutionCost { + write_length: 100, + write_count: 101, + read_length: 102, + read_count: 103, + runtime: 104, + }; - assert!( - NakamotoChainState::next_ready_nakamoto_block(&chainstate_tx) - .unwrap() - .is_none(), - "No block should be ready yet", + let tenure_change_payload = TransactionPayload::TenureChange(TenureChangePayload { + previous_tenure_end: epoch2_parent_block_id.clone(), + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x02; 20]), + signature: SchnorrThresholdSignature {}, + signers: 
vec![], + }); + let mut tenure_change_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + tenure_change_payload.clone(), ); + tenure_change_tx.chain_id = 0x80000000; + tenure_change_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - let block_parent_id = block.header.parent_block_id.clone(); - NakamotoChainState::set_block_processed(&chainstate_tx, &block_parent_id).unwrap(); - - // block should be ready -- the burn view was processed before the block was inserted. - let ready_block = NakamotoChainState::next_ready_nakamoto_block(&chainstate_tx) - .unwrap() - .unwrap() - .0; - - assert_eq!(ready_block.header.block_hash(), block.header.block_hash()); + let nakamoto_txs = vec![tenure_change_tx.clone(), coinbase_tx.clone()]; + let nakamoto_tx_merkle_root = { + let txid_vecs = nakamoto_txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); - chainstate_tx.commit().unwrap(); -} + MerkleTree::::new(&txid_vecs).root() + }; -// Assemble 5 nakamoto blocks, invoking append_block. Check that miner rewards -// mature as expected. 
-#[test] -pub fn nakamoto_advance_tip_multiple() { - let path = test_path(function_name!()); - let _r = std::fs::remove_dir_all(&path); + let nakamoto_header = NakamotoBlockHeader { + version: 1, + chain_length: 457, + burn_spent: 126, + consensus_hash: ConsensusHash([0x04; 20]), + parent_block_id: epoch2_parent_block_id.clone(), + tx_merkle_root: nakamoto_tx_merkle_root, + state_index_root: TrieHash([0x07; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty(), + }; - let burnchain_conf = get_burnchain(&path, None); + let nakamoto_header_info = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Nakamoto(nakamoto_header.clone()), + microblock_tail: None, + stacks_block_height: nakamoto_header.chain_length, + index_root: TrieHash([0x67; 32]), + consensus_hash: nakamoto_header.consensus_hash.clone(), + burn_header_hash: BurnchainHeaderHash([0x88; 32]), + burn_header_height: 200, + burn_header_timestamp: 1001, + anchored_block_size: 123, + }; - let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); + let nakamoto_execution_cost = ExecutionCost { + write_length: 200, + write_count: 201, + read_length: 202, + read_count: 203, + runtime: 204, + }; - let miner_sk = StacksPrivateKey::from_seed(&[0]); - let miner = p2pkh_from(&miner_sk); + let total_nakamoto_execution_cost = ExecutionCost { + write_length: 400, + write_count: 401, + read_length: 402, + read_count: 403, + runtime: 404, + }; - let transacter_sk = StacksPrivateKey::from_seed(&[1]); - let transacter = p2pkh_from(&transacter_sk); + let epoch2_block = StacksBlock { + header: epoch2_header.clone(), + txs: epoch2_txs, + }; - let recipient_sk = StacksPrivateKey::from_seed(&[2]); - let recipient = p2pkh_from(&recipient_sk); + let nakamoto_block = NakamotoBlock { + header: nakamoto_header.clone(), + txs: nakamoto_txs, + }; - let initial_balances = vec![ - 
(miner.clone().into(), 0), - (transacter.clone().into(), 100000), - ]; - let transacter_fee = 1000; - let transacter_send = 250; + let mut chainstate = get_chainstate(&path); - let pox_constants = PoxConstants::mainnet_default(); + // store epoch2 and nakamoto headers + { + let tx = chainstate.db_tx_begin().unwrap(); + StacksChainState::insert_stacks_block_header( + &tx, + &epoch2_parent_block_id, + &epoch2_header_info, + &epoch2_execution_cost, + 1, + ) + .unwrap(); + NakamotoChainState::insert_stacks_block_header( + &tx, + &nakamoto_header_info, + &nakamoto_header, + Some(&nakamoto_proof), + &nakamoto_execution_cost, + &total_nakamoto_execution_cost, + epoch2_header_info.anchored_header.height() + 1, + true, + 300, + ) + .unwrap(); + NakamotoChainState::store_block(&tx, nakamoto_block.clone(), false, false).unwrap(); + tx.commit().unwrap(); + } - setup_states_with_epochs( - &[&path], - &vrf_keys, - &committers, - None, - Some(initial_balances), - StacksEpochId::Epoch21, - Some(StacksEpoch::all(0, 0, 1000000)), + // can load Nakamoto block, but only the Nakamoto block + assert_eq!( + NakamotoChainState::load_nakamoto_block( + chainstate.db(), + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + nakamoto_block + ); + assert_eq!( + NakamotoChainState::load_nakamoto_block( + chainstate.db(), + &epoch2_header_info.consensus_hash, + &epoch2_header.block_hash() + ) + .unwrap(), + None ); - let mut sort_db = get_rw_sortdb(&path, None); - - let b = get_burnchain(&path, None); - let burnchain = get_burnchain_db(&path, None); - let mut chainstate = get_chainstate(&path); - let chainstate_chain_id = chainstate.chain_id; - - let mut last_block: Option = None; - let index_roots = [ - "c76d48e971b2ea3c78c476486455090da37df260a41eef355d4e9330faf166c0", - "20185974f1ab02d25c6920d755594ff9c104f70ae185aa8c112245eaef0078fd", - "a079309c45f5cb70be6f67cd442d50ba6c2154d77b819321a63e4ed077e46e59", - 
"1679af6d97e102a5762e88a876e74c0083ffb492f98bde249a36a6f53b81a2ad", - "5c989f8cbdfe054b3b8c1c2a4667e97d4f43b2eef6caffe569a61598e1492b04", - ]; - - for i in 1..6 { - eprintln!("Advance tip: {}", i); - let parent_snapshot = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - - let (mut chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let mut sortdb_tx = sort_db - .tx_handle_begin(&parent_snapshot.sortition_id) - .unwrap(); + // nakamoto block should not be processed yet + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + chainstate.db(), + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + (false, false) + ); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + chainstate.db(), + &epoch2_header_info.consensus_hash, + &epoch2_header.block_hash() + ) + .unwrap(), + None + ); - let parent = match last_block.as_ref() { - Some(x) => x.header.block_hash(), - None => FIRST_STACKS_BLOCK_HASH, - }; - - let parent_header: StacksBlockHeaderTypes = match last_block.clone() { - Some(x) => x.header.into(), - None => StacksBlockHeader { - version: 100, - total_work: StacksWorkScore::genesis(), - proof: VRFProof::empty(), - parent_block: BlockHeaderHash([0; 32]), - parent_microblock: BlockHeaderHash([0; 32]), - parent_microblock_sequence: 0, - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash([0; 32]), - microblock_pubkey_hash: Hash160([1; 20]), - } - .into(), - }; - - let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes).unwrap(); - let coinbase_tx_payload = - TransactionPayload::Coinbase(CoinbasePayload([i; 32]), None, Some(proof)); - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Testnet, - 
TransactionAuth::from_p2pkh(&miner_sk).unwrap(), - coinbase_tx_payload, - ); - coinbase_tx.chain_id = chainstate_chain_id; - coinbase_tx.set_origin_nonce((i - 1).into()); - let mut coinbase_tx_signer = StacksTransactionSigner::new(&coinbase_tx); - coinbase_tx_signer.sign_origin(&miner_sk).unwrap(); - let coinbase_tx = coinbase_tx_signer.get_tx().unwrap(); - - let transacter_tx_payload = TransactionPayload::TokenTransfer( - recipient.clone().into(), - transacter_send, - TokenTransferMemo([0; 34]), - ); - let mut transacter_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&transacter_sk).unwrap(), - transacter_tx_payload, + // set nakamoto block processed + { + let tx = chainstate.db_tx_begin().unwrap(); + NakamotoChainState::set_block_processed(&tx, &nakamoto_header.block_id()).unwrap(); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + &tx, + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + (true, false) ); - transacter_tx.chain_id = chainstate_chain_id; - transacter_tx.set_tx_fee(transacter_fee); - transacter_tx.set_origin_nonce((2 * (i - 1)).into()); - let mut transacter_tx_signer = StacksTransactionSigner::new(&transacter_tx); - transacter_tx_signer.sign_origin(&transacter_sk).unwrap(); - let transacter_tx = transacter_tx_signer.get_tx().unwrap(); - - let new_bhh = BurnchainHeaderHash([i; 32]); - let new_ch = ConsensusHash([i; 20]); - let new_sh = SortitionHash([1; 32]); - - let parent_block_id = StacksBlockId::new(&parent_snapshot.consensus_hash, &parent); - let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload { - previous_tenure_end: parent_block_id, - previous_tenure_blocks: 1, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0; 20]), - signature: SchnorrThresholdSignature {}, - signers: vec![], - }); - let mut tenure_tx = StacksTransaction::new( - TransactionVersion::Testnet, - 
TransactionAuth::from_p2pkh(&transacter_sk).unwrap(), - tenure_change_tx_payload, + } + // set nakamoto block orphaned + { + let tx = chainstate.db_tx_begin().unwrap(); + NakamotoChainState::set_block_orphaned(&tx, &nakamoto_header.block_id()).unwrap(); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + &tx, + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + (true, true) ); - tenure_tx.chain_id = chainstate_chain_id; - tenure_tx.set_origin_nonce((2 * (i - 1) + 1).into()); - let txid = tenure_tx.txid(); - let mut tenure_tx_signer = StacksTransactionSigner::new(&tenure_tx); - tenure_tx_signer.sign_origin(&transacter_sk).unwrap(); - let tenure_tx = tenure_tx_signer.get_tx().unwrap(); - - let block = NakamotoBlock { - header: NakamotoBlockHeader { - version: 100, - chain_length: i.into(), - burn_spent: 10, - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash::from_hex(&index_roots[usize::from(i) - 1]).unwrap(), - stacker_signature: MessageSignature([0; 65]), - miner_signature: MessageSignature([0; 65]), - consensus_hash: new_ch, - parent_block_id: StacksBlockId::new(&parent_snapshot.consensus_hash, &parent), - }, - txs: vec![coinbase_tx, transacter_tx, tenure_tx], - }; - - let new_snapshot = BlockSnapshot { - block_height: parent_snapshot.block_height + 1, - burn_header_timestamp: 100 * u64::from(i), - burn_header_hash: new_bhh.clone(), - parent_burn_header_hash: parent_snapshot.burn_header_hash.clone(), - consensus_hash: new_ch.clone(), - ops_hash: OpsHash([0; 32]), - total_burn: 10, - sortition: true, - sortition_hash: new_sh, - winning_block_txid: Txid([0; 32]), - winning_stacks_block_hash: block.header.block_hash(), - index_root: block.header.state_index_root, - num_sortitions: parent_snapshot.num_sortitions + 1, - stacks_block_accepted: true, - stacks_block_height: block.header.chain_length, - arrival_index: i.into(), - canonical_stacks_tip_height: i.into(), - 
canonical_stacks_tip_hash: block.header.block_hash(), - canonical_stacks_tip_consensus_hash: new_ch.clone(), - sortition_id: SortitionId::new(&new_bhh.clone(), &PoxId::new(vec![true])), - parent_sortition_id: parent_snapshot.sortition_id.clone(), - pox_valid: true, - accumulated_coinbase_ustx: 0, - miner_pk_hash: None, - }; - - sortdb_tx - .append_chain_tip_snapshot( - &parent_snapshot, - &new_snapshot, - &vec![], - &vec![], - None, - None, - None, + } + // orphan nakamoto block by parent + { + let tx = chainstate.db_tx_begin().unwrap(); + NakamotoChainState::set_block_orphaned(&tx, &nakamoto_header.parent_block_id).unwrap(); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + &tx, + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() ) - .unwrap(); + .unwrap() + .unwrap(), + (false, true) + ); + } - sortdb_tx.commit().unwrap(); - let mut sortdb_tx = sort_db.tx_handle_begin(&new_snapshot.sortition_id).unwrap(); - - let chain_tip_burn_header_hash = new_snapshot.burn_header_hash.clone(); - let chain_tip_burn_header_height = new_snapshot.block_height; - let chain_tip_burn_header_timestamp = new_snapshot.burn_header_timestamp; - - let block_size = 10; - let burnchain_commit_burn = 1; - let burnchain_sortition_burn = 10; - let parent_chain_tip = StacksHeaderInfo { - anchored_header: parent_header.clone(), - microblock_tail: None, - stacks_block_height: parent_header.height(), - index_root: parent_snapshot.index_root.clone(), - consensus_hash: parent_snapshot.consensus_hash.clone(), - burn_header_hash: parent_snapshot.burn_header_hash.clone(), - burn_header_height: parent_snapshot.block_height.try_into().unwrap(), - burn_header_timestamp: parent_snapshot.burn_header_timestamp, - anchored_block_size: 10, - }; - - let (_receipt, clarity_tx) = NakamotoChainState::append_block( - &mut chainstate_tx, - clarity_instance, - &mut sortdb_tx, - &pox_constants, - &parent_chain_tip, - &chain_tip_burn_header_hash, - 
chain_tip_burn_header_height.try_into().unwrap(), - chain_tip_burn_header_timestamp, - &block, - block_size, - burnchain_commit_burn, - burnchain_sortition_burn, + // only one nakamoto block in this tenure, so it's both the start and finish + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_start_block_header( + chainstate.db(), + &nakamoto_header.consensus_hash ) - .unwrap(); + .unwrap() + .unwrap(), + nakamoto_header_info + ); + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_finish_block_header( + chainstate.db(), + &nakamoto_header.consensus_hash + ) + .unwrap() + .unwrap(), + nakamoto_header_info + ); - clarity_tx.commit(); - chainstate_tx.commit().unwrap(); + // can query the tenure-start and epoch2 headers by consensus hash + assert_eq!( + NakamotoChainState::get_block_header_by_consensus_hash( + chainstate.db(), + &nakamoto_header.consensus_hash + ) + .unwrap() + .unwrap(), + nakamoto_header_info + ); + assert_eq!( + NakamotoChainState::get_block_header_by_consensus_hash( + chainstate.db(), + &epoch2_consensus_hash + ) + .unwrap() + .unwrap(), + epoch2_header_info + ); - last_block = Some(block); - } + // can query the tenure-start and epoch2 headers by block ID + assert_eq!( + NakamotoChainState::get_block_header(chainstate.db(), &nakamoto_header.block_id()) + .unwrap() + .unwrap(), + nakamoto_header_info + ); + assert_eq!( + NakamotoChainState::get_block_header( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap() + .unwrap(), + epoch2_header_info + ); - // we've produced 5 simulated blocks now (1, 2, 3, 4, and 5) - // - // rewards from block 1 should mature 2 tenures later in block 3. - // however, due to the way `find_mature_miner_rewards` works, in - // the current setup block 1's reward is missed: - // `find_mature_miner_rewards` checks the *parent* of the current - // block (i.e., the block that block 1's reward mature's in) for - // `<= MINER_REWARD_MATURITY`. 
- // this means that for these unit tests, blocks 2 and 3 will have rewards - // processed at blocks 4 and 5 - // - // in nakamoto, tx fees are rewarded by the next tenure, so the - // scheduled rewards come 1 tenure after the coinbase reward matures - for i in 1..6 { - let ch = ConsensusHash([i; 20]); - let bh = SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &ch) + // can get tenure height of nakamoto blocks and epoch2 blocks + assert_eq!( + NakamotoChainState::get_tenure_height(chainstate.db(), &nakamoto_header.block_id()) .unwrap() + .unwrap(), + epoch2_header_info.anchored_header.height() + 1 + ); + assert_eq!( + NakamotoChainState::get_tenure_height( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap() + .unwrap(), + epoch2_header_info.anchored_header.height() + ); + + // can get total tenure cost for nakamoto blocks, but not epoch2 blocks + assert_eq!( + NakamotoChainState::get_total_tenure_cost_at(chainstate.db(), &nakamoto_header.block_id()) .unwrap() - .winning_stacks_block_hash; - let block_id = StacksBlockId::new(&ch, &bh); + .unwrap(), + total_nakamoto_execution_cost + ); + assert_eq!( + NakamotoChainState::get_total_tenure_cost_at( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap(), + None + ); - let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let sort_db_tx = sort_db.tx_begin_at_tip(); + // can get total tenure tx fees for nakamoto blocks, but not in epoch2 blocks + assert_eq!( + NakamotoChainState::get_total_tenure_tx_fees_at( + chainstate.db(), + &nakamoto_header.block_id() + ) + .unwrap() + .unwrap(), + 300 + ); + assert_eq!( + NakamotoChainState::get_total_tenure_tx_fees_at( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap(), + None + ); - let stx_balance = clarity_instance - .read_only_connection(&block_id, &chainstate_tx, &sort_db_tx) - .with_clarity_db_readonly(|db| db.get_account_stx_balance(&miner.clone().into())); + // can 
get block VRF proof for both nakamoto and epoch2 blocks + assert_eq!( + NakamotoChainState::get_block_vrf_proof(chainstate.db(), &nakamoto_header.consensus_hash) + .unwrap() + .unwrap(), + nakamoto_proof + ); + assert_eq!( + NakamotoChainState::get_block_vrf_proof(chainstate.db(), &epoch2_consensus_hash) + .unwrap() + .unwrap(), + epoch2_proof + ); + + // can get nakamoto VRF proof only for nakamoto blocks + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &nakamoto_header.consensus_hash + ) + .unwrap() + .unwrap(), + nakamoto_proof + ); + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_vrf_proof(chainstate.db(), &epoch2_consensus_hash) + .unwrap(), + None + ); + + // next ready nakamoto block is None unless both the burn block and stacks parent block have + // been processed + { + let tx = chainstate.db_tx_begin().unwrap(); + assert_eq!( + NakamotoChainState::next_ready_nakamoto_block(&tx).unwrap(), + None + ); + + // set burn processed, but this isn't enough + NakamotoChainState::set_burn_block_processed(&tx, &nakamoto_header.consensus_hash).unwrap(); + assert_eq!( + NakamotoChainState::next_ready_nakamoto_block(&tx).unwrap(), + None + ); + + // set parent block processed + NakamotoChainState::set_block_processed(&tx, &epoch2_header_info.index_block_hash()) + .unwrap(); - eprintln!("Checking block #{}", i); - let expected_total_tx_fees = u128::from(transacter_fee) * u128::from(i).saturating_sub(3); - let expected_total_coinbase = 1000000000 * u128::from(i).saturating_sub(3); + // this works now assert_eq!( - stx_balance.amount_unlocked(), - expected_total_coinbase + expected_total_tx_fees + NakamotoChainState::next_ready_nakamoto_block(&tx) + .unwrap() + .unwrap() + .0, + nakamoto_block ); } } From 3b9009f567adf6d5089e365d86b33babd53b351e Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:12:24 -0500 Subject: [PATCH 110/122] chore: always test that the chain tip advances if we accept and process a block 
--- stackslib/src/chainstate/nakamoto/tests/node.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 9c4f545fae..a9362a85d6 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -507,6 +507,17 @@ impl TestStacksNode { if accepted { test_debug!("Accepted Nakamoto block {}", &block_id); coord.handle_new_nakamoto_stacks_block().unwrap(); + + // confirm that the chain tip advanced + let stacks_chain_tip = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let nakamoto_chain_tip = stacks_chain_tip + .anchored_header + .as_stacks_nakamoto() + .expect("FATAL: chain tip is not a Nakamoto block"); + assert_eq!(nakamoto_chain_tip, &nakamoto_block.header); } else { test_debug!("Did NOT accept Nakamoto block {}", &block_id); } From c2696d42d2521487185c2cf988237a2326f8bc48 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:12:46 -0500 Subject: [PATCH 111/122] chore: remove dead code --- stackslib/src/chainstate/stacks/block.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 2485a771e9..15be745164 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -108,15 +108,6 @@ impl StacksBlockHeader { *to_check == FIRST_STACKS_BLOCK_HASH } - /// Is this the first-ever index block hash? - pub fn is_first_index_block_hash(to_check: &StacksBlockId) -> bool { - to_check - == &StacksBlockHeader::make_index_block_hash( - &FIRST_BURNCHAIN_CONSENSUS_HASH, - &FIRST_STACKS_BLOCK_HASH, - ) - } - /// Is this a first-mined block header? i.e. builds off of the boot code? 
pub fn is_first_mined(&self) -> bool { Self::is_first_block_hash(&self.parent_block) From d1a4705dbd6bad743aa7630ce5034b2709581aa6 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:12:58 -0500 Subject: [PATCH 112/122] chore: pass block event data by reference --- stackslib/src/chainstate/stacks/db/blocks.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index eba256bde1..a8e13eb61c 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -196,7 +196,7 @@ pub struct DummyEventDispatcher; impl BlockEventDispatcher for DummyEventDispatcher { fn announce_block( &self, - _block: StacksBlockEventData, + _block: &StacksBlockEventData, _metadata: &StacksHeaderInfo, _receipts: &[StacksTransactionReceipt], _parent: &StacksBlockId, @@ -6542,7 +6542,7 @@ impl StacksChainState { &next_staging_block.parent_anchored_block_hash, ); dispatcher.announce_block( - block.into(), + &block.into(), &epoch_receipt.header.clone(), &epoch_receipt.tx_receipts, &parent_id, From d799ce54b539ef80049b2095dae1c2ceab323995 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:13:12 -0500 Subject: [PATCH 113/122] chore: add codec tests for nakamoto transactions --- .../src/chainstate/stacks/transaction.rs | 320 ++++++++++++++++++ 1 file changed, 320 insertions(+) diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 8e02812240..e0d78f7cbd 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -1946,6 +1946,326 @@ mod test { ); } + #[test] + fn tx_stacks_transaction_payload_nakamoto_coinbase() { + let proof_bytes = 
hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + + let coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof)); + let coinbase_bytes = vec![ + // payload type ID + TransactionPayloadID::NakamotoCoinbase as u8, + // buffer + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + // no alt recipient, so Value::none + 0x09, + // proof bytes length + 0x00, + 0x00, + 0x00, + 0x50, + // proof bytes + 0x92, + 0x75, + 0xdf, + 0x67, + 0xa6, + 0x8c, + 0x87, + 0x45, + 0xc0, + 0xff, + 0x97, + 0xb4, + 0x82, + 0x01, + 0xee, + 0x6d, + 0xb4, + 0x47, + 0xf7, + 0xc9, + 0x3b, + 0x23, + 0xae, + 0x24, + 0xcd, + 0xc2, + 0x40, + 0x0f, + 0x52, + 0xfd, + 0xb0, + 0x8a, + 0x1a, + 0x6a, + 0xc7, + 0xec, + 0x71, + 0xbf, + 0x9c, + 0x9c, + 0x76, + 0xe9, + 0x6e, + 0xe4, + 0x67, + 0x5e, + 0xbf, + 0xf6, + 0x06, + 0x25, + 0xaf, + 0x28, + 0x71, + 0x85, + 0x01, + 0x04, + 0x7b, + 0xfd, + 0x87, + 0xb8, + 0x10, + 0xc2, + 0xd2, + 0x13, + 0x9b, + 0x73, + 0xc2, + 0x3b, + 0xd6, + 0x9d, + 0xe6, + 0x63, + 0x60, + 0x95, + 0x3a, + 0x64, + 0x2c, + 0x2a, + 0x33, + 0x0a, + ]; + + check_codec_and_corruption(&coinbase_payload, &coinbase_bytes); + } + + #[test] + fn tx_stacks_transaction_payload_nakamoto_coinbase_alt_recipient() { + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + + let recipient = PrincipalData::from(QualifiedContractIdentifier { + 
issuer: StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + } + .into(), + name: "foo-contract".into(), + }); + + let coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), Some(recipient), Some(proof)); + let coinbase_bytes = vec![ + // payload type ID + TransactionPayloadID::NakamotoCoinbase as u8, + // buffer + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + // have contract recipient, so Some(..) + 0x0a, + // contract address type + 0x06, + // address + 0x01, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + // name length + 0x0c, + // name ('foo-contract') + 0x66, + 0x6f, + 0x6f, + 0x2d, + 0x63, + 0x6f, + 0x6e, + 0x74, + 0x72, + 0x61, + 0x63, + 0x74, + // proof bytes length + 0x00, + 0x00, + 0x00, + 0x50, + // proof bytes + 0x92, + 0x75, + 0xdf, + 0x67, + 0xa6, + 0x8c, + 0x87, + 0x45, + 0xc0, + 0xff, + 0x97, + 0xb4, + 0x82, + 0x01, + 0xee, + 0x6d, + 0xb4, + 0x47, + 0xf7, + 0xc9, + 0x3b, + 0x23, + 0xae, + 0x24, + 0xcd, + 0xc2, + 0x40, + 0x0f, + 0x52, + 0xfd, + 0xb0, + 0x8a, + 0x1a, + 0x6a, + 0xc7, + 0xec, + 0x71, + 0xbf, + 0x9c, + 0x9c, + 0x76, + 0xe9, + 0x6e, + 0xe4, + 0x67, + 0x5e, + 0xbf, + 0xf6, + 0x06, + 0x25, + 0xaf, + 0x28, + 0x71, + 0x85, + 0x01, + 0x04, + 0x7b, + 0xfd, + 0x87, + 0xb8, + 0x10, + 0xc2, + 0xd2, + 0x13, + 0x9b, + 0x73, + 0xc2, + 0x3b, + 0xd6, + 0x9d, + 0xe6, + 0x63, + 0x60, + 0x95, + 0x3a, + 0x64, + 0x2c, + 0x2a, + 0x33, + 0x0a, + ]; + + check_codec_and_corruption(&coinbase_payload, &coinbase_bytes); + } + #[test] fn tx_stacks_transaction_payload_microblock_poison() { let header_1 = StacksMicroblockHeader { From 58495fca77a4033508ec1e6afab4a4091b09cb66 Mon Sep 17 00:00:00 2001 From: Jude 
Nelson Date: Sat, 11 Nov 2023 01:13:28 -0500 Subject: [PATCH 114/122] fix: deterministic microblock keypairs (so we can replay state from one peer to another) --- stackslib/src/net/mod.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 85ba9b2a93..07ef723f8c 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2684,7 +2684,7 @@ pub mod test { impl BlockEventDispatcher for TestEventObserver { fn announce_block( &self, - block: StacksBlockEventData, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[events::StacksTransactionReceipt], parent: &StacksBlockId, @@ -3599,7 +3599,11 @@ pub mod test { let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 { self.coord.handle_new_burnchain_block().unwrap() } else { - self.coord.handle_new_nakamoto_burnchain_block().unwrap() + if self.coord.handle_new_nakamoto_burnchain_block().unwrap() { + None + } else { + Some(BlockHeaderHash([0x00; 32])) + } }; let pox_id = { @@ -3995,7 +3999,7 @@ pub mod test { txs: &[StacksTransaction], coinbase_nonce: &mut usize, ) -> StacksBlockId { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = self.miner.next_microblock_privkey(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_privkey)); let tip = From 91d2ffb6a0392c934b7dcf6aec42ce2f54aad5aa Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:14:06 -0500 Subject: [PATCH 115/122] fix: pass event data by reference --- testnet/stacks-node/src/event_dispatcher.rs | 10 +++++----- testnet/stacks-node/src/run_loop/mod.rs | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index ef9442898e..62668a0129 --- 
b/testnet/stacks-node/src/event_dispatcher.rs @@ -371,7 +371,7 @@ impl EventObserver { fn make_new_block_processed_payload( &self, filtered_events: Vec<(usize, &(bool, Txid, &StacksTransactionEvent))>, - block: StacksBlockEventData, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent_index_hash: &StacksBlockId, @@ -515,7 +515,7 @@ impl StackerDBEventDispatcher for EventDispatcher { impl BlockEventDispatcher for EventDispatcher { fn announce_block( &self, - block: StacksBlockEventData, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent: &StacksBlockId, @@ -717,7 +717,7 @@ impl EventDispatcher { pub fn process_chain_tip( &self, - block: StacksBlockEventData, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent_index_hash: &StacksBlockId, @@ -767,7 +767,7 @@ impl EventDispatcher { let payload = self.registered_observers[observer_id] .make_new_block_processed_payload( filtered_events, - block.clone(), + &block, metadata, receipts, parent_index_hash, @@ -1163,7 +1163,7 @@ mod test { let payload = observer.make_new_block_processed_payload( filtered_events, - block.into(), + &block.into(), &metadata, &receipts, &parent_index_hash, diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index d9987f0ac0..49feeb4f42 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -181,7 +181,7 @@ pub fn announce_boot_receipts( debug!("Push {} boot receipts", &boot_receipts.len()); event_dispatcher.announce_block( - block_0.into(), + &block_0.into(), &block_header_0, boot_receipts, &StacksBlockId::sentinel(), From bef3db05362a3edfa311b8ecfc750032e4e1e4f2 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sat, 11 Nov 2023 01:17:09 -0500 Subject: [PATCH 116/122] chore: cargo fmt --- stackslib/src/chainstate/burn/db/sortdb.rs | 3 ++- 1 file 
changed, 2 insertions(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 35e1fedfdc..56a2337d30 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -2132,7 +2132,8 @@ impl<'a> SortitionHandleConn<'a> { else { return Ok(false); }; - let Some(expected_sortition_id) = self.get_sortition_id_for_bhh(&snapshot.burn_header_hash)? + let Some(expected_sortition_id) = + self.get_sortition_id_for_bhh(&snapshot.burn_header_hash)? else { return Ok(false); }; From 94ff8cd8d228acf532eaa7c0735fdb063d330049 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 12 Nov 2023 15:12:00 -0500 Subject: [PATCH 117/122] fix: fix regression in static block validation check -- versioned smart contracts aren't allowed before epoch 2.1 --- stackslib/src/chainstate/stacks/block.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 15be745164..dcc4a64021 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -592,7 +592,7 @@ impl StacksBlock { } } if let TransactionPayload::SmartContract(_, ref version_opt) = &tx.payload { - if version_opt.is_some() { + if version_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { // not supported error!("Versioned smart contracts not supported before Stacks 2.1"); return false; From 6bd1ca2e07be78b0136fec7d83020290cf14334f Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 12 Nov 2023 15:12:23 -0500 Subject: [PATCH 118/122] fix: special-case handling for choosing the parent block pointer in the block miner if we're mining the first-ever block (only affects integration tests) --- testnet/stacks-node/src/neon_node.rs | 43 ++++++++++++++++++---------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/testnet/stacks-node/src/neon_node.rs 
b/testnet/stacks-node/src/neon_node.rs index 211a312af4..88748e7e25 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -3471,26 +3471,39 @@ impl ParentStacksBlockInfo { .expect("Failed to look up block's parent snapshot"); let parent_sortition_id = &parent_snapshot.sortition_id; - let parent_winning_vtxindex = - SortitionDB::get_block_winning_vtxindex(burn_db.conn(), parent_sortition_id) + + let (parent_block_height, parent_winning_vtxindex, parent_block_total_burn) = if mine_tip_ch + == &FIRST_BURNCHAIN_CONSENSUS_HASH + { + (0, 0, 0) + } else { + let parent_winning_vtxindex = + SortitionDB::get_block_winning_vtxindex(burn_db.conn(), parent_sortition_id) + .expect("SortitionDB failure.") + .ok_or_else(|| { + error!( + "Failed to find winning vtx index for the parent sortition"; + "parent_sortition_id" => %parent_sortition_id + ); + Error::WinningVtxNotFoundForChainTip + })?; + + let parent_block = SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) .expect("SortitionDB failure.") .ok_or_else(|| { error!( - "Failed to find winning vtx index for the parent sortition"; + "Failed to find block snapshot for the parent sortition"; "parent_sortition_id" => %parent_sortition_id ); - Error::WinningVtxNotFoundForChainTip + Error::SnapshotNotFoundForChainTip })?; - let parent_block = SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) - .expect("SortitionDB failure.") - .ok_or_else(|| { - error!( - "Failed to find block snapshot for the parent sortition"; - "parent_sortition_id" => %parent_sortition_id - ); - Error::SnapshotNotFoundForChainTip - })?; + ( + parent_block.block_height, + parent_winning_vtxindex, + parent_block.total_burn, + ) + }; // don't mine off of an old burnchain block let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) @@ -3529,8 +3542,8 @@ impl ParentStacksBlockInfo { Ok(ParentStacksBlockInfo { stacks_parent_header: stacks_tip_header, 
parent_consensus_hash: mine_tip_ch.clone(), - parent_block_burn_height: parent_block.block_height, - parent_block_total_burn: parent_block.total_burn, + parent_block_burn_height: parent_block_height, + parent_block_total_burn: parent_block_total_burn, parent_winning_vtxindex, coinbase_nonce, }) From 6d6e00bce3d50e5d3161f8fb788da340fc7e4fe7 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 12 Nov 2023 15:13:28 -0500 Subject: [PATCH 119/122] fix: update comments in pox-4 to reflect that it is pox-4, not pox-3 --- stackslib/src/chainstate/stacks/boot/pox-4.clar | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stackslib/src/chainstate/stacks/boot/pox-4.clar b/stackslib/src/chainstate/stacks/boot/pox-4.clar index 5878038a0b..1f9ad6dad7 100644 --- a/stackslib/src/chainstate/stacks/boot/pox-4.clar +++ b/stackslib/src/chainstate/stacks/boot/pox-4.clar @@ -1,4 +1,4 @@ -;; The .pox-3 contract +;; The .pox-4 contract ;; Error codes (define-constant ERR_STACKING_UNREACHABLE 255) (define-constant ERR_STACKING_CORRUPTED_STATE 254) @@ -85,7 +85,7 @@ ;; Records will be deleted from this map when auto-unlocks are processed ;; ;; This map de-normalizes some state from the `reward-cycle-pox-address-list` map -;; and the `pox-3` contract tries to keep this state in sync with the reward-cycle +;; and the `pox-4` contract tries to keep this state in sync with the reward-cycle ;; state. 
The major invariants of this `stacking-state` map are: ;; (1) any entry in `reward-cycle-pox-address-list` with `some stacker` points to a real `stacking-state` ;; (2) `stacking-state.reward-set-indexes` matches the index of that `reward-cycle-pox-address-list` @@ -1010,7 +1010,7 @@ stacker: tx-sender, add-amount: increase-by }))) (err ERR_STACKING_UNREACHABLE)) - ;; NOTE: stacking-state map is unchanged: it does not track amount-stacked in PoX-3 + ;; NOTE: stacking-state map is unchanged: it does not track amount-stacked in PoX-4 (ok { stacker: tx-sender, total-locked: (+ amount-stacked increase-by)}))) ;; Extend an active Stacking lock. From c4360987e63a6beb42e9c338f03116a4211ec9b3 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Sun, 12 Nov 2023 16:12:45 -0500 Subject: [PATCH 120/122] fix: cover epoch 2.4 and epoch 3.0 --- clarity/src/vm/test_util/mod.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index 957c65e6eb..6633c65093 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -42,6 +42,9 @@ pub const TEST_BURN_STATE_DB_21: UnitTestBurnStateDB = UnitTestBurnStateDB { pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnStateDB { match epoch_id { + StacksEpochId::Epoch10 => { + panic!("Epoch 1.0 not testable"); + } StacksEpochId::Epoch20 => UnitTestBurnStateDB { epoch_id, ast_rules: ASTRules::Typical, @@ -50,11 +53,12 @@ pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnState | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 => UnitTestBurnStateDB { + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => UnitTestBurnStateDB { epoch_id, ast_rules: ASTRules::PrecheckSize, }, - _ => panic!("Epoch {} not covered", &epoch_id), } } From a6e9b4c6c076216af0bd6607751d5fbdb41d4fdd Mon Sep 17 00:00:00 2001 From: Jude 
Nelson Date: Sun, 12 Nov 2023 21:24:45 -0500 Subject: [PATCH 121/122] fix: fix another test that explicitly panics on epochs 2.5 and 3.0 --- stackslib/src/clarity_vm/tests/large_contract.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs index 0adb308261..f68a0e4f83 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -148,7 +148,9 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 => { + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => { let (ast, _analysis) = tx .analyze_smart_contract( &boot_code_id("costs-3", false), From 7cf69c74a5b81bc2236480bd9122c99662e2ae91 Mon Sep 17 00:00:00 2001 From: Jude Nelson Date: Thu, 16 Nov 2023 14:16:08 -0500 Subject: [PATCH 122/122] chore: address PR feedback; use explicit integer conversions (but this really needs a clippy pass) --- clarity/src/vm/database/structures.rs | 24 +++--- clarity/src/vm/functions/assets.rs | 4 +- stackslib/src/burnchains/mod.rs | 23 ++--- stackslib/src/burnchains/tests/mod.rs | 9 +- stackslib/src/chainstate/burn/db/sortdb.rs | 30 ++----- .../burn/operations/leader_block_commit.rs | 37 ++++---- stackslib/src/chainstate/coordinator/mod.rs | 61 ++++++++++---- stackslib/src/chainstate/coordinator/tests.rs | 5 +- .../chainstate/nakamoto/coordinator/mod.rs | 13 ++- stackslib/src/chainstate/nakamoto/miner.rs | 6 +- stackslib/src/chainstate/nakamoto/mod.rs | 21 +++-- stackslib/src/chainstate/stacks/boot/mod.rs | 15 ++-- stackslib/src/chainstate/stacks/db/blocks.rs | 84 +++++++++++-------- .../src/chainstate/stacks/db/transactions.rs | 20 +++-- stackslib/src/chainstate/stacks/miner.rs | 33 +++++--- .../src/chainstate/stacks/transaction.rs | 4 +- 
stackslib/src/clarity_vm/clarity.rs | 54 ++++++------ stackslib/src/core/mod.rs | 2 +- stackslib/src/net/mod.rs | 5 +- stackslib/src/net/rpc.rs | 12 ++- testnet/stacks-node/src/event_dispatcher.rs | 43 +++++++--- testnet/stacks-node/src/neon_node.rs | 10 +-- 22 files changed, 300 insertions(+), 215 deletions(-) diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 393838ad7a..e1454d8a1e 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -940,22 +940,22 @@ impl STXBalance { match self { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { unlock_height, .. } => { - if *unlock_height >= (v1_unlock_height as u64) { - v1_unlock_height as u64 + if *unlock_height >= u64::from(v1_unlock_height) { + u64::from(v1_unlock_height) } else { *unlock_height } } STXBalance::LockedPoxTwo { unlock_height, .. } => { - if *unlock_height >= (v2_unlock_height as u64) { - v2_unlock_height as u64 + if *unlock_height >= u64::from(v2_unlock_height) { + u64::from(v2_unlock_height) } else { *unlock_height } } STXBalance::LockedPoxThree { unlock_height, .. 
} => { - if *unlock_height >= (v3_unlock_height as u64) { - v3_unlock_height as u64 + if *unlock_height >= u64::from(v3_unlock_height) { + u64::from(v3_unlock_height) } else { *unlock_height } @@ -1210,7 +1210,7 @@ impl STXBalance { return false; } // if unlockable due to Stacks 2.1 early unlock - if v1_unlock_height as u64 <= burn_block_height { + if u64::from(v1_unlock_height) <= burn_block_height { return false; } true @@ -1227,7 +1227,7 @@ impl STXBalance { return false; } // if unlockable due to Stacks 2.2 early unlock - if v2_unlock_height as u64 <= burn_block_height { + if u64::from(v2_unlock_height) <= burn_block_height { return false; } true @@ -1244,7 +1244,7 @@ impl STXBalance { return false; } // if unlockable due to Stacks 2.5 early unlock - if v3_unlock_height as u64 <= burn_block_height { + if u64::from(v3_unlock_height) <= burn_block_height { return false; } true @@ -1287,7 +1287,7 @@ impl STXBalance { return true; } // if unlockable due to Stacks 2.1 early unlock - if v1_unlock_height as u64 <= burn_block_height { + if u64::from(v1_unlock_height) <= burn_block_height { return true; } false @@ -1305,7 +1305,7 @@ impl STXBalance { return true; } // if unlockable due to Stacks 2.2 early unlock - if v2_unlock_height as u64 <= burn_block_height { + if u64::from(v2_unlock_height) <= burn_block_height { return true; } false @@ -1323,7 +1323,7 @@ impl STXBalance { return true; } // if unlockable due to Stacks 2.5 early unlock - if v3_unlock_height as u64 <= burn_block_height { + if u64::from(v3_unlock_height) <= burn_block_height { return true; } false diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index fa5086639f..b4cf3c2084 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -249,11 +249,11 @@ pub fn special_stx_account( ), ( "unlock-height".try_into().unwrap(), - Value::UInt(stx_balance.effective_unlock_height( + Value::UInt(u128::from(stx_balance.effective_unlock_height( 
v1_unlock_ht, v2_unlock_ht, v3_unlock_ht, - ) as u128), + ))), ), ]) .map(Value::Tuple) diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 656259cb33..302d0a49dc 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -399,15 +399,16 @@ impl PoxConstants { /// Returns the PoX contract that is "active" at the given burn block height pub fn active_pox_contract(&self, burn_height: u64) -> &'static str { Self::static_active_pox_contract( - self.v1_unlock_height as u64, - self.pox_3_activation_height as u64, - self.pox_4_activation_height as u64, + u64::from(self.v1_unlock_height), + u64::from(self.pox_3_activation_height), + u64::from(self.pox_4_activation_height), burn_height, ) } pub fn reward_slots(&self) -> u32 { - (self.reward_cycle_length - self.prepare_length) * (OUTPUTS_PER_COMMIT as u32) + (self.reward_cycle_length - self.prepare_length) + * u32::try_from(OUTPUTS_PER_COMMIT).expect("FATAL: > 2^32 outputs per commit") } /// is participating_ustx enough to engage in PoX in the next reward cycle? @@ -416,7 +417,7 @@ impl PoxConstants { .checked_mul(100) .expect("OVERFLOW: uSTX overflowed u128") > liquid_ustx - .checked_mul(self.pox_participation_threshold_pct as u128) + .checked_mul(u128::from(self.pox_participation_threshold_pct)) .expect("OVERFLOW: uSTX overflowed u128") } @@ -522,13 +523,13 @@ impl PoxConstants { pub fn is_reward_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { let effective_height = burn_height - first_block_height; // first block of the new reward cycle - (effective_height % (self.reward_cycle_length as u64)) == 1 + (effective_height % u64::from(self.reward_cycle_length)) == 1 } pub fn reward_cycle_to_block_height(&self, first_block_height: u64, reward_cycle: u64) -> u64 { // NOTE: the `+ 1` is because the height of the first block of a reward cycle is mod 1, not // mod 0. 
- first_block_height + reward_cycle * (self.reward_cycle_length as u64) + 1 + first_block_height + reward_cycle * u64::from(self.reward_cycle_length) + 1 } pub fn block_height_to_reward_cycle( @@ -539,15 +540,15 @@ impl PoxConstants { Self::static_block_height_to_reward_cycle( block_height, first_block_height, - self.reward_cycle_length as u64, + u64::from(self.reward_cycle_length), ) } pub fn is_in_prepare_phase(&self, first_block_height: u64, block_height: u64) -> bool { Self::static_is_in_prepare_phase( first_block_height, - self.reward_cycle_length as u64, - self.prepare_length as u64, + u64::from(self.reward_cycle_length), + u64::from(self.prepare_length), block_height, ) } @@ -567,7 +568,7 @@ impl PoxConstants { // NOTE: first block in reward cycle is mod 1, so mod 0 is the last block in the // prepare phase. - reward_index == 0 || reward_index > ((reward_cycle_length - prepare_length) as u64) + reward_index == 0 || reward_index > u64::from(reward_cycle_length - prepare_length) } } diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 4030a53815..d051c74151 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -418,9 +418,7 @@ impl TestBurnchainBlock { None => SortitionDB::get_first_block_snapshot(ic).unwrap(), }; - let new_seed = if let Some(new_seed) = new_seed { - new_seed - } else { + let new_seed = new_seed.unwrap_or_else(|| { // prove on the last-ever sortition's hash to produce the new seed let proof = miner .make_proof(&leader_key.public_key, &last_snapshot.sortition_hash) @@ -429,9 +427,8 @@ impl TestBurnchainBlock { leader_key.public_key.to_hex() )); - let new_seed = VRFSeed::from_proof(&proof); - new_seed - }; + VRFSeed::from_proof(&proof) + }); let get_commit_res = SortitionDB::get_block_commit( ic.conn(), diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 56a2337d30..7847529514 100644 --- 
a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -3620,18 +3620,15 @@ impl SortitionDB { ) -> Result, db_error> { let sql = "SELECT reward_set FROM preprocessed_reward_sets WHERE sortition_id = ?1"; let args: &[&dyn ToSql] = &[sortition_id]; - let reward_set_opt: Option = sortdb - .query_row(sql, args, |row| row.get(0)) - .optional() - .map_err(db_error::from)?; + let reward_set_opt: Option = + sortdb.query_row(sql, args, |row| row.get(0)).optional()?; - if let Some(reward_set_str) = reward_set_opt { - let rc_info: RewardCycleInfo = - serde_json::from_str(&reward_set_str).map_err(|_| db_error::ParseError)?; - Ok(Some(rc_info)) - } else { - Ok(None) - } + let rc_info = reward_set_opt + .map(|reward_set_str| serde_json::from_str(&reward_set_str)) + .transpose() + .map_err(|_| db_error::ParseError)?; + + Ok(rc_info) } } @@ -5024,17 +5021,6 @@ impl SortitionDB { }) } - /// Get the block-commit for a Nakamoto block, given the block-commit's sortition's consensus - /// hash and its given last_tenure_id - pub fn get_block_commit_for_nakamoto_block( - conn: &Connection, - consensus_hash: &ConsensusHash, - last_tenure_id: &StacksBlockId, - ) -> Result, db_error> { - let bhh = BlockHeaderHash(last_tenure_id.0.clone()); - Self::get_block_commit_for_stacks_block(conn, consensus_hash, &bhh) - } - /// Get a block snapshot for a winning block hash in a given burn chain fork. 
pub fn get_block_snapshot_for_winning_stacks_block( ic: &SortitionDBConn, diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index fbb593e663..54d5e287ad 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -86,9 +86,12 @@ impl LeaderBlockCommitOp { sunset_burn: 0, block_height: block_height, burn_parent_modulus: if block_height > 0 { - ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8 + u8::try_from((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) + .expect("FATAL: unreachable: unable to form u8 from 3-bit number") } else { - BURN_BLOCK_MINED_AT_MODULUS as u8 - 1 + u8::try_from(BURN_BLOCK_MINED_AT_MODULUS) + .expect("FATAL: unreachable: 5 is not a u8") + - 1 }, new_seed: new_seed.clone(), key_block_ptr: paired_key.block_height as u32, @@ -139,7 +142,9 @@ impl LeaderBlockCommitOp { txid: Txid([0u8; 32]), vtxindex: 0, block_height: 0, - burn_parent_modulus: BURN_BLOCK_MINED_AT_MODULUS as u8 - 1, + burn_parent_modulus: u8::try_from(BURN_BLOCK_MINED_AT_MODULUS) + .expect("FATAL: unreachable: 5 is not a u8") + - 1, burn_header_hash: BurnchainHeaderHash::zero(), } @@ -148,11 +153,13 @@ impl LeaderBlockCommitOp { #[cfg(test)] pub fn set_burn_height(&mut self, height: u64) { self.block_height = height; - self.burn_parent_modulus = if height > 0 { + let new_burn_parent_modulus = if height > 0 { (height - 1) % BURN_BLOCK_MINED_AT_MODULUS } else { BURN_BLOCK_MINED_AT_MODULUS - 1 - } as u8; + }; + self.burn_parent_modulus = u8::try_from(new_burn_parent_modulus) + .expect("FATAL: unreachable: 3-bit number is not a u8"); } pub fn expected_chained_utxo(burn_only: bool) -> u32 { @@ -165,7 +172,7 @@ impl LeaderBlockCommitOp { } pub fn burn_block_mined_at(&self) -> u64 { - self.burn_parent_modulus as u64 % BURN_BLOCK_MINED_AT_MODULUS + u64::from(self.burn_parent_modulus) % 
BURN_BLOCK_MINED_AT_MODULUS } /// In Nakamoto, the block header hash is actually the index block hash of the first Nakamoto @@ -207,8 +214,10 @@ impl LeaderBlockCommitOp { let burn_parent_modulus_and_memo_byte = data[76]; - let burn_parent_modulus = ((burn_parent_modulus_and_memo_byte & 0b111) as u64 - % BURN_BLOCK_MINED_AT_MODULUS) as u8; + let burn_parent_modulus = u8::try_from( + u64::from(burn_parent_modulus_and_memo_byte & 0b111) % BURN_BLOCK_MINED_AT_MODULUS, + ) + .expect("FATAL: unreachable: could not make u8 from a 3-bit number"); let memo = (burn_parent_modulus_and_memo_byte >> 3) & 0x1f; Some(ParsedData { @@ -291,7 +300,7 @@ impl LeaderBlockCommitOp { // the genesis block. } - if data.parent_block_ptr as u64 >= block_height { + if u64::from(data.parent_block_ptr) >= block_height { warn!( "Invalid tx: parent block back-pointer {} exceeds block height {}", data.parent_block_ptr, block_height @@ -304,7 +313,7 @@ impl LeaderBlockCommitOp { return Err(op_error::ParseError); } - if data.key_block_ptr as u64 >= block_height { + if u64::from(data.key_block_ptr) >= block_height { warn!( "Invalid tx: key block back-pointer {} exceeds block height {}", data.key_block_ptr, block_height @@ -382,7 +391,7 @@ impl LeaderBlockCommitOp { // is expected given the amount transfered. 
let burn_fee = pox_fee .expect("A 0-len output should have already errored") - .checked_mul(OUTPUTS_PER_COMMIT as u64) // total commitment is the pox_amount * outputs + .checked_mul(u64::try_from(OUTPUTS_PER_COMMIT).expect(">2^64 outputs per commit")) // total commitment is the pox_amount * outputs .ok_or_else(|| op_error::ParseError)?; if burn_fee == 0 { @@ -563,7 +572,7 @@ impl LeaderBlockCommitOp { tx: &mut SortitionHandleTx, reward_set_info: Option<&RewardSetInfo>, ) -> Result<(), op_error> { - let parent_block_height = self.parent_block_ptr as u64; + let parent_block_height = u64::from(self.parent_block_ptr); if PoxConstants::has_pox_sunset(epoch_id) { // sunset only applies in epochs prior to 2.1. After 2.1, miners can put whatever they @@ -857,8 +866,8 @@ impl LeaderBlockCommitOp { epoch_id: StacksEpochId, tx: &mut SortitionHandleTx, ) -> Result<(), op_error> { - let leader_key_block_height = self.key_block_ptr as u64; - let parent_block_height = self.parent_block_ptr as u64; + let leader_key_block_height = u64::from(self.key_block_ptr); + let parent_block_height = u64::from(self.parent_block_ptr); let tx_tip = tx.context.chain_tip.clone(); let apparent_sender_repr = format!("{}", &self.apparent_sender); diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index a8ef06b144..c40ed4e83a 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -97,6 +97,32 @@ pub enum PoxAnchorBlockStatus { NotSelected, } +/// The possible outcomes of processing a burnchain block. +/// Indicates whether or not we're ready to process Stacks blocks, or if not, whether or not we're +/// blocked on a Stacks 2.x anchor block or a Nakamoto anchor block +pub enum NewBurnchainBlockStatus { + /// Ready to process Stacks blocks + Ready, + /// Missing 2.x PoX anchor block + WaitForPox2x(BlockHeaderHash), + /// Missing Nakamoto anchor block. Unlike 2.x, we won't know its hash. 
+ WaitForPoxNakamoto, +} + +impl NewBurnchainBlockStatus { + /// Test helper to convert this status into the optional hash of the missing PoX anchor block. + /// Because there are unit tests that expect a Some(..) result if PoX cannot proceed, the + /// missing Nakamoto anchor block case is converted into a placeholder Some(..) value + #[cfg(test)] + pub fn into_missing_block_hash(self) -> Option { + match self { + Self::Ready => None, + Self::WaitForPox2x(block_hash) => Some(block_hash), + Self::WaitForPoxNakamoto => Some(BlockHeaderHash([0x00; 32])), + } + } +} + #[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct RewardCycleInfo { pub reward_cycle: u64, @@ -501,14 +527,15 @@ impl< signal_mining_blocked(miner_status.clone()); debug!("Received new burn block notice"); match self.handle_new_burnchain_block() { - Ok(missing_block_opt) => { - if missing_block_opt.is_some() { - debug!( - "Missing canonical anchor block {}", - &missing_block_opt.clone().unwrap() - ); + Ok(burn_block_status) => match burn_block_status { + NewBurnchainBlockStatus::Ready => {} + NewBurnchainBlockStatus::WaitForPox2x(block_hash) => { + debug!("Missing canonical Stacks 2.x anchor block {}", &block_hash,); } - } + NewBurnchainBlockStatus::WaitForPoxNakamoto => { + debug!("Missing canonical Nakamoto anchor block"); + } + }, Err(e) => { warn!("Error processing new burn block: {:?}", e); } @@ -2262,9 +2289,7 @@ impl< /// Outermost call to process a burnchain block. /// Will call the Stacks 2.x or Nakamoto handler, depending on whether or not /// Not called internally. - /// NOTE: in epoch 3.x, we can't determine the hash of the PoX anchor block directly if it's - /// missing. 
If it is missing, then this method would return Some(BlockHeaderHash("0000...0000")) - pub fn handle_new_burnchain_block(&mut self) -> Result, Error> { + pub fn handle_new_burnchain_block(&mut self) -> Result { let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; let epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn())?; let target_epoch_index = @@ -2275,7 +2300,15 @@ impl< .expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); if target_epoch.epoch_id < StacksEpochId::Epoch30 { // burnchain has not yet advanced to epoch 3.0 - return self.handle_new_epoch2_burnchain_block(&mut HashSet::new()); + return self + .handle_new_epoch2_burnchain_block(&mut HashSet::new()) + .and_then(|block_hash_opt| { + if let Some(block_hash) = block_hash_opt { + Ok(NewBurnchainBlockStatus::WaitForPox2x(block_hash)) + } else { + Ok(NewBurnchainBlockStatus::Ready) + } + }); } // burnchain has advanced to epoch 3.0, but has our sortition DB? @@ -2300,12 +2333,12 @@ impl< // proceed to process sortitions in epoch 3.0 self.handle_new_nakamoto_burnchain_block() - .map(|can_proceed| { + .and_then(|can_proceed| { if can_proceed { - None + Ok(NewBurnchainBlockStatus::Ready) } else { // missing PoX anchor block, but unlike in 2.x, we don't know what it is! 
- Some(BlockHeaderHash([0x00; 32])) + Ok(NewBurnchainBlockStatus::WaitForPoxNakamoto) } }) } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index bc9259061a..0e3dda311e 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -6258,7 +6258,10 @@ fn test_pox_processable_block_in_different_pox_forks() { ); loop { - let missing_anchor_opt = coord.handle_new_burnchain_block().unwrap(); + let missing_anchor_opt = coord + .handle_new_burnchain_block() + .unwrap() + .into_missing_block_hash(); if let Some(missing_anchor) = missing_anchor_opt { eprintln!( "Unblinded database reports missing anchor block {:?} (ix={})", diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 53bec549f4..4e38980fa5 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -128,16 +128,15 @@ fn find_prepare_phase_sortitions( let sn = SortitionDB::get_block_snapshot(sort_db.conn(), sortition_tip)? .ok_or(DBError::NotFoundError)?; - let mut sns = vec![]; let mut height = sn.block_height; - sns.push(sn); + let mut sns = vec![sn]; while burnchain.is_in_prepare_phase(height) && height > 0 { let Some(sn) = SortitionDB::get_block_snapshot( sort_db.conn(), &sns.last() .as_ref() - .expect("FATAL; unreachable: sns is never empty") + .expect("FATAL: unreachable: sns is never empty") .parent_sortition_id, )? 
else { @@ -355,7 +354,7 @@ pub fn get_nakamoto_next_recipients( }; sort_db .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) - .map_err(|e| Error::from(e)) + .map_err(Error::from) } impl< @@ -667,7 +666,7 @@ impl< BurnchainDB::get_burnchain_block(&self.burnchain_blocks_db.conn(), &cursor) .map_err(|e| { warn!( - "ChainsCoordinator: could not retrieve block burnhash={}", + "ChainsCoordinator: could not retrieve block burnhash={}", &cursor ); Error::NonContiguousBurnchainBlock(e) @@ -716,8 +715,8 @@ impl< .collect(); debug!( - "Unprocessed burn chain blocks [{}]", - dbg_burn_header_hashes.join(", ") + "Unprocessed burn chain blocks: {:?}", + &dbg_burn_header_hashes ); // Unlike in Stacks 2.x, there can be neither chain reorgs nor PoX reorgs unless Bitcoin itself diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 4d58a2d0de..3de982a16e 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -308,8 +308,10 @@ impl NakamotoBlockBuilder { debug!("Nakamoto miner tenure begin"); let burn_tip = SortitionDB::get_canonical_chain_tip_bhh(burn_dbconn.conn())?; - let burn_tip_height = - SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height as u32; + let burn_tip_height = u32::try_from( + SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height, + ) + .expect("block height overflow"); let mainnet = chainstate.config().mainnet; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 5eeefc7fd5..0ee3c7732c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -371,7 +371,7 @@ impl NakamotoBlockHeader { } pub fn is_first_mined(&self) -> bool { - self.block_id() == StacksBlockId::first_mined() + self.parent_block_id == StacksBlockId::first_mined() } /// Sign the block header by the miner @@ -490,6 +490,8 @@ impl 
NakamotoBlock { return Err(TenureChangeError::PreviousTenureInvalid); } + // TODO: check number of blocks in previous tenure + // TODO: check tenure change cause tc.validate() } else { // placeholder error @@ -613,8 +615,8 @@ impl NakamotoBlock { } // tenure-changes must all come first, and must be in order - for i in 0..tenure_change_positions.len() { - if i != tenure_change_positions[i] { + for (i, pos) in tenure_change_positions.iter().enumerate() { + if &i != pos { // tenure-change is out of place return Some(false); } @@ -818,7 +820,7 @@ impl NakamotoBlock { chain_id: u32, epoch_id: StacksEpochId, ) -> bool { - if self.txs.len() == 0 { + if self.txs.is_empty() { return false; } if !StacksBlock::validate_transactions_unique(&self.txs) { @@ -946,7 +948,10 @@ impl NakamotoChainState { .query_row_and_then(query, NO_PARAMS, |row| { let data: Vec = row.get("data")?; let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?; - Ok(Some((block, data.len() as u64))) + Ok(Some(( + block, + u64::try_from(data.len()).expect("FATAL: block is bigger than a u64"), + ))) }) .or_else(|e| { if let ChainstateError::DBError(DBError::SqliteError( @@ -1224,8 +1229,8 @@ impl NakamotoChainState { // key register of the winning miner let leader_key = db_handle .get_leader_key_at( - block_commit.key_block_ptr as u64, - block_commit.key_vtxindex as u32, + u64::from(block_commit.key_block_ptr), + u32::from(block_commit.key_vtxindex), )? .expect("FATAL: have block commit but no leader key"); @@ -1588,7 +1593,7 @@ impl NakamotoChainState { ) } - /// Get the parent header of a Nakamoto block. + /// Get the tenure-start block header of a given consensus hash. 
/// It might be an epoch 2.x block header pub fn get_block_header_by_consensus_hash( chainstate_conn: &Connection, diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 8bba21285f..6ade061ddf 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -688,7 +688,7 @@ impl StacksChainState { ) -> u128 { // set the lower limit on reward scaling at 25% of liquid_ustx // (i.e., liquid_ustx / POX_MAXIMAL_SCALING) - let scale_by = cmp::max(participation, liquid_ustx / POX_MAXIMAL_SCALING as u128); + let scale_by = cmp::max(participation, liquid_ustx / u128::from(POX_MAXIMAL_SCALING)); let threshold_precise = scale_by / reward_slots; // compute the threshold as nearest 10k > threshold_precise let ceil_amount = match threshold_precise % POX_THRESHOLD_STEPS_USTX { @@ -715,9 +715,10 @@ impl StacksChainState { // set the lower limit on reward scaling at 25% of liquid_ustx // (i.e., liquid_ustx / POX_MAXIMAL_SCALING) - let scale_by = cmp::max(participation, liquid_ustx / POX_MAXIMAL_SCALING as u128); + let scale_by = cmp::max(participation, liquid_ustx / u128::from(POX_MAXIMAL_SCALING)); - let reward_slots = pox_settings.reward_slots() as u128; + let reward_slots = u128::try_from(pox_settings.reward_slots()) + .expect("FATAL: unreachable: more than 2^128 reward slots"); let threshold_precise = scale_by / reward_slots; // compute the threshold as nearest 10k > threshold_precise let ceil_amount = match threshold_precise % POX_THRESHOLD_STEPS_USTX { @@ -738,7 +739,7 @@ impl StacksChainState { block_id: &StacksBlockId, reward_cycle: u64, ) -> Result, Error> { - if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_1_NAME)? { + if !self.is_pox_active(sortdb, block_id, u128::from(reward_cycle), POX_1_NAME)? 
{ debug!( "PoX was voted disabled in block {} (reward cycle {})", block_id, reward_cycle @@ -816,7 +817,7 @@ impl StacksChainState { block_id: &StacksBlockId, reward_cycle: u64, ) -> Result, Error> { - if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_2_NAME)? { + if !self.is_pox_active(sortdb, block_id, u128::from(reward_cycle), POX_2_NAME)? { debug!( "PoX was voted disabled in block {} (reward cycle {})", block_id, reward_cycle @@ -905,7 +906,7 @@ impl StacksChainState { block_id: &StacksBlockId, reward_cycle: u64, ) -> Result, Error> { - if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_3_NAME)? { + if !self.is_pox_active(sortdb, block_id, u128::from(reward_cycle), POX_3_NAME)? { debug!( "PoX was voted disabled in block {} (reward cycle {})", block_id, reward_cycle @@ -996,7 +997,7 @@ impl StacksChainState { block_id: &StacksBlockId, reward_cycle: u64, ) -> Result, Error> { - if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_4_NAME)? { + if !self.is_pox_active(sortdb, block_id, u128::from(reward_cycle), POX_4_NAME)? 
{ debug!( "PoX was voted disabled in block {} (reward cycle {})", block_id, reward_cycle diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index a8e13eb61c..c430e8f644 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -591,7 +591,7 @@ impl StacksChainState { } })?; - let mut bound_reader = BoundReader::from_reader(&mut fd, MAX_MESSAGE_LEN as u64); + let mut bound_reader = BoundReader::from_reader(&mut fd, u64::from(MAX_MESSAGE_LEN)); let inst = T::consensus_deserialize(&mut bound_reader).map_err(Error::CodecError)?; Ok(inst) } @@ -829,7 +829,7 @@ impl StacksChainState { debug!("Zero-sized block {}", block_hash); return Ok(None); } - if sz > MAX_MESSAGE_LEN as u64 { + if sz > u64::from(MAX_MESSAGE_LEN) { debug!("Invalid block {}: too big", block_hash); return Ok(None); } @@ -1584,8 +1584,8 @@ impl StacksChainState { block.block_hash(), parent_consensus_hash ); - assert!(commit_burn < i64::MAX as u64); - assert!(sortition_burn < i64::MAX as u64); + assert!(commit_burn < u64::try_from(i64::MAX).expect("unreachable")); + assert!(sortition_burn < u64::try_from(i64::MAX).expect("unreachable")); let block_hash = block.block_hash(); let index_block_hash = @@ -1755,7 +1755,7 @@ impl StacksChainState { burn_supports: &[UserBurnSupportOp], ) -> Result<(), Error> { for burn_support in burn_supports.iter() { - assert!(burn_support.burn_fee < i64::MAX as u64); + assert!(burn_support.burn_fee < u64::try_from(i64::MAX).expect("unreachable")); } for burn_support in burn_supports.iter() { @@ -2061,7 +2061,8 @@ impl StacksChainState { ); Ok(BlocksInvData { - bitlen: block_bits.len() as u16, + bitlen: u16::try_from(block_bits.len()) + .expect("FATAL: unreachable: more than 2^16 block bits"), block_bitvec: block_bitvec, microblocks_bitvec: microblocks_bitvec, }) @@ -2093,7 +2094,10 @@ impl StacksChainState { .query_row(sql, args, |row| { let start_height_i64: i64 = 
row.get_unwrap(0); let end_height_i64: i64 = row.get_unwrap(1); - return Ok((start_height_i64 as u64, end_height_i64 as u64)); + return Ok(( + u64::try_from(start_height_i64).expect("FATAL: height exceeds i64::MAX"), + u64::try_from(end_height_i64).expect("FATAL: height exceeds i64::MAX"), + )); }) .optional()? .ok_or_else(|| Error::DBError(db_error::NotFoundError)) @@ -2183,7 +2187,8 @@ impl StacksChainState { ); Ok(BlocksInvData { - bitlen: block_bits.len() as u16, + bitlen: u16::try_from(block_bits.len()) + .expect("FATAL: block bits has more than 2^16 members"), block_bitvec: block_bitvec, microblocks_bitvec: microblocks_bitvec, }) @@ -3724,8 +3729,8 @@ impl StacksChainState { // key of the winning leader let leader_key = db_handle .get_leader_key_at( - block_commit.key_block_ptr as u64, - block_commit.key_vtxindex as u32, + u64::from(block_commit.key_block_ptr), + u32::from(block_commit.key_vtxindex), )? .expect("FATAL: have block commit but no leader key"); @@ -4149,7 +4154,7 @@ impl StacksChainState { 125 }; - stx_reward * (MICROSTACKS_PER_STACKS as u128) + stx_reward * (u128::from(MICROSTACKS_PER_STACKS)) } /// Create the block reward. @@ -4303,7 +4308,7 @@ impl StacksChainState { &[&u64_to_sql(min_arrival_time)?, &u64_to_sql(limit)?], ) .map_err(Error::DBError)?; - Ok(cnt as u64) + Ok(u64::try_from(cnt).expect("more than i64::MAX rows")) } /// How many processed staging blocks do we have, up to a limit, at or after the given @@ -4320,7 +4325,7 @@ impl StacksChainState { &[&u64_to_sql(min_arrival_time)?, &u64_to_sql(limit)?], ) .map_err(Error::DBError)?; - Ok(cnt as u64) + Ok(u64::try_from(cnt).expect("more than i64::MAX rows")) } /// Measure how long a block waited in-between when it arrived and when it got processed. 
@@ -4413,7 +4418,7 @@ impl StacksChainState { &candidate.anchored_block_hash, &candidate.parent_consensus_hash, &candidate.parent_anchored_block_hash, - if candidate.parent_microblock_hash != BlockHeaderHash([0u8; 32]) { (candidate.parent_microblock_seq as u32) + 1 } else { 0 }, + if candidate.parent_microblock_hash != BlockHeaderHash([0u8; 32]) { u32::from(candidate.parent_microblock_seq) + 1 } else { 0 }, &candidate.parent_microblock_hash ); @@ -4545,10 +4550,10 @@ impl StacksChainState { .map_err(|e| (e, microblock.block_hash()))?; tx_receipt.microblock_header = Some(microblock.header.clone()); - tx_receipt.tx_index = tx_index as u32; - fees = fees.checked_add(tx_fee as u128).expect("Fee overflow"); + tx_receipt.tx_index = u32::try_from(tx_index).expect("more than 2^32 items"); + fees = fees.checked_add(u128::from(tx_fee)).expect("Fee overflow"); burns = burns - .checked_add(tx_receipt.stx_burned as u128) + .checked_add(u128::from(tx_receipt.stx_burned)) .expect("Burns overflow"); receipts.push(tx_receipt); } @@ -4901,10 +4906,10 @@ impl StacksChainState { for tx in block_txs.iter() { let (tx_fee, mut tx_receipt) = StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules)?; - fees = fees.checked_add(tx_fee as u128).expect("Fee overflow"); + fees = fees.checked_add(u128::from(tx_fee)).expect("Fee overflow"); tx_receipt.tx_index = tx_index; burns = burns - .checked_add(tx_receipt.stx_burned as u128) + .checked_add(u128::from(tx_receipt.stx_burned)) .expect("Burns overflow"); receipts.push(tx_receipt); tx_index += 1; @@ -5026,7 +5031,11 @@ impl StacksChainState { .to_owned() .expect_principal(); total_minted += amount; - StacksChainState::account_credit(tx_connection, &recipient, amount as u64); + StacksChainState::account_credit( + tx_connection, + &recipient, + u64::try_from(amount).expect("FATAL: transferred more STX than exist"), + ); let event = STXEventType::STXMintEvent(STXMintEventData { recipient, amount }); 
events.push(StacksTransactionEvent::STXEvent(event)); } @@ -5092,7 +5101,7 @@ impl StacksChainState { ) -> Result<(Vec, Vec, Vec), Error> { // only consider transactions in Stacks 2.1 let search_window: u8 = - if epoch_start_height + (BURNCHAIN_TX_SEARCH_WINDOW as u64) > burn_tip_height { + if epoch_start_height + u64::from(BURNCHAIN_TX_SEARCH_WINDOW) > burn_tip_height { burn_tip_height .saturating_sub(epoch_start_height) .try_into() @@ -5521,7 +5530,7 @@ impl StacksChainState { vec![] }; - let active_pox_contract = pox_constants.active_pox_contract(burn_tip_height as u64); + let active_pox_contract = pox_constants.active_pox_contract(u64::from(burn_tip_height)); // process stacking & transfer operations from burnchain ops tx_receipts.extend(StacksChainState::process_stacking_ops( @@ -5816,7 +5825,7 @@ impl StacksChainState { )? { Some(sn) => ( sn.burn_header_hash, - sn.block_height as u32, + u32::try_from(sn.block_height).expect("FATAL: block height overflow"), sn.burn_header_timestamp, ), None => { @@ -5878,7 +5887,8 @@ impl StacksChainState { match StacksChainState::process_block_transactions( &mut clarity_tx, &block.txs, - microblock_txs_receipts.len() as u32, + u32::try_from(microblock_txs_receipts.len()) + .expect("more than 2^32 tx receipts"), ast_rules, ) { Err(e) => { @@ -5923,7 +5933,7 @@ impl StacksChainState { let mut lockup_events = match StacksChainState::finish_block( &mut clarity_tx, miner_payouts_opt.as_ref(), - block.header.total_work.work as u32, + u32::try_from(block.header.total_work.work).expect("FATAL: more than 2^32 blocks"), block.header.microblock_pubkey_hash, ) { Err(Error::InvalidStacksBlock(e)) => { @@ -5991,7 +6001,7 @@ impl StacksChainState { .accumulated_coinbase_ustx; let coinbase_at_block = StacksChainState::get_coinbase_reward( - chain_tip_burn_header_height as u64, + u64::from(chain_tip_burn_header_height), burn_dbconn.context.first_block_height, ); @@ -6071,7 +6081,9 @@ impl StacksChainState { 
chainstate_tx.log_transactions_processed(&new_tip.index_block_hash(), &tx_receipts); - set_last_block_transaction_count(block.txs.len() as u64); + set_last_block_transaction_count( + u64::try_from(block.txs.len()).expect("more than 2^64 txs"), + ); set_last_execution_cost_observed(&block_execution_cost, &block_limit); let epoch_receipt = StacksEpochReceipt { @@ -6268,7 +6280,7 @@ impl StacksChainState { )? { Some(sn) => ( sn.burn_header_hash, - sn.block_height as u32, + u32::try_from(sn.block_height).expect("FATAL: more than 2^32 blocks"), sn.burn_header_timestamp, sn.winning_block_txid, ), @@ -6298,7 +6310,8 @@ impl StacksChainState { }; let block = StacksChainState::extract_stacks_block(&next_staging_block)?; - let block_size = next_staging_block.block_data.len() as u64; + let block_size = u64::try_from(next_staging_block.block_data.len()) + .expect("FATAL: more than 2^64 transactions"); // sanity check -- don't process this block again if we already did so if StacksChainState::has_stacks_block( @@ -6913,7 +6926,7 @@ impl StacksChainState { let (block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height) = clarity_connection.with_clarity_db_readonly(|ref mut db| { ( - db.get_current_burnchain_block_height() as u64, + u64::from(db.get_current_burnchain_block_height()), db.get_v1_unlock_height(), db.get_v2_unlock_height(), db.get_v3_unlock_height(), @@ -6922,7 +6935,7 @@ impl StacksChainState { // 5: the paying account must have enough funds if !payer.stx_balance.can_transfer_at_burn_block( - fee as u128, + u128::from(fee), block_height, v1_unlock_height, v2_unlock_height, @@ -6934,7 +6947,7 @@ impl StacksChainState { } _ => { return Err(MemPoolRejection::NotEnoughFunds( - fee as u128, + u128::from(fee), payer.stx_balance.get_available_balance_at_burn_block( block_height, v1_unlock_height, @@ -6958,7 +6971,8 @@ impl StacksChainState { } // does the owner have the funds for the token transfer? 
- let total_spent = (*amount as u128) + if origin == payer { fee as u128 } else { 0 }; + let total_spent = + u128::from(*amount) + if origin == payer { u128::from(fee) } else { 0 }; if !origin.stx_balance.can_transfer_at_burn_block( total_spent, block_height, @@ -6980,14 +6994,14 @@ impl StacksChainState { // if the payer for the tx is different from owner, check if they can afford fee if origin != payer { if !payer.stx_balance.can_transfer_at_burn_block( - fee as u128, + u128::from(fee), block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height, ) { return Err(MemPoolRejection::NotEnoughFunds( - fee as u128, + u128::from(fee), payer.stx_balance.get_available_balance_at_burn_block( block_height, v1_unlock_height, diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 7a911c9c86..8dcc94705b 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -481,13 +481,13 @@ impl StacksChainState { let consolidated_balance = payer_account .stx_balance .get_available_balance_at_burn_block( - cur_burn_block_height as u64, + u64::from(cur_burn_block_height), v1_unlock_ht, v2_unlock_ht, v3_unlock_ht, ); - if consolidated_balance < fee as u128 { + if consolidated_balance < u128::from(fee) { return Err(Error::InvalidFee); } @@ -571,7 +571,7 @@ impl StacksChainState { .checked_add(amount_burned) .expect("FATAL: sent waaaaay too much STX"); - if !condition_code.check(*amount_sent_condition as u128, amount_sent) { + if !condition_code.check(u128::from(*amount_sent_condition), amount_sent) { info!( "Post-condition check failure on STX owned by {}: {:?} {:?} {}", account_principal, amount_sent_condition, condition_code, amount_sent @@ -617,7 +617,7 @@ impl StacksChainState { let amount_sent = asset_map .get_fungible_tokens(&account_principal, &asset_id) .unwrap_or(0); - if !condition_code.check(*amount_sent_condition as u128, amount_sent) { + if 
!condition_code.check(u128::from(*amount_sent_condition), amount_sent) { info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, amount_sent_condition, condition_code, amount_sent); return false; } @@ -830,7 +830,9 @@ impl StacksChainState { } Some(height) => { if height - .checked_add(MINER_REWARD_MATURITY as u32) + .checked_add( + u32::try_from(MINER_REWARD_MATURITY).expect("FATAL: maturity > 2^32"), + ) .expect("BUG: too many blocks") < current_height { @@ -852,7 +854,7 @@ impl StacksChainState { .get_microblock_poison_report(mblock_pubk_height) { // account for report loaded - env.add_memory(TypeSignature::PrincipalType.size() as u64) + env.add_memory(u64::from(TypeSignature::PrincipalType.size())) .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?; // u128 sequence @@ -902,7 +904,7 @@ impl StacksChainState { let tuple_data = TupleData::from_data(vec![ ( ClarityName::try_from("block_height").expect("BUG: valid string representation"), - Value::UInt(mblock_pubk_height as u128), + Value::UInt(u128::from(mblock_pubk_height)), ), ( ClarityName::try_from("microblock_pubkey_hash") @@ -915,7 +917,7 @@ impl StacksChainState { ), ( ClarityName::try_from("sequence").expect("BUG: valid string representation"), - Value::UInt(reported_seq as u128), + Value::UInt(u128::from(reported_seq)), ), ]) .expect("BUG: valid tuple representation"); @@ -958,7 +960,7 @@ impl StacksChainState { .run_stx_transfer( &origin_account.principal, addr, - *amount as u128, + u128::from(*amount), &BuffData { data: Vec::from(memo.0.clone()), }, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 8fe4996c40..a3151e171f 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1180,7 +1180,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let mut num_txs = self.runtime.num_mined; let mut num_selected = 0; let mut tx_events 
= Vec::new(); - let deadline = get_epoch_time_ms() + (self.settings.max_miner_time_ms as u128); + let deadline = get_epoch_time_ms() + u128::from(self.settings.max_miner_time_ms); let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; mem_pool.reset_nonce_cache()?; @@ -1430,7 +1430,7 @@ impl StacksBlockBuilder { header .consensus_serialize(&mut header_bytes) .expect("FATAL: failed to serialize to vec"); - let bytes_so_far = header_bytes.len() as u64; + let bytes_so_far = u64::try_from(header_bytes.len()).expect("header bytes exceeds 2^64"); StacksBlockBuilder { chain_tip: parent_chain_tip.clone(), @@ -1587,7 +1587,7 @@ impl StacksBlockBuilder { let mut tx_bytes = vec![]; tx.consensus_serialize(&mut tx_bytes) .map_err(Error::CodecError)?; - let tx_len = tx_bytes.len() as u64; + let tx_len = u64::try_from(tx_bytes.len()).expect("tx len exceeds 2^64 bytes"); if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { warn!( @@ -1690,7 +1690,7 @@ impl StacksBlockBuilder { StacksChainState::finish_block( clarity_tx, self.miner_payouts.as_ref(), - self.header.total_work.work as u32, + u32::try_from(self.header.total_work.work).expect("FATAL: more than 2^32 blocks"), self.header.microblock_pubkey_hash, ) .expect("FATAL: call to `finish_block` failed"); @@ -1835,8 +1835,10 @@ impl StacksBlockBuilder { ); let burn_tip = SortitionDB::get_canonical_chain_tip_bhh(burn_dbconn.conn())?; - let burn_tip_height = - SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height as u32; + let burn_tip_height = u32::try_from( + SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height, + ) + .expect("FATAL: more than 2^32 sortitions"); let parent_microblocks = if StacksChainState::block_crosses_epoch_boundary( chainstate.db(), @@ -1938,7 +1940,8 @@ impl StacksBlockBuilder { Some(self.miner_id), )?; self.miner_payouts = matured_miner_rewards_opt; - self.total_confirmed_streamed_fees += microblock_fees as u64; + self.total_confirmed_streamed_fees += + 
u64::try_from(microblock_fees).expect("more than 2^64 microstx microblock fees"); Ok((clarity_tx, microblock_execution_cost)) } @@ -2073,8 +2076,8 @@ impl StacksBlockBuilder { 0, &FIRST_BURNCHAIN_CONSENSUS_HASH, &first_block_hash, - first_block_height as u32, - first_block_ts as u64, + u32::try_from(first_block_height).expect("FATAL: first block is over 2^32"), + u64::try_from(first_block_ts).expect("FATAL: first block timestamp is over 2^64"), &proof, pubkey_hash, ) @@ -2114,8 +2117,10 @@ impl StacksBlockBuilder { 0, &FIRST_BURNCHAIN_CONSENSUS_HASH, &first_block_hash, - BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32, - BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64, + u32::try_from(BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT) + .expect("first regtest bitcoin block is over 2^32"), + u64::try_from(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP) + .expect("first regtest bitcoin block timestamp is over 2^64"), &proof, pubkey_hash, ) @@ -2184,7 +2189,7 @@ impl StacksBlockBuilder { let mut invalidated_txs = vec![]; let mut to_drop_and_blacklist = vec![]; - let deadline = ts_start + (max_miner_time_ms as u128); + let deadline = ts_start + u128::from(max_miner_time_ms); let mut num_txs = 0; let mut blocked = false; @@ -2487,7 +2492,9 @@ impl StacksBlockBuilder { ); } - set_last_mined_block_transaction_count(block.txs.len() as u64); + set_last_mined_block_transaction_count( + u64::try_from(block.txs.len()).expect("more than 2^64 txs"), + ); set_last_mined_execution_cost_observed(&consumed, &block_limit); info!( diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index e0d78f7cbd..637d1366f4 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -52,7 +52,7 @@ impl StacksMessageCodec for TransactionContractCall { let contract_name: ContractName = read_next(fd)?; let function_name: ClarityName = read_next(fd)?; let function_args: Vec = { - let mut bound_read = 
BoundReader::from_reader(fd, MAX_TRANSACTION_LEN as u64); + let mut bound_read = BoundReader::from_reader(fd, u64::from(MAX_TRANSACTION_LEN)); read_next(&mut bound_read) }?; @@ -585,7 +585,7 @@ impl StacksTransaction { let mut tx_bytes = vec![]; self.consensus_serialize(&mut tx_bytes) .expect("BUG: Failed to serialize a transaction object"); - tx_bytes.len() as u64 + u64::try_from(tx_bytes.len()).expect("tx len exceeds 2^64 bytes") } pub fn consensus_deserialize_with_len( diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index 97e1332010..5b7a1ca7d1 100644 --- a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -907,9 +907,9 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let v1_unlock_height = self.burn_state_db.get_v1_unlock_height(); let pox_2_first_cycle = PoxConstants::static_block_height_to_reward_cycle( - v1_unlock_height as u64, - first_block_height as u64, - pox_reward_cycle_length as u64, + u64::from(v1_unlock_height), + u64::from(first_block_height), + u64::from(pox_reward_cycle_length), ) .expect("PANIC: PoX-2 first reward cycle begins *before* first burn block height"); @@ -994,11 +994,11 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // set burnchain params let consts_setter = PrincipalData::from(pox_2_contract_id.clone()); let params = vec![ - Value::UInt(first_block_height as u128), - Value::UInt(pox_prepare_length as u128), - Value::UInt(pox_reward_cycle_length as u128), - Value::UInt(pox_rejection_fraction as u128), - Value::UInt(pox_2_first_cycle as u128), + Value::UInt(u128::from(first_block_height)), + Value::UInt(u128::from(pox_prepare_length)), + Value::UInt(u128::from(pox_reward_cycle_length)), + Value::UInt(u128::from(pox_rejection_fraction)), + Value::UInt(u128::from(pox_2_first_cycle)), ]; let (_, _, _burnchain_params_events) = tx_conn @@ -1171,9 +1171,9 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let pox_3_activation_height = 
self.burn_state_db.get_pox_3_activation_height(); let pox_3_first_cycle = PoxConstants::static_block_height_to_reward_cycle( - pox_3_activation_height as u64, - first_block_height as u64, - pox_reward_cycle_length as u64, + u64::from(pox_3_activation_height), + u64::from(first_block_height), + u64::from(pox_reward_cycle_length), ) .expect("PANIC: PoX-3 first reward cycle begins *before* first burn block height") + 1; @@ -1243,11 +1243,11 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // set burnchain params let consts_setter = PrincipalData::from(pox_3_contract_id.clone()); let params = vec![ - Value::UInt(first_block_height as u128), - Value::UInt(pox_prepare_length as u128), - Value::UInt(pox_reward_cycle_length as u128), - Value::UInt(pox_rejection_fraction as u128), - Value::UInt(pox_3_first_cycle as u128), + Value::UInt(u128::from(first_block_height)), + Value::UInt(u128::from(pox_prepare_length)), + Value::UInt(u128::from(pox_reward_cycle_length)), + Value::UInt(u128::from(pox_rejection_fraction)), + Value::UInt(u128::from(pox_3_first_cycle)), ]; let (_, _, _burnchain_params_events) = tx_conn @@ -1309,9 +1309,9 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let pox_4_activation_height = self.burn_state_db.get_pox_4_activation_height(); let pox_4_first_cycle = PoxConstants::static_block_height_to_reward_cycle( - pox_4_activation_height as u64, - first_block_height as u64, - pox_reward_cycle_length as u64, + u64::from(pox_4_activation_height), + u64::from(first_block_height), + u64::from(pox_reward_cycle_length), ) .expect("PANIC: PoX-4 first reward cycle begins *before* first burn block height") + 1; @@ -1381,11 +1381,11 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // set burnchain params let consts_setter = PrincipalData::from(pox_4_contract_id.clone()); let params = vec![ - Value::UInt(first_block_height as u128), - Value::UInt(pox_prepare_length as u128), - Value::UInt(pox_reward_cycle_length as u128), - Value::UInt(pox_rejection_fraction as u128), 
- Value::UInt(pox_4_first_cycle as u128), + Value::UInt(u128::from(first_block_height)), + Value::UInt(u128::from(pox_prepare_length)), + Value::UInt(u128::from(pox_reward_cycle_length)), + Value::UInt(u128::from(pox_rejection_fraction)), + Value::UInt(u128::from(pox_4_first_cycle)), ]; let (_, _, _burnchain_params_events) = tx_conn @@ -2606,15 +2606,15 @@ mod tests { self.get_stacks_epoch(0) } - fn get_v2_unlock_height(&self) -> u32 { + fn get_v1_unlock_height(&self) -> u32 { u32::MAX } - fn get_v3_unlock_height(&self) -> u32 { + fn get_v2_unlock_height(&self) -> u32 { u32::MAX } - fn get_v1_unlock_height(&self) -> u32 { + fn get_v3_unlock_height(&self) -> u32 { u32::MAX } diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 91f8bdffbc..b658e84785 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -487,7 +487,7 @@ pub static STACKS_EPOCH_2_4_MARKER: u8 = 0x09; /// *or greater*. pub static STACKS_EPOCH_2_5_MARKER: u8 = 0x0a; -/// Stacks 3.0 epoch marker. All block-commits in 2.4 must have a memo bitfield with this value +/// Stacks 3.0 epoch marker. All block-commits in 3.0 must have a memo bitfield with this value /// *or greater*. 
pub static STACKS_EPOCH_3_0_MARKER: u8 = 0x0b; diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 07ef723f8c..41def42dda 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -3597,7 +3597,10 @@ pub mod test { }; let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 { - self.coord.handle_new_burnchain_block().unwrap() + self.coord + .handle_new_burnchain_block() + .unwrap() + .into_missing_block_hash() } else { if self.coord.handle_new_nakamoto_burnchain_block().unwrap() { None diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index c8ca454fd4..48c815e760 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -319,27 +319,31 @@ impl RPCPoxInfoData { // Note: should always be 0 unless somehow configured to start later let pox_1_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.first_block_height as u64) + .block_height_to_reward_cycle(u64::from(burnchain.first_block_height)) .ok_or(net_error::ChainstateError( "PoX-1 first reward cycle begins before first burn block height".to_string(), ))?; let pox_2_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .block_height_to_reward_cycle(u64::from(burnchain.pox_constants.v1_unlock_height)) .ok_or(net_error::ChainstateError( "PoX-2 first reward cycle begins before first burn block height".to_string(), ))? + 1; let pox_3_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .block_height_to_reward_cycle(u64::from( + burnchain.pox_constants.pox_3_activation_height, + )) .ok_or(net_error::ChainstateError( "PoX-3 first reward cycle begins before first burn block height".to_string(), ))? 
+ 1; let pox_4_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.pox_4_activation_height as u64) + .block_height_to_reward_cycle(u64::from( + burnchain.pox_constants.pox_4_activation_height, + )) .ok_or(net_error::ChainstateError( "PoX-4 first reward cycle begins before first burn block height".to_string(), ))? diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 62668a0129..ccedd48cff 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -595,8 +595,11 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.burn_block_observers_lookup.contains(&(*obs_id as u16)) - || self.any_event_observers_lookup.contains(&(*obs_id as u16)) + self.burn_block_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + || self.any_event_observers_lookup.contains( + &(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers")), + ) }) .collect(); if interested_observers.len() < 1 { @@ -801,8 +804,11 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.microblock_observers_lookup.contains(&(*obs_id as u16)) - || self.any_event_observers_lookup.contains(&(*obs_id as u16)) + self.microblock_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + || self.any_event_observers_lookup.contains( + &(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers")), + ) }) .collect(); if interested_observers.len() < 1 { @@ -854,8 +860,11 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.mempool_observers_lookup.contains(&(*obs_id as u16)) - || self.any_event_observers_lookup.contains(&(*obs_id as u16)) + self.mempool_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + || self.any_event_observers_lookup.contains( + 
&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers")), + ) }) .collect(); if interested_observers.len() < 1 { @@ -882,7 +891,10 @@ impl EventDispatcher { .registered_observers .iter() .enumerate() - .filter(|(obs_id, _observer)| self.miner_observers_lookup.contains(&(*obs_id as u16))) + .filter(|(obs_id, _observer)| { + self.miner_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + }) .collect(); if interested_observers.len() < 1 { return; @@ -917,7 +929,7 @@ impl EventDispatcher { .enumerate() .filter(|(obs_id, _observer)| { self.mined_microblocks_observers_lookup - .contains(&(*obs_id as u16)) + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) }) .collect(); if interested_observers.len() < 1 { @@ -950,7 +962,10 @@ impl EventDispatcher { .registered_observers .iter() .enumerate() - .filter(|(obs_id, _observer)| self.miner_observers_lookup.contains(&(*obs_id as u16))) + .filter(|(obs_id, _observer)| { + self.miner_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + }) .collect(); if interested_observers.len() < 1 { return; @@ -984,7 +999,8 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.stackerdb_observers_lookup.contains(&(*obs_id as u16)) + self.stackerdb_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) }) .collect(); if interested_observers.len() < 1 { @@ -1009,8 +1025,11 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.mempool_observers_lookup.contains(&(*obs_id as u16)) - || self.any_event_observers_lookup.contains(&(*obs_id as u16)) + self.mempool_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + || self.any_event_observers_lookup.contains( + &(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers")), + ) }) .collect(); if 
interested_observers.len() < 1 { diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 88748e7e25..40df0ed45c 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -517,8 +517,8 @@ impl Globals { LeaderKeyRegistrationState::Active(RegisteredKey { target_block_height, vrf_public_key: op.public_key, - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, + block_height: u64::from(op.block_height), + op_vtxindex: u32::from(op.vtxindex), }); activated = true; } else { @@ -1436,7 +1436,7 @@ impl BlockMinerThread { if last_mined_blocks.len() == 1 { debug!("Have only attempted one block; unconditionally trying again"); } - last_mined_blocks.len() as u64 + 1 + u64::try_from(last_mined_blocks.len()).expect("FATAL: more than 2^64 mined blocks") + 1 } else { let mut best_attempt = 0; debug!( @@ -1801,11 +1801,11 @@ impl BlockMinerThread { // longer if let Some(highest_unprocessed_block_sn) = highest_unprocessed_block_sn_opt { if stacks_tip.anchored_header.height() - + (burnchain.pox_constants.prepare_length as u64) + + u64::from(burnchain.pox_constants.prepare_length) - 1 >= highest_unprocessed.height && highest_unprocessed_block_sn.block_height - + (burnchain.pox_constants.prepare_length as u64) + + u64::from(burnchain.pox_constants.prepare_length) - 1 >= sort_tip.block_height {