diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index ac1dd45a26..5c2a341421 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -142,6 +142,7 @@ jobs: - tests::epoch_24::verify_auto_unlock_behavior - tests::stackerdb::test_stackerdb_load_store - tests::stackerdb::test_stackerdb_event_observer + - tests::signer::test_stackerdb_dkg steps: - name: Checkout the latest code id: git_checkout diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs index 19e38af647..901cdfce69 100644 --- a/clarity/src/vm/analysis/mod.rs +++ b/clarity/src/vm/analysis/mod.rs @@ -141,6 +141,7 @@ pub fn run_analysis( | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db) } diff --git a/clarity/src/vm/analysis/type_checker/mod.rs b/clarity/src/vm/analysis/type_checker/mod.rs index dd2f145333..b3b8d6fb45 100644 --- a/clarity/src/vm/analysis/type_checker/mod.rs +++ b/clarity/src/vm/analysis/type_checker/mod.rs @@ -53,6 +53,7 @@ impl FunctionType { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => self.check_args_2_1(accounting, args, clarity_version), StacksEpochId::Epoch10 => unreachable!("Epoch10 is not supported"), } @@ -73,6 +74,7 @@ impl FunctionType { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { self.check_args_by_allowing_trait_cast_2_1(db, clarity_version, func_args) } diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 31b22f956f..aa0c6710e9 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -724,6 +724,7 @@ impl LimitedCostTracker { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | 
StacksEpochId::Epoch30 => COSTS_3_NAME.to_string(), } } diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index 96013cde27..116b3ba328 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -112,7 +112,9 @@ pub trait HeadersDB { pub trait BurnStateDB { fn get_v1_unlock_height(&self) -> u32; fn get_v2_unlock_height(&self) -> u32; + fn get_v3_unlock_height(&self) -> u32; fn get_pox_3_activation_height(&self) -> u32; + fn get_pox_4_activation_height(&self) -> u32; /// Returns the *burnchain block height* for the `sortition_id` is associated with. fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option; @@ -201,10 +203,18 @@ impl BurnStateDB for &dyn BurnStateDB { (*self).get_v2_unlock_height() } + fn get_v3_unlock_height(&self) -> u32 { + (*self).get_v3_unlock_height() + } + fn get_pox_3_activation_height(&self) -> u32 { (*self).get_pox_3_activation_height() } + fn get_pox_4_activation_height(&self) -> u32 { + (*self).get_pox_4_activation_height() + } + fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { (*self).get_burn_block_height(sortition_id) } @@ -379,10 +389,18 @@ impl BurnStateDB for NullBurnStateDB { u32::MAX } + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } + fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } + fn get_pox_prepare_length(&self) -> u32 { panic!("NullBurnStateDB should not return PoX info"); } @@ -820,6 +838,11 @@ impl<'a> ClarityDatabase<'a> { self.burn_state_db.get_pox_3_activation_height() } + /// Return the height for PoX 4 activation from the burn state db + pub fn get_pox_4_activation_height(&self) -> u32 { + self.burn_state_db.get_pox_4_activation_height() + } + /// Return the height for PoX v2 -> v3 auto unlocks /// from the burn state db pub fn get_v2_unlock_height(&mut self) -> u32 { @@ -830,6 +853,16 @@ impl<'a> ClarityDatabase<'a> { } } + 
/// Return the height for PoX v3 -> v4 auto unlocks + /// from the burn state db + pub fn get_v3_unlock_height(&mut self) -> u32 { + if self.get_clarity_epoch_version() >= StacksEpochId::Epoch24 { + self.burn_state_db.get_v3_unlock_height() + } else { + u32::MAX + } + } + /// Get the last-known burnchain block height. /// Note that this is _not_ the burnchain height in which this block was mined! /// This is the burnchain block height of the parent of the Stacks block at the current Stacks @@ -1906,8 +1939,8 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height(), self.get_v3_unlock_height()), + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height(), self.get_v3_unlock_height())); STXBalanceSnapshot::new(principal, stx_balance, cur_burn_height, self) } @@ -1925,8 +1958,8 @@ impl<'a> ClarityDatabase<'a> { stx_balance.amount_locked(), stx_balance.unlock_height(), cur_burn_height, - stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height()), - stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height())); + stx_balance.get_available_balance_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height(), self.get_v3_unlock_height()), + stx_balance.has_unlockable_tokens_at_burn_block(cur_burn_height, self.get_v1_unlock_height(), self.get_v2_unlock_height(), self.get_v3_unlock_height())); STXBalanceSnapshot::new(principal, stx_balance, 
cur_burn_height, self) } diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs index 89a635765e..e1454d8a1e 100644 --- a/clarity/src/vm/database/structures.rs +++ b/clarity/src/vm/database/structures.rs @@ -146,6 +146,11 @@ pub enum STXBalance { amount_locked: u128, unlock_height: u64, }, + LockedPoxFour { + amount_unlocked: u128, + amount_locked: u128, + unlock_height: u64, + }, } /// Lifetime-limited handle to an uncommitted balance structure. @@ -225,6 +230,24 @@ impl ClaritySerializable for STXBalance { .write_all(&unlock_height.to_be_bytes()) .expect("STXBalance serialization: failed writing unlock_height."); } + STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + unlock_height, + } => { + buffer + .write_all(&[STXBalance::pox_4_version]) + .expect("STXBalance serialization: failed to write PoX version byte"); + buffer + .write_all(&amount_unlocked.to_be_bytes()) + .expect("STXBalance serialization: failed writing amount_unlocked."); + buffer + .write_all(&amount_locked.to_be_bytes()) + .expect("STXBalance serialization: failed writing amount_locked."); + buffer + .write_all(&unlock_height.to_be_bytes()) + .expect("STXBalance serialization: failed writing unlock_height."); + } } to_hex(buffer.as_slice()) } @@ -301,6 +324,12 @@ impl ClarityDeserializable for STXBalance { amount_locked, unlock_height, } + } else if version == &STXBalance::pox_4_version { + STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + unlock_height, + } } else { unreachable!("Version is checked for pox_3 or pox_2 version compliance above"); } @@ -358,38 +387,50 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { pub fn get_available_balance(&mut self) -> u128 { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + let v3_unlock_height = self.db_ref.get_v3_unlock_height(); self.balance.get_available_balance_at_burn_block( self.burn_block_height, 
v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) } pub fn canonical_balance_repr(&mut self) -> STXBalance { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + let v3_unlock_height = self.db_ref.get_v3_unlock_height(); self.balance - .canonical_repr_at_block(self.burn_block_height, v1_unlock_height, v2_unlock_height) + .canonical_repr_at_block( + self.burn_block_height, + v1_unlock_height, + v2_unlock_height, + v3_unlock_height, + ) .0 } pub fn has_locked_tokens(&mut self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + let v3_unlock_height = self.db_ref.get_v3_unlock_height(); self.balance.has_locked_tokens_at_burn_block( self.burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) } pub fn has_unlockable_tokens(&mut self) -> bool { let v1_unlock_height = self.db_ref.get_v1_unlock_height(); let v2_unlock_height = self.db_ref.get_v2_unlock_height(); + let v3_unlock_height = self.db_ref.get_v3_unlock_height(); self.balance.has_unlockable_tokens_at_burn_block( self.burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) } @@ -684,6 +725,120 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { ) } + //////////////// Pox-4 ////////////////// + + /// Lock `amount_to_lock` tokens on this account until `unlock_burn_height`. 
+ /// After calling, this method will set the balance to a "LockedPoxFour" balance, + /// because this method is only invoked as a result of PoX4 interactions + pub fn lock_tokens_v4(&mut self, amount_to_lock: u128, unlock_burn_height: u64) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after account-token-lock"); + } + + // caller needs to have checked this + assert!(amount_to_lock > 0, "BUG: cannot lock 0 tokens"); + + if unlock_burn_height <= self.burn_block_height { + // caller needs to have checked this + panic!("FATAL: cannot set a lock with expired unlock burn height"); + } + + if self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account already has locked tokens"); + } + + // from `unlock_available_tokens_if_any` call above, `self.balance` should + // be canonicalized already + + let new_amount_unlocked = self + .balance + .get_total_balance() + .checked_sub(amount_to_lock) + .expect("FATAL: account locks more STX than balance possessed"); + + self.balance = STXBalance::LockedPoxFour { + amount_unlocked: new_amount_unlocked, + amount_locked: amount_to_lock, + unlock_height: unlock_burn_height, + }; + } + + /// Extend this account's current lock to `unlock_burn_height`. 
+ /// After calling, this method will set the balance to a "LockedPoxFour" balance, + /// because this method is only invoked as a result of PoX3 interactions + pub fn extend_lock_v4(&mut self, unlock_burn_height: u64) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after extend-token-lock"); + } + + if !self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account does not have locked tokens"); + } + + if unlock_burn_height <= self.burn_block_height { + // caller needs to have checked this + panic!("FATAL: cannot set a lock with expired unlock burn height"); + } + + self.balance = STXBalance::LockedPoxFour { + amount_unlocked: self.balance.amount_unlocked(), + amount_locked: self.balance.amount_locked(), + unlock_height: unlock_burn_height, + }; + } + + /// Increase the account's current lock to `new_total_locked`. + /// Panics if `self` was not locked by V3 PoX. + pub fn increase_lock_v4(&mut self, new_total_locked: u128) { + let unlocked = self.unlock_available_tokens_if_any(); + if unlocked > 0 { + debug!("Consolidated after extend-token-lock"); + } + + if !self.has_locked_tokens() { + // caller needs to have checked this + panic!("FATAL: account does not have locked tokens"); + } + + if !self.is_v4_locked() { + // caller needs to have checked this + panic!("FATAL: account must be locked by pox-3"); + } + + assert!( + self.balance.amount_locked() <= new_total_locked, + "FATAL: account must lock more after `increase_lock_v3`" + ); + + let total_amount = self + .balance + .amount_unlocked() + .checked_add(self.balance.amount_locked()) + .expect("STX balance overflowed u128"); + let amount_unlocked = total_amount + .checked_sub(new_total_locked) + .expect("STX underflow: more is locked than total balance"); + + self.balance = STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked: new_total_locked, + unlock_height: self.balance.unlock_height(), + }; + } + + /// Return true 
iff `self` represents a snapshot that has a lock + /// created by PoX v3. + pub fn is_v4_locked(&mut self) -> bool { + matches!( + self.canonical_balance_repr(), + STXBalance::LockedPoxFour { .. } + ) + } + /////////////// GENERAL ////////////////////// /// If this snapshot is locked, then alter the lock height to be @@ -718,6 +873,15 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { amount_locked, unlock_height: new_unlock_height, }, + STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + .. + } => STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + unlock_height: new_unlock_height, + }, }; } @@ -728,6 +892,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> { self.burn_block_height, self.db_ref.get_v1_unlock_height(), self.db_ref.get_v2_unlock_height(), + self.db_ref.get_v3_unlock_height(), ); self.balance = new_balance; unlocked @@ -740,6 +905,7 @@ impl STXBalance { pub const v2_and_v3_size: usize = 41; pub const pox_2_version: u8 = 0; pub const pox_3_version: u8 = 1; + pub const pox_4_version: u8 = 2; pub fn zero() -> STXBalance { STXBalance::Unlocked { amount: 0 } @@ -756,7 +922,8 @@ impl STXBalance { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { unlock_height, .. } | STXBalance::LockedPoxTwo { unlock_height, .. } - | STXBalance::LockedPoxThree { unlock_height, .. } => *unlock_height, + | STXBalance::LockedPoxThree { unlock_height, .. } + | STXBalance::LockedPoxFour { unlock_height, .. } => *unlock_height, } } @@ -764,24 +931,36 @@ impl STXBalance { /// *while* factoring in the PoX 2 early unlock for PoX 1 and PoX 3 early unlock for PoX 2. /// This value is still lazy: this unlock height may be less than the current /// burn block height, if so it will be updated in a canonicalized view. 
- pub fn effective_unlock_height(&self, v1_unlock_height: u32, v2_unlock_height: u32) -> u64 { + pub fn effective_unlock_height( + &self, + v1_unlock_height: u32, + v2_unlock_height: u32, + v3_unlock_height: u32, + ) -> u64 { match self { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { unlock_height, .. } => { - if *unlock_height >= (v1_unlock_height as u64) { - v1_unlock_height as u64 + if *unlock_height >= u64::from(v1_unlock_height) { + u64::from(v1_unlock_height) } else { *unlock_height } } STXBalance::LockedPoxTwo { unlock_height, .. } => { - if *unlock_height >= (v2_unlock_height as u64) { - v2_unlock_height as u64 + if *unlock_height >= u64::from(v2_unlock_height) { + u64::from(v2_unlock_height) + } else { + *unlock_height + } + } + STXBalance::LockedPoxThree { unlock_height, .. } => { + if *unlock_height >= u64::from(v3_unlock_height) { + u64::from(v3_unlock_height) } else { *unlock_height } } - STXBalance::LockedPoxThree { unlock_height, .. } => *unlock_height, + STXBalance::LockedPoxFour { unlock_height, .. } => *unlock_height, } } @@ -792,7 +971,8 @@ impl STXBalance { STXBalance::Unlocked { .. } => 0, STXBalance::LockedPoxOne { amount_locked, .. } | STXBalance::LockedPoxTwo { amount_locked, .. } - | STXBalance::LockedPoxThree { amount_locked, .. } => *amount_locked, + | STXBalance::LockedPoxThree { amount_locked, .. } + | STXBalance::LockedPoxFour { amount_locked, .. } => *amount_locked, } } @@ -811,6 +991,9 @@ impl STXBalance { } | STXBalance::LockedPoxThree { amount_unlocked, .. + } + | STXBalance::LockedPoxFour { + amount_unlocked, .. } => *amount_unlocked, } } @@ -828,6 +1011,9 @@ impl STXBalance { } | STXBalance::LockedPoxThree { amount_unlocked, .. + } + | STXBalance::LockedPoxFour { + amount_unlocked, .. } => { *amount_unlocked = amount_unlocked.checked_sub(delta).expect("STX underflow"); } @@ -847,6 +1033,9 @@ impl STXBalance { } | STXBalance::LockedPoxThree { amount_unlocked, .. 
+ } + | STXBalance::LockedPoxFour { + amount_unlocked, .. } => { if let Some(new_amount) = amount_unlocked.checked_add(delta) { *amount_unlocked = new_amount; @@ -867,11 +1056,13 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> (STXBalance, u128) { if self.has_unlockable_tokens_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { ( STXBalance::Unlocked { @@ -889,11 +1080,13 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> u128 { if self.has_unlockable_tokens_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { self.get_total_balance() } else { @@ -908,6 +1101,9 @@ impl STXBalance { STXBalance::LockedPoxThree { amount_unlocked, .. } => *amount_unlocked, + STXBalance::LockedPoxFour { + amount_unlocked, .. + } => *amount_unlocked, } } } @@ -917,11 +1113,13 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> (u128, u64) { if self.has_unlockable_tokens_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { (0, 0) } else { @@ -942,6 +1140,11 @@ impl STXBalance { unlock_height, .. } => (*amount_locked, *unlock_height), + STXBalance::LockedPoxFour { + amount_locked, + unlock_height, + .. + } => (*amount_locked, *unlock_height), } } } @@ -964,6 +1167,11 @@ impl STXBalance { amount_locked, .. } => (*amount_unlocked, *amount_locked), + STXBalance::LockedPoxFour { + amount_unlocked, + amount_locked, + .. + } => (*amount_unlocked, *amount_locked), }; unlocked.checked_add(locked).expect("STX overflow") } @@ -985,6 +1193,7 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> bool { match self { STXBalance::Unlocked { .. 
} => false, @@ -1001,7 +1210,7 @@ impl STXBalance { return false; } // if unlockable due to Stacks 2.1 early unlock - if v1_unlock_height as u64 <= burn_block_height { + if u64::from(v1_unlock_height) <= burn_block_height { return false; } true @@ -1018,7 +1227,7 @@ impl STXBalance { return false; } // if unlockable due to Stacks 2.2 early unlock - if v2_unlock_height as u64 <= burn_block_height { + if u64::from(v2_unlock_height) <= burn_block_height { return false; } true @@ -1027,6 +1236,23 @@ impl STXBalance { amount_locked, unlock_height, .. + } => { + if *amount_locked == 0 { + return false; + } + if *unlock_height <= burn_block_height { + return false; + } + // if unlockable due to Stacks 2.5 early unlock + if u64::from(v3_unlock_height) <= burn_block_height { + return false; + } + true + } + STXBalance::LockedPoxFour { + amount_locked, + unlock_height, + .. } => { if *amount_locked == 0 { return false; @@ -1044,6 +1270,7 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> bool { match self { STXBalance::Unlocked { .. } => false, @@ -1060,7 +1287,7 @@ impl STXBalance { return true; } // if unlockable due to Stacks 2.1 early unlock - if v1_unlock_height as u64 <= burn_block_height { + if u64::from(v1_unlock_height) <= burn_block_height { return true; } false @@ -1078,7 +1305,7 @@ impl STXBalance { return true; } // if unlockable due to Stacks 2.2 early unlock - if v2_unlock_height as u64 <= burn_block_height { + if u64::from(v2_unlock_height) <= burn_block_height { return true; } false @@ -1087,6 +1314,24 @@ impl STXBalance { amount_locked, unlock_height, .. 
+ } => { + if *amount_locked == 0 { + return false; + } + // if normally unlockable, return true + if *unlock_height <= burn_block_height { + return true; + } + // if unlockable due to Stacks 2.5 early unlock + if u64::from(v3_unlock_height) <= burn_block_height { + return true; + } + false + } + STXBalance::LockedPoxFour { + amount_locked, + unlock_height, + .. } => { if *amount_locked == 0 { return false; @@ -1106,11 +1351,13 @@ impl STXBalance { burn_block_height: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, ) -> bool { self.get_available_balance_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) >= amount } } diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs index cf0d142f02..2444bfc5fc 100644 --- a/clarity/src/vm/docs/mod.rs +++ b/clarity/src/vm/docs/mod.rs @@ -2773,10 +2773,18 @@ mod test { u32::MAX } + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } + fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } + fn get_pox_prepare_length(&self) -> u32 { panic!("Docs db should not return PoX info") } diff --git a/clarity/src/vm/functions/assets.rs b/clarity/src/vm/functions/assets.rs index c0c4fde945..b4cf3c2084 100644 --- a/clarity/src/vm/functions/assets.rs +++ b/clarity/src/vm/functions/assets.rs @@ -236,6 +236,7 @@ pub fn special_stx_account( .canonical_balance_repr(); let v1_unlock_ht = env.global_context.database.get_v1_unlock_height(); let v2_unlock_ht = env.global_context.database.get_v2_unlock_height(); + let v3_unlock_ht = env.global_context.database.get_v3_unlock_height(); TupleData::from_data(vec![ ( @@ -248,7 +249,11 @@ pub fn special_stx_account( ), ( "unlock-height".try_into().unwrap(), - Value::UInt(stx_balance.effective_unlock_height(v1_unlock_ht, v2_unlock_ht) as u128), + Value::UInt(u128::from(stx_balance.effective_unlock_height( + v1_unlock_ht, + v2_unlock_ht, + v3_unlock_ht, + ))), 
), ]) .map(Value::Tuple) diff --git a/clarity/src/vm/functions/mod.rs b/clarity/src/vm/functions/mod.rs index 7668305f72..ef10d351a1 100644 --- a/clarity/src/vm/functions/mod.rs +++ b/clarity/src/vm/functions/mod.rs @@ -62,6 +62,8 @@ macro_rules! switch_on_global_epoch { StacksEpochId::Epoch23 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 2.4. StacksEpochId::Epoch24 => $Epoch205Version(args, env, context), + // Note: We reuse 2.05 for 2.5. + StacksEpochId::Epoch25 => $Epoch205Version(args, env, context), // Note: We reuse 2.05 for 3.0. StacksEpochId::Epoch30 => $Epoch205Version(args, env, context), } diff --git a/clarity/src/vm/test_util/mod.rs b/clarity/src/vm/test_util/mod.rs index aec31e1e41..6633c65093 100644 --- a/clarity/src/vm/test_util/mod.rs +++ b/clarity/src/vm/test_util/mod.rs @@ -42,6 +42,9 @@ pub const TEST_BURN_STATE_DB_21: UnitTestBurnStateDB = UnitTestBurnStateDB { pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnStateDB { match epoch_id { + StacksEpochId::Epoch10 => { + panic!("Epoch 1.0 not testable"); + } StacksEpochId::Epoch20 => UnitTestBurnStateDB { epoch_id, ast_rules: ASTRules::Typical, @@ -50,11 +53,12 @@ pub fn generate_test_burn_state_db(epoch_id: StacksEpochId) -> UnitTestBurnState | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 => UnitTestBurnStateDB { + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => UnitTestBurnStateDB { epoch_id, ast_rules: ASTRules::PrecheckSize, }, - _ => panic!("Epoch {} not covered", &epoch_id), } } @@ -233,10 +237,18 @@ impl BurnStateDB for UnitTestBurnStateDB { u32::MAX } + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } + fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } + fn get_pox_prepare_length(&self) -> u32 { 1 } diff --git a/clarity/src/vm/tests/mod.rs b/clarity/src/vm/tests/mod.rs index 
f179dd0e78..c73db0cdad 100644 --- a/clarity/src/vm/tests/mod.rs +++ b/clarity/src/vm/tests/mod.rs @@ -102,6 +102,7 @@ epochs_template! { Epoch22, Epoch23, Epoch24, + Epoch25, } clarity_template! { @@ -115,6 +116,8 @@ clarity_template! { (Epoch23, Clarity2), (Epoch24, Clarity1), (Epoch24, Clarity2), + (Epoch25, Clarity1), + (Epoch25, Clarity2), } #[cfg(test)] diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs index 1d25438cb5..6fc8d5f014 100644 --- a/clarity/src/vm/types/signatures.rs +++ b/clarity/src/vm/types/signatures.rs @@ -529,6 +529,7 @@ impl TypeSignature { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => self.admits_type_v2_1(other), StacksEpochId::Epoch10 => unreachable!("epoch 1.0 not supported"), } @@ -728,7 +729,11 @@ impl TypeSignature { // Epoch-2.2 had a regression in canonicalization, so it must be preserved here. | StacksEpochId::Epoch22 => self.clone(), // Note for future epochs: Epochs >= 2.3 should use the canonicalize_v2_1() routine - StacksEpochId::Epoch21 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 | StacksEpochId::Epoch30 => self.canonicalize_v2_1(), + StacksEpochId::Epoch21 + | StacksEpochId::Epoch23 + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => self.canonicalize_v2_1(), } } @@ -1060,6 +1065,7 @@ impl TypeSignature { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => Self::least_supertype_v2_1(a, b), StacksEpochId::Epoch10 => unreachable!("Clarity 1.0 is not supported"), } diff --git a/clarity/src/vm/version.rs b/clarity/src/vm/version.rs index 6da73f7dc6..f64d4ee878 100644 --- a/clarity/src/vm/version.rs +++ b/clarity/src/vm/version.rs @@ -36,6 +36,7 @@ impl ClarityVersion { StacksEpochId::Epoch22 => ClarityVersion::Clarity2, StacksEpochId::Epoch23 => ClarityVersion::Clarity2, StacksEpochId::Epoch24 => 
ClarityVersion::Clarity2, + StacksEpochId::Epoch25 => ClarityVersion::Clarity2, StacksEpochId::Epoch30 => ClarityVersion::Clarity2, } } diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index a348024c2d..67ccfca970 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -15,6 +15,7 @@ use sha2::Sha256; use sha2::{Digest as Sha2Digest, Sha512_256}; use crate::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; +use crate::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::deps_common::bitcoin::util::hash::Sha256dHash; use crate::util::hash::DoubleSha256; use crate::util::hash::{to_hex, Hash160, Sha512Trunc256Sum, HASH160_ENCODED_SIZE}; @@ -280,6 +281,10 @@ impl StacksBlockId { let h = Sha512Trunc256Sum::from_hasher(hasher); StacksBlockId(h.0) } + + pub fn first_mined() -> StacksBlockId { + StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) + } } impl StacksWorkScore { diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index f60a70efd6..953668cbef 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -74,6 +74,7 @@ pub enum StacksEpochId { Epoch22 = 0x0200f, Epoch23 = 0x02014, Epoch24 = 0x02019, + Epoch25 = 0x0201a, Epoch30 = 0x03000, } @@ -92,7 +93,7 @@ impl StacksEpochId { | StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 => false, - StacksEpochId::Epoch24 | StacksEpochId::Epoch30 => true, + StacksEpochId::Epoch24 | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, } } } @@ -107,6 +108,7 @@ impl std::fmt::Display for StacksEpochId { StacksEpochId::Epoch22 => write!(f, "2.2"), StacksEpochId::Epoch23 => write!(f, "2.3"), StacksEpochId::Epoch24 => write!(f, "2.4"), + StacksEpochId::Epoch25 => write!(f, "2.5"), StacksEpochId::Epoch30 => write!(f, "3.0"), } } @@ -124,6 +126,7 @@ impl TryFrom for StacksEpochId { x if x == 
StacksEpochId::Epoch22 as u32 => Ok(StacksEpochId::Epoch22), x if x == StacksEpochId::Epoch23 as u32 => Ok(StacksEpochId::Epoch23), x if x == StacksEpochId::Epoch24 as u32 => Ok(StacksEpochId::Epoch24), + x if x == StacksEpochId::Epoch25 as u32 => Ok(StacksEpochId::Epoch25), x if x == StacksEpochId::Epoch30 as u32 => Ok(StacksEpochId::Epoch30), _ => Err("Invalid epoch"), } diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index c33ed1bb05..302d0a49dc 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -50,7 +50,7 @@ use crate::chainstate::burn::operations::BlockstackOperationType; use crate::chainstate::burn::operations::Error as op_error; use crate::chainstate::burn::operations::LeaderKeyRegisterOp; use crate::chainstate::stacks::address::PoxAddress; -use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME}; use crate::chainstate::stacks::StacksPublicKey; use crate::core::*; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; @@ -311,8 +311,12 @@ pub struct PoxConstants { pub v1_unlock_height: u32, /// The auto unlock height for PoX v2 lockups during Epoch 2.2 pub v2_unlock_height: u32, + /// The auto unlock height for PoX v3 lockups during Epoch 2.5 + pub v3_unlock_height: u32, /// After this burn height, reward cycles use pox-3 for reward set data pub pox_3_activation_height: u32, + /// After this burn height, reward cycles use pox-4 for reward set data + pub pox_4_activation_height: u32, _shadow: PhantomData<()>, } @@ -327,13 +331,17 @@ impl PoxConstants { sunset_end: u64, v1_unlock_height: u32, v2_unlock_height: u32, + v3_unlock_height: u32, pox_3_activation_height: u32, + pox_4_activation_height: u32, ) -> PoxConstants { assert!(anchor_threshold > (prepare_length / 2)); assert!(prepare_length < reward_cycle_length); assert!(sunset_start <= sunset_end); assert!(v2_unlock_height >= 
v1_unlock_height); + assert!(v3_unlock_height >= v2_unlock_height); assert!(pox_3_activation_height >= v2_unlock_height); + assert!(pox_4_activation_height >= v3_unlock_height); PoxConstants { reward_cycle_length, @@ -345,23 +353,41 @@ impl PoxConstants { sunset_end, v1_unlock_height, v2_unlock_height, + v3_unlock_height, pox_3_activation_height, + pox_4_activation_height, _shadow: PhantomData, } } #[cfg(test)] pub fn test_default() -> PoxConstants { // 20 reward slots; 10 prepare-phase slots - PoxConstants::new(10, 5, 3, 25, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX) + PoxConstants::new( + 10, + 5, + 3, + 25, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ) } /// Returns the PoX contract that is "active" at the given burn block height pub fn static_active_pox_contract( v1_unlock_height: u64, pox_3_activation_height: u64, + pox_4_activation_height: u64, burn_height: u64, ) -> &'static str { - if burn_height > pox_3_activation_height { + if burn_height > pox_4_activation_height { + POX_4_NAME + } else if burn_height > pox_3_activation_height { POX_3_NAME } else if burn_height > v1_unlock_height { POX_2_NAME @@ -373,14 +399,16 @@ impl PoxConstants { /// Returns the PoX contract that is "active" at the given burn block height pub fn active_pox_contract(&self, burn_height: u64) -> &'static str { Self::static_active_pox_contract( - self.v1_unlock_height as u64, - self.pox_3_activation_height as u64, + u64::from(self.v1_unlock_height), + u64::from(self.pox_3_activation_height), + u64::from(self.pox_4_activation_height), burn_height, ) } pub fn reward_slots(&self) -> u32 { - (self.reward_cycle_length - self.prepare_length) * (OUTPUTS_PER_COMMIT as u32) + (self.reward_cycle_length - self.prepare_length) + * u32::try_from(OUTPUTS_PER_COMMIT).expect("FATAL: > 2^32 outputs per commit") } /// is participating_ustx enough to engage in PoX in the next reward cycle? 
@@ -389,7 +417,7 @@ impl PoxConstants { .checked_mul(100) .expect("OVERFLOW: uSTX overflowed u128") > liquid_ustx - .checked_mul(self.pox_participation_threshold_pct as u128) + .checked_mul(u128::from(self.pox_participation_threshold_pct)) .expect("OVERFLOW: uSTX overflowed u128") } @@ -404,9 +432,13 @@ impl PoxConstants { BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_MAINNET_EARLY_UNLOCK_HEIGHT, POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT, + POX_V3_MAINNET_EARLY_UNLOCK_HEIGHT, BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT .try_into() .expect("Epoch transition height must be <= u32::MAX"), + BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT + .try_into() + .expect("Epoch transition height must be <= u32::MAX"), ) } @@ -421,9 +453,13 @@ impl PoxConstants { BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END, POX_V1_TESTNET_EARLY_UNLOCK_HEIGHT, POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT, + POX_V3_TESTNET_EARLY_UNLOCK_HEIGHT, BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT .try_into() .expect("Epoch transition height must be <= u32::MAX"), + BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT + .try_into() + .expect("Epoch transition height must be <= u32::MAX"), ) // total liquid supply is 40000000000000000 µSTX } @@ -439,6 +475,8 @@ impl PoxConstants { 1_000_000, 2_000_000, 3_000_000, + 4_000_000, + 5_000_000, ) } @@ -473,16 +511,25 @@ impl PoxConstants { } } + /// What's the first block in the prepare phase + pub fn prepare_phase_start(&self, first_block_height: u64, reward_cycle: u64) -> u64 { + let reward_cycle_start = + self.reward_cycle_to_block_height(first_block_height, reward_cycle); + let prepare_phase_start = reward_cycle_start + u64::from(self.reward_cycle_length) + - u64::from(self.prepare_length); + prepare_phase_start + } + pub fn is_reward_cycle_start(&self, first_block_height: u64, burn_height: u64) -> bool { let effective_height = burn_height - first_block_height; // first block of the new reward cycle - (effective_height % (self.reward_cycle_length as u64)) == 1 + (effective_height % 
u64::from(self.reward_cycle_length)) == 1 } pub fn reward_cycle_to_block_height(&self, first_block_height: u64, reward_cycle: u64) -> u64 { // NOTE: the `+ 1` is because the height of the first block of a reward cycle is mod 1, not // mod 0. - first_block_height + reward_cycle * (self.reward_cycle_length as u64) + 1 + first_block_height + reward_cycle * u64::from(self.reward_cycle_length) + 1 } pub fn block_height_to_reward_cycle( @@ -493,15 +540,15 @@ impl PoxConstants { Self::static_block_height_to_reward_cycle( block_height, first_block_height, - self.reward_cycle_length as u64, + u64::from(self.reward_cycle_length), ) } pub fn is_in_prepare_phase(&self, first_block_height: u64, block_height: u64) -> bool { Self::static_is_in_prepare_phase( first_block_height, - self.reward_cycle_length as u64, - self.prepare_length as u64, + u64::from(self.reward_cycle_length), + u64::from(self.prepare_length), block_height, ) } @@ -521,7 +568,7 @@ impl PoxConstants { // NOTE: first block in reward cycle is mod 1, so mod 0 is the last block in the // prepare phase. 
- reward_index == 0 || reward_index > ((reward_cycle_length - prepare_length) as u64) + reward_index == 0 || reward_index > u64::from(reward_cycle_length - prepare_length) } } diff --git a/stackslib/src/burnchains/tests/affirmation.rs b/stackslib/src/burnchains/tests/affirmation.rs index ef4f8c5ba3..49097a1938 100644 --- a/stackslib/src/burnchains/tests/affirmation.rs +++ b/stackslib/src/burnchains/tests/affirmation.rs @@ -497,6 +497,8 @@ fn test_read_prepare_phase_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -574,6 +576,8 @@ fn test_parent_block_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -676,6 +680,8 @@ fn test_filter_orphan_block_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -747,6 +753,8 @@ fn test_filter_missed_block_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -818,6 +826,8 @@ fn test_find_heaviest_block_commit() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1041,6 +1051,8 @@ fn test_find_heaviest_parent_commit_many_commits() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1304,6 +1316,8 @@ fn test_update_pox_affirmation_maps_3_forks() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1564,6 +1578,8 @@ fn test_update_pox_affirmation_maps_unique_anchor_block() { 
u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -1767,6 +1783,8 @@ fn test_update_pox_affirmation_maps_absent() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -2240,6 +2258,8 @@ fn test_update_pox_affirmation_maps_nothing() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -2517,6 +2537,8 @@ fn test_update_pox_affirmation_fork_2_cycles() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -2819,6 +2841,8 @@ fn test_update_pox_affirmation_fork_duel() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index c0bd183b92..7312f406d0 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -523,6 +523,8 @@ fn test_get_commit_at() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -648,6 +650,8 @@ fn test_get_set_check_anchor_block() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -743,6 +747,8 @@ fn test_update_block_descendancy() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); @@ -872,6 +878,8 @@ fn test_update_block_descendancy_with_fork() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); 
burnchain.first_block_height = first_height; burnchain.first_block_hash = first_bhh.clone(); diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 2658a9305f..d051c74151 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -38,7 +38,8 @@ use crate::chainstate::burn::*; use crate::chainstate::coordinator::comm::*; use crate::chainstate::coordinator::*; use crate::chainstate::stacks::*; -use crate::core::STACKS_EPOCH_2_1_MARKER; +use crate::core::STACKS_EPOCH_2_4_MARKER; +use crate::core::STACKS_EPOCH_3_0_MARKER; use crate::cost_estimates::{CostEstimator, FeeEstimator}; use crate::stacks_common::deps_common::bitcoin::network::serialize::BitcoinHash; use crate::types::chainstate::{BlockHeaderHash, SortitionId, VRFSeed}; @@ -377,13 +378,16 @@ impl TestBurnchainBlock { Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0); txop.consensus_hash = self.parent_snapshot.consensus_hash.clone(); + let miner_pubkey_hash160 = miner.nakamoto_miner_hash160(); + txop.set_nakamoto_signing_key(&miner_pubkey_hash160); + self.txs .push(BlockstackOperationType::LeaderKeyRegister(txop.clone())); txop } - pub fn add_leader_block_commit( + pub(crate) fn inner_add_block_commit( &mut self, ic: &SortitionDBConn, miner: &mut TestMiner, @@ -392,6 +396,8 @@ impl TestBurnchainBlock { leader_key: &LeaderKeyRegisterOp, fork_snapshot: Option<&BlockSnapshot>, parent_block_snapshot: Option<&BlockSnapshot>, + new_seed: Option, + epoch_marker: u8, ) -> LeaderBlockCommitOp { let input = (Txid([0; 32]), 0); let pubks = miner @@ -412,15 +418,17 @@ impl TestBurnchainBlock { None => SortitionDB::get_first_block_snapshot(ic).unwrap(), }; - // prove on the last-ever sortition's hash to produce the new seed - let proof = miner - .make_proof(&leader_key.public_key, &last_snapshot.sortition_hash) - .expect(&format!( - "FATAL: no private key for {}", - leader_key.public_key.to_hex() - )); + let 
new_seed = new_seed.unwrap_or_else(|| { + // prove on the last-ever sortition's hash to produce the new seed + let proof = miner + .make_proof(&leader_key.public_key, &last_snapshot.sortition_hash) + .expect(&format!( + "FATAL: no private key for {}", + leader_key.public_key.to_hex() + )); - let new_seed = VRFSeed::from_proof(&proof); + VRFSeed::from_proof(&proof) + }); let get_commit_res = SortitionDB::get_block_commit( ic.conn(), @@ -468,7 +476,7 @@ impl TestBurnchainBlock { txop.txid = Txid::from_test_data(txop.block_height, txop.vtxindex, &txop.burn_header_hash, 0); - txop.memo = vec![STACKS_EPOCH_2_1_MARKER << 3]; + txop.memo = vec![epoch_marker << 3]; self.txs .push(BlockstackOperationType::LeaderBlockCommit(txop.clone())); @@ -476,7 +484,29 @@ impl TestBurnchainBlock { txop } - // TODO: user burn support + /// Add an epoch 2.x block-commit + pub fn add_leader_block_commit( + &mut self, + ic: &SortitionDBConn, + miner: &mut TestMiner, + block_hash: &BlockHeaderHash, + burn_fee: u64, + leader_key: &LeaderKeyRegisterOp, + fork_snapshot: Option<&BlockSnapshot>, + parent_block_snapshot: Option<&BlockSnapshot>, + ) -> LeaderBlockCommitOp { + self.inner_add_block_commit( + ic, + miner, + block_hash, + burn_fee, + leader_key, + fork_snapshot, + parent_block_snapshot, + None, + STACKS_EPOCH_2_4_MARKER, + ) + } pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) -> () { assert_eq!(parent_snapshot.block_height + 1, self.block_height); diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index ca2917449b..7847529514 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -31,6 +31,7 @@ use clarity::vm::types::Value; use rand; use rand::RngCore; use rusqlite::types::ToSql; +use rusqlite::Error as sqlite_error; use rusqlite::Row; use rusqlite::Transaction; use rusqlite::TransactionBehavior; @@ -87,12 +88,9 @@ use crate::chainstate::ChainstateDB; use 
crate::core::AST_RULES_PRECHECK_SIZE; use crate::core::FIRST_BURNCHAIN_CONSENSUS_HASH; use crate::core::FIRST_STACKS_BLOCK_HASH; -use crate::core::{ - StacksEpoch, StacksEpochExtension, StacksEpochId, NAKAMOTO_TENURE_BLOCK_ACCEPTANCE_PERIOD, - STACKS_EPOCH_MAX, -}; +use crate::core::{StacksEpoch, StacksEpochExtension, StacksEpochId, STACKS_EPOCH_MAX}; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; -use crate::net::{Error as NetError, Error}; +use crate::net::Error as NetError; use crate::util_lib::db::tx_begin_immediate; use crate::util_lib::db::tx_busy_handler; use crate::util_lib::db::DBTx; @@ -877,6 +875,21 @@ const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ PRIMARY KEY(txid, burn_header_hash) );"#, r#"ALTER TABLE snapshots ADD miner_pk_hash TEXT DEFAULT NULL"#, + r#" + -- eagerly-processed reward sets, before they're applied to the start of the next reward cycle + CREATE TABLE preprocessed_reward_sets ( + sortition_id TEXT PRIMARY KEY, + reward_set TEXT NOT NULL + );"#, + r#" + -- canonical chain tip at each sortition ID. 
+ -- This is updated in both 2.x and Nakamoto, but Nakamoto relies on this exclusively + CREATE TABLE stacks_chain_tips ( + sortition_id TEXT PRIMARY KEY, + consensus_hash TEXT NOT NULL, + block_hash TEXT NOT NULL, + block_height INTEGER NOT NULL + );"#, ]; const SORTITION_DB_INDEXES: &'static [&'static str] = &[ @@ -961,7 +974,7 @@ impl SortitionContext for SortitionDBTxContext { } } -fn get_block_commit_by_txid( +pub fn get_block_commit_by_txid( conn: &Connection, sort_id: &SortitionId, txid: &Txid, @@ -1106,6 +1119,11 @@ impl db_keys { format!("{}", index) } + /// reward cycle ID that was last processed + pub fn last_reward_cycle_key() -> &'static str { + "sortition_db::last_reward_cycle" + } + pub fn reward_set_size_to_string(size: usize) -> String { to_hex( &u16::try_from(size) @@ -1120,6 +1138,22 @@ impl db_keys { byte_buff.copy_from_slice(&bytes[0..2]); u16::from_le_bytes(byte_buff) } + + pub fn last_reward_cycle_to_string(rc: u64) -> String { + to_hex(&rc.to_le_bytes()) + } + + pub fn last_reward_cycle_from_string(rc_str: &str) -> u64 { + let bytes = hex_bytes(rc_str).expect("CORRUPTION: bad format written for reward cycle ID"); + assert_eq!( + bytes.len(), + 8, + "CORRUPTION: expected 8 bytes for reward cycle" + ); + let mut rc_buff = [0; 8]; + rc_buff.copy_from_slice(&bytes[0..8]); + u64::from_le_bytes(rc_buff) + } } /// Trait for structs that provide a chaintip-indexed handle into the @@ -1568,6 +1602,15 @@ impl<'a> SortitionHandleTx<'a> { stacks_block_height, )?; + #[cfg(test)] + { + let (ch, bhh) = SortitionDB::get_canonical_stacks_chain_tip_hash(self).unwrap(); + debug!( + "Memoized canonical Stacks chain tip is now {}/{}", + &ch, &bhh + ); + } + Ok(()) } @@ -1768,6 +1811,25 @@ impl<'a> SortitionHandleTx<'a> { Ok(anchor_block_txid) } + /// Update the canonical Stacks tip + fn update_canonical_stacks_tip( + &mut self, + sort_id: &SortitionId, + consensus_hash: &ConsensusHash, + stacks_block_hash: &BlockHeaderHash, + stacks_block_height: u64, + ) -> 
Result<(), db_error> { + let sql = "INSERT OR REPLACE INTO stacks_chain_tips (sortition_id,consensus_hash,block_hash,block_height) VALUES (?1,?2,?3,?4)"; + let args: &[&dyn ToSql] = &[ + sort_id, + consensus_hash, + stacks_block_hash, + &u64_to_sql(stacks_block_height)?, + ]; + self.execute(sql, args)?; + Ok(()) + } + /// Mark an existing snapshot's stacks block as accepted at a particular burn chain tip within a PoX fork (identified by the consensus hash), /// and calculate and store its arrival index. /// If this Stacks block extends the canonical stacks chain tip, then also update the memoized canonical @@ -1784,6 +1846,27 @@ impl<'a> SortitionHandleTx<'a> { stacks_block_hash: &BlockHeaderHash, stacks_block_height: u64, ) -> Result<(), db_error> { + let block_sn = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? + .ok_or(db_error::NotFoundError)?; + + let cur_epoch = + SortitionDB::get_stacks_epoch(self, block_sn.block_height)?.expect(&format!( + "FATAL: no epoch defined for burn height {}", + block_sn.block_height + )); + + if cur_epoch.epoch_id >= StacksEpochId::Epoch30 { + // Nakamoto blocks are always processed in order since the chain can't fork + self.update_canonical_stacks_tip( + &burn_tip.sortition_id, + consensus_hash, + stacks_block_hash, + stacks_block_height, + )?; + return Ok(()); + } + + // in epoch 2.x, where we track canonical stacks tip via the sortition DB let arrival_index = SortitionDB::get_max_arrival_index(self)?; let args: &[&dyn ToSql] = &[ &u64_to_sql(stacks_block_height)?, @@ -1803,11 +1886,14 @@ impl<'a> SortitionHandleTx<'a> { burn_tip.block_height ); - let num_rows = self.execute("UPDATE snapshots SET stacks_block_accepted = 1, stacks_block_height = ?1, arrival_index = ?2 WHERE consensus_hash = ?3 AND winning_stacks_block_hash = ?4", args)?; - assert!(num_rows > 0); + // NOTE: in Nakamoto, this may return zero rows since blocks are no longer coupled to + // snapshots. 
However, it will update at least one row if the block is a tenure-start + // block. + self.execute("UPDATE snapshots SET stacks_block_accepted = 1, stacks_block_height = ?1, arrival_index = ?2 WHERE consensus_hash = ?3 AND winning_stacks_block_hash = ?4", args)?; // update arrival data across all Stacks forks let (best_ch, best_bhh, best_height) = self.find_new_block_arrivals(burn_tip)?; + self.update_canonical_stacks_tip(&burn_tip.sortition_id, &best_ch, &best_bhh, best_height)?; self.update_new_block_arrivals(burn_tip, best_ch, best_bhh, best_height)?; Ok(()) @@ -1840,36 +1926,78 @@ impl<'a> SortitionHandleConn<'a> { SortitionHandleConn::open_reader(connection, &sn.sortition_id) } - /// Does the sortition db expect to receive unknown blocks from - /// this tenure? - /// - /// This is used by nakamoto nodes while they are at or near the - /// current chain tip: only recent tenures can receive blocks this - /// way. Otherwise, the `BlockHeaderHash` must have been - /// explicitly confirmed by a block commit. - pub fn expects_blocks_from_tenure( - &self, - miner_pk: &Secp256k1PublicKey, - ) -> Result, db_error> { - let to_check = Hash160::from_node_public_key(miner_pk); - let mut cur_tip = self.context.chain_tip.clone(); - for _ in 0..NAKAMOTO_TENURE_BLOCK_ACCEPTANCE_PERIOD { - let cur_snapshot = SortitionDB::get_block_snapshot(self.sqlite(), &cur_tip)? - .ok_or_else(|| db_error::NotFoundError)?; - if cur_snapshot.miner_pk_hash == Some(to_check) { - return Ok(Some(cur_snapshot)); - } - cur_tip = cur_snapshot.parent_sortition_id.clone(); - } - Ok(None) - } - /// Does the sortition db expect to receive blocks /// signed by this stacker set? + /// + /// This only works if `consensus_hash` is within one reward cycle (2100 blocks) of the + /// sortition pointed to by this handle's sortiton tip. If it isn't, then this + /// method returns Ok(false). 
This is to prevent a DDoS vector whereby compromised stale + /// Stacker keys can be used to blast out lots of Nakamoto blocks that will be accepted + /// but never processed. So, `consensus_hash` can be in the same reward cycle as + /// `self.context.chain_tip`, or the previous, but no earlier. pub fn expects_stacker_signature( &self, + consensus_hash: &ConsensusHash, _stacker_signature: &MessageSignature, ) -> Result { + let sn = SortitionDB::get_block_snapshot(self, &self.context.chain_tip)? + .ok_or(db_error::NotFoundError) + .map_err(|e| { + warn!("No sortition for tip: {:?}", &self.context.chain_tip); + e + })?; + + let ch_sn = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? + .ok_or(db_error::NotFoundError) + .map_err(|e| { + warn!("No sortition for consensus hash: {:?}", consensus_hash); + e + })?; + + if ch_sn.block_height + u64::from(self.context.pox_constants.reward_cycle_length) + < sn.block_height + { + // too far in the past + debug!("Block with consensus hash {} is too far in the past", consensus_hash; + "consensus_hash" => %consensus_hash, + "block_height" => ch_sn.block_height, + "tip_block_height" => sn.block_height + ); + return Ok(false); + } + + // this given consensus hash must be an ancestor of our chain tip + let ch_at = self + .get_consensus_at(ch_sn.block_height)? + .ok_or(db_error::NotFoundError) + .map_err(|e| { + warn!("No ancestor consensus hash"; + "tip" => %self.context.chain_tip, + "consensus_hash" => %consensus_hash, + "consensus_hash height" => %ch_sn.block_height + ); + e + })?; + + if ch_at != ch_sn.consensus_hash { + // not an ancestor + warn!("Consensus hash is not an ancestor of the sortition tip"; + "tip" => %self.context.chain_tip, + "consensus_hash" => %consensus_hash + ); + return Err(db_error::NotFoundError); + } + + // is this consensus hash in this fork? + let Some(bhh) = SortitionDB::get_burnchain_header_hash_by_consensus(self, consensus_hash)? 
+ else { + return Ok(false); + }; + let Some(_sortition_id) = self.get_sortition_id_for_bhh(&bhh)? else { + return Ok(false); + }; + + // TODO: query set of stacker signers in order to get the aggregate public key Ok(true) } @@ -1923,6 +2051,15 @@ impl<'a> SortitionHandleConn<'a> { Ok(anchor_block_txid) } + /// Get the last processed reward cycle + pub fn get_last_processed_reward_cycle(&self) -> Result { + let encoded_rc = self + .get_indexed(&self.context.chain_tip, &db_keys::last_reward_cycle_key())? + .expect("FATAL: no last-processed reward cycle"); + + Ok(db_keys::last_reward_cycle_from_string(&encoded_rc)) + } + pub fn get_reward_cycle_unlocks( &mut self, cycle: u64, @@ -1989,13 +2126,19 @@ impl<'a> SortitionHandleConn<'a> { SortitionDB::get_block_snapshot(self.conn(), &sortition_id) } - /// Has `burn_header_hash` been processed in the current fork? - pub fn processed_block( - &self, - burn_header_hash: &BurnchainHeaderHash, - ) -> Result { - self.get_sortition_id_for_bhh(burn_header_hash) - .map(|result| result.is_some()) + /// Has `consensus_hash` been processed in the current fork? + pub fn processed_block(&self, consensus_hash: &ConsensusHash) -> Result { + let Some(snapshot) = SortitionDB::get_block_snapshot_consensus(self, consensus_hash)? + else { + return Ok(false); + }; + let Some(expected_sortition_id) = + self.get_sortition_id_for_bhh(&snapshot.burn_header_hash)? + else { + return Ok(false); + }; + let matched_fork = expected_sortition_id == snapshot.sortition_id; + Ok(matched_fork) } pub fn get_tip_snapshot(&self) -> Result, db_error> { @@ -3031,8 +3174,6 @@ impl SortitionDB { /// Get the Sortition ID for the burnchain block containing `txid`'s parent. /// `txid` is the burnchain txid of a block-commit. - /// Because the block_commit_parents table is not populated on schema migration, the returned - /// value may be NULL (and this is okay). 
pub fn get_block_commit_parent_sortition_id( conn: &Connection, txid: &Txid, @@ -3157,6 +3298,15 @@ impl SortitionDB { || version == "7" || version == "8" } + StacksEpochId::Epoch25 => { + version == "3" + || version == "4" + || version == "5" + || version == "6" + || version == "7" + // TODO: This should move to Epoch 30 once it is added + || version == "8" + } StacksEpochId::Epoch30 => { version == "3" || version == "4" @@ -3447,6 +3597,39 @@ impl SortitionDB { return Ok(last_rules); } + + /// Store a pre-processed reward set. + /// `sortition_id` is the first sortition ID of the prepare phase + pub fn store_preprocessed_reward_set( + sort_tx: &mut DBTx, + sortition_id: &SortitionId, + rc_info: &RewardCycleInfo, + ) -> Result<(), db_error> { + let sql = "INSERT INTO preprocessed_reward_sets (sortition_id,reward_set) VALUES (?1,?2)"; + let rc_json = serde_json::to_string(rc_info).map_err(db_error::SerializationError)?; + let args: &[&dyn ToSql] = &[sortition_id, &rc_json]; + sort_tx.execute(sql, args)?; + Ok(()) + } + + /// Get a pre-processed reawrd set. + /// `sortition_id` is the first sortition ID of the prepare phase. 
+ pub fn get_preprocessed_reward_set( + sortdb: &DBConn, + sortition_id: &SortitionId, + ) -> Result, db_error> { + let sql = "SELECT reward_set FROM preprocessed_reward_sets WHERE sortition_id = ?1"; + let args: &[&dyn ToSql] = &[sortition_id]; + let reward_set_opt: Option = + sortdb.query_row(sql, args, |row| row.get(0)).optional()?; + + let rc_info = reward_set_opt + .map(|reward_set_str| serde_json::from_str(&reward_set_str)) + .transpose() + .map_err(|_| db_error::ParseError)?; + + Ok(rc_info) + } } impl<'a> SortitionDBTx<'a> { @@ -3833,7 +4016,7 @@ impl SortitionDB { pub fn find_snapshots_with_dirty_canonical_block_pointers( conn: &DBConn, canonical_stacks_height: u64, - ) -> Result, Error> { + ) -> Result, db_error> { let dirty_sortitions : Vec = query_rows(conn, "SELECT sortition_id FROM snapshots WHERE canonical_stacks_tip_height > ?1 AND pox_valid = 1", &[&u64_to_sql(canonical_stacks_height)?])?; Ok(dirty_sortitions) } @@ -4424,7 +4607,22 @@ impl SortitionDB { conn: &Connection, ) -> Result<(ConsensusHash, BlockHeaderHash), db_error> { let sn = SortitionDB::get_canonical_burn_chain_tip(conn)?; + let cur_epoch = SortitionDB::get_stacks_epoch(conn, sn.block_height)?.expect(&format!( + "FATAL: no epoch defined for burn height {}", + sn.block_height + )); + + if cur_epoch.epoch_id >= StacksEpochId::Epoch30 { + // nakamoto behavior -- look to the stacks_chain_tip table + let res: Result<_, db_error> = conn.query_row_and_then( + "SELECT consensus_hash,block_hash FROM stacks_chain_tips WHERE sortition_id = ?", + &[&sn.sortition_id], + |row| Ok((row.get_unwrap(0), row.get_unwrap(1))), + ); + return res; + } + // epoch 2.x behavior -- look at the snapshot itself let stacks_block_hash = sn.canonical_stacks_tip_hash; let consensus_hash = sn.canonical_stacks_tip_consensus_hash; @@ -4459,6 +4657,20 @@ impl SortitionDB { ) } + pub fn get_burnchain_header_hash_by_consensus( + conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, db_error> { + let qry = 
"SELECT burn_header_hash FROM snapshots WHERE consensus_hash = ?1 AND pox_valid = 1 LIMIT 1"; + let args = [&consensus_hash]; + query_row_panic(conn, qry, &args, || { + format!( + "FATAL: multiple block snapshots for the same block with consensus hash {}", + consensus_hash + ) + }) + } + pub fn get_sortition_id_by_consensus( conn: &Connection, consensus_hash: &ConsensusHash, @@ -4778,7 +4990,9 @@ impl SortitionDB { }) } - /// Get a block commit by its committed block + /// Get a block commit by its committed block. + /// For Stacks 2.x, `block_hash` is just the hash of the block + /// For Nakamoto, `block_hash` is the StacksBlockId of the last tenure's first block pub fn get_block_commit_for_stacks_block( conn: &Connection, consensus_hash: &ConsensusHash, @@ -4823,6 +5037,18 @@ impl SortitionDB { } } + /// Given the last_tenure_id (e.g. in a block-commit in Nakamoto), find its sortition in the + /// given sortition fork. + #[cfg(test)] + pub fn get_block_snapshot_for_winning_nakamoto_tenure( + ic: &SortitionDBConn, + tip: &SortitionId, + last_tenure_id: &StacksBlockId, + ) -> Result, db_error> { + let block_hash = BlockHeaderHash(last_tenure_id.0.clone()); + Self::get_block_snapshot_for_winning_stacks_block(ic, tip, &block_hash) + } + /// Merge the result of get_stacks_header_hashes() into a BlockHeaderCache pub fn merge_block_header_cache( cache: &mut BlockHeaderCache, @@ -5049,10 +5275,35 @@ impl<'a> SortitionHandleTx<'a> { let mut sn = snapshot.clone(); sn.index_root = root_hash.clone(); - // preserve memoized stacks chain tip from this burn chain fork - sn.canonical_stacks_tip_height = parent_sn.canonical_stacks_tip_height; - sn.canonical_stacks_tip_hash = parent_sn.canonical_stacks_tip_hash; - sn.canonical_stacks_tip_consensus_hash = parent_sn.canonical_stacks_tip_consensus_hash; + let cur_epoch = + SortitionDB::get_stacks_epoch(self, snapshot.block_height)?.expect(&format!( + "FATAL: no epoch defined for burn height {}", + snapshot.block_height + )); + + if 
cur_epoch.epoch_id >= StacksEpochId::Epoch30 { + // nakamoto behavior + // look at stacks_chain_tips table + let res: Result<_, db_error> = self.deref().query_row_and_then( + "SELECT consensus_hash,block_hash,block_height FROM stacks_chain_tips WHERE sortition_id = ?", + &[&parent_snapshot.sortition_id], + |row| Ok((row.get_unwrap(0), row.get_unwrap(1), (u64::try_from(row.get_unwrap::<_, i64>(2)).expect("FATAL: block height too high")))) + ); + let ( + canonical_stacks_tip_consensus_hash, + canonical_stacks_tip_block_hash, + canonical_stacks_tip_height, + ) = res?; + sn.canonical_stacks_tip_height = canonical_stacks_tip_height; + sn.canonical_stacks_tip_hash = canonical_stacks_tip_block_hash; + sn.canonical_stacks_tip_consensus_hash = canonical_stacks_tip_consensus_hash; + } else { + // epoch 2.x behavior + // preserve memoized stacks chain tip from this burn chain fork + sn.canonical_stacks_tip_height = parent_sn.canonical_stacks_tip_height; + sn.canonical_stacks_tip_hash = parent_sn.canonical_stacks_tip_hash; + sn.canonical_stacks_tip_consensus_hash = parent_sn.canonical_stacks_tip_consensus_hash; + } self.insert_block_snapshot(&sn, pox_payout)?; @@ -5064,6 +5315,23 @@ impl<'a> SortitionHandleTx<'a> { self.insert_missed_block_commit(missed_commit)?; } + self.update_canonical_stacks_tip( + &sn.sortition_id, + &sn.canonical_stacks_tip_consensus_hash, + &sn.canonical_stacks_tip_hash, + sn.canonical_stacks_tip_height, + )?; + + #[cfg(test)] + { + let (block_consensus_hash, block_bhh) = + SortitionDB::get_canonical_stacks_chain_tip_hash(self).unwrap(); + + debug!( + "After sortition {}, canonical Stacks tip is {}/{}", + &snapshot.consensus_hash, &block_consensus_hash, &block_bhh + ); + } Ok(root_hash) } @@ -5751,6 +6019,9 @@ impl<'a> SortitionHandleTx<'a> { snapshot.block_height ); } + + let reward_cycle = reward_info.reward_cycle; + // if we've selected an anchor _and_ know of the anchor, // write the reward set information if let Some(mut reward_set) = 
reward_info.known_selected_anchor_block_owned() { @@ -5824,6 +6095,10 @@ impl<'a> SortitionHandleTx<'a> { keys.push(db_keys::pox_affirmation_map().to_string()); values.push(cur_affirmation_map.encode()); + // last reward cycle + keys.push(db_keys::last_reward_cycle_key().to_string()); + values.push(db_keys::last_reward_cycle_to_string(reward_cycle)); + pox_payout_addrs } else { // if this snapshot consumed some reward set entries AND @@ -5905,6 +6180,8 @@ impl<'a> SortitionHandleTx<'a> { values.push("".to_string()); keys.push(db_keys::pox_last_selected_anchor_txid().to_string()); values.push("".to_string()); + keys.push(db_keys::last_reward_cycle_key().to_string()); + values.push(db_keys::last_reward_cycle_to_string(0)); // no payouts vec![] @@ -6140,7 +6417,9 @@ impl<'a> SortitionHandleTx<'a> { .map(|(ch, bhh, height, _, _)| (ch, bhh, height)) } - /// Update the given tip's canonical Stacks block pointer + /// Update the given tip's canonical Stacks block pointer. + /// Does so on all sortitions of the same height as tip. 
+ /// Only used in Stacks 2.x fn update_new_block_arrivals( &mut self, tip: &BlockSnapshot, @@ -6152,7 +6431,7 @@ impl<'a> SortitionHandleTx<'a> { &best_chh, &best_bhh, &u64_to_sql(best_height)?, - &tip.sortition_id, + &u64_to_sql(tip.block_height)?, ]; debug!( @@ -6160,9 +6439,8 @@ impl<'a> SortitionHandleTx<'a> { &tip.block_height, &tip.burn_header_hash, &best_chh, &best_bhh, best_height ); self.execute("UPDATE snapshots SET canonical_stacks_tip_consensus_hash = ?1, canonical_stacks_tip_hash = ?2, canonical_stacks_tip_height = ?3 - WHERE sortition_id = ?4", args) + WHERE block_height = ?4", args) .map_err(db_error::SqliteError)?; - Ok(()) } @@ -10231,6 +10509,8 @@ pub mod tests { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut burnchain = Burnchain::regtest(path_root); diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index cf241c0017..54d5e287ad 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -37,17 +37,20 @@ use crate::chainstate::burn::SortitionId; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::index::storage::TrieFileStorage; use crate::chainstate::stacks::{StacksPrivateKey, StacksPublicKey}; +use crate::core::STACKS_EPOCH_2_05_MARKER; +use crate::core::STACKS_EPOCH_2_1_MARKER; use crate::core::STACKS_EPOCH_2_2_MARKER; use crate::core::STACKS_EPOCH_2_3_MARKER; use crate::core::STACKS_EPOCH_2_4_MARKER; +use crate::core::STACKS_EPOCH_2_5_MARKER; +use crate::core::STACKS_EPOCH_3_0_MARKER; use crate::core::{StacksEpoch, StacksEpochId}; -use crate::core::{STACKS_EPOCH_2_05_MARKER, STACKS_EPOCH_2_1_MARKER, STACKS_EPOCH_3_0_MARKER}; use crate::net::Error as net_error; use stacks_common::address::AddressHashMode; use stacks_common::codec::{write_next, Error as codec_error, StacksMessageCodec}; use 
stacks_common::types::chainstate::TrieHash; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksAddress, VRFSeed, + BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, VRFSeed, }; use stacks_common::util::hash::to_hex; use stacks_common::util::log; @@ -83,9 +86,12 @@ impl LeaderBlockCommitOp { sunset_burn: 0, block_height: block_height, burn_parent_modulus: if block_height > 0 { - ((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) as u8 + u8::try_from((block_height - 1) % BURN_BLOCK_MINED_AT_MODULUS) + .expect("FATAL: unreachable: unable to form u8 from 3-bit number") } else { - BURN_BLOCK_MINED_AT_MODULUS as u8 - 1 + u8::try_from(BURN_BLOCK_MINED_AT_MODULUS) + .expect("FATAL: unreachable: 5 is not a u8") + - 1 }, new_seed: new_seed.clone(), key_block_ptr: paired_key.block_height as u32, @@ -136,7 +142,9 @@ impl LeaderBlockCommitOp { txid: Txid([0u8; 32]), vtxindex: 0, block_height: 0, - burn_parent_modulus: BURN_BLOCK_MINED_AT_MODULUS as u8 - 1, + burn_parent_modulus: u8::try_from(BURN_BLOCK_MINED_AT_MODULUS) + .expect("FATAL: unreachable: 5 is not a u8") + - 1, burn_header_hash: BurnchainHeaderHash::zero(), } @@ -145,11 +153,13 @@ impl LeaderBlockCommitOp { #[cfg(test)] pub fn set_burn_height(&mut self, height: u64) { self.block_height = height; - self.burn_parent_modulus = if height > 0 { + let new_burn_parent_modulus = if height > 0 { (height - 1) % BURN_BLOCK_MINED_AT_MODULUS } else { BURN_BLOCK_MINED_AT_MODULUS - 1 - } as u8; + }; + self.burn_parent_modulus = u8::try_from(new_burn_parent_modulus) + .expect("FATAL: unreachable: 3-bit number is not a u8"); } pub fn expected_chained_utxo(burn_only: bool) -> u32 { @@ -162,7 +172,13 @@ impl LeaderBlockCommitOp { } pub fn burn_block_mined_at(&self) -> u64 { - self.burn_parent_modulus as u64 % BURN_BLOCK_MINED_AT_MODULUS + u64::from(self.burn_parent_modulus) % BURN_BLOCK_MINED_AT_MODULUS + } + + /// In Nakamoto, the block header hash is actually the index block hash 
of the first Nakamoto + /// block of the last tenure (the "tenure id"). This helper obtains it. + pub fn last_tenure_id(&self) -> StacksBlockId { + StacksBlockId(self.block_header_hash.0.clone()) } fn parse_data(data: &Vec) -> Option { @@ -198,8 +214,10 @@ impl LeaderBlockCommitOp { let burn_parent_modulus_and_memo_byte = data[76]; - let burn_parent_modulus = ((burn_parent_modulus_and_memo_byte & 0b111) as u64 - % BURN_BLOCK_MINED_AT_MODULUS) as u8; + let burn_parent_modulus = u8::try_from( + u64::from(burn_parent_modulus_and_memo_byte & 0b111) % BURN_BLOCK_MINED_AT_MODULUS, + ) + .expect("FATAL: unreachable: could not make u8 from a 3-bit number"); let memo = (burn_parent_modulus_and_memo_byte >> 3) & 0x1f; Some(ParsedData { @@ -282,7 +300,7 @@ impl LeaderBlockCommitOp { // the genesis block. } - if data.parent_block_ptr as u64 >= block_height { + if u64::from(data.parent_block_ptr) >= block_height { warn!( "Invalid tx: parent block back-pointer {} exceeds block height {}", data.parent_block_ptr, block_height @@ -295,7 +313,7 @@ impl LeaderBlockCommitOp { return Err(op_error::ParseError); } - if data.key_block_ptr as u64 >= block_height { + if u64::from(data.key_block_ptr) >= block_height { warn!( "Invalid tx: key block back-pointer {} exceeds block height {}", data.key_block_ptr, block_height @@ -373,7 +391,7 @@ impl LeaderBlockCommitOp { // is expected given the amount transfered. 
let burn_fee = pox_fee .expect("A 0-len output should have already errored") - .checked_mul(OUTPUTS_PER_COMMIT as u64) // total commitment is the pox_amount * outputs + .checked_mul(u64::try_from(OUTPUTS_PER_COMMIT).expect(">2^64 outputs per commit")) // total commitment is the pox_amount * outputs .ok_or_else(|| op_error::ParseError)?; if burn_fee == 0 { @@ -554,7 +572,7 @@ impl LeaderBlockCommitOp { tx: &mut SortitionHandleTx, reward_set_info: Option<&RewardSetInfo>, ) -> Result<(), op_error> { - let parent_block_height = self.parent_block_ptr as u64; + let parent_block_height = u64::from(self.parent_block_ptr); if PoxConstants::has_pox_sunset(epoch_id) { // sunset only applies in epochs prior to 2.1. After 2.1, miners can put whatever they @@ -761,6 +779,7 @@ impl LeaderBlockCommitOp { StacksEpochId::Epoch22 => self.check_epoch_commit_marker(STACKS_EPOCH_2_2_MARKER), StacksEpochId::Epoch23 => self.check_epoch_commit_marker(STACKS_EPOCH_2_3_MARKER), StacksEpochId::Epoch24 => self.check_epoch_commit_marker(STACKS_EPOCH_2_4_MARKER), + StacksEpochId::Epoch25 => self.check_epoch_commit_marker(STACKS_EPOCH_2_5_MARKER), StacksEpochId::Epoch30 => self.check_epoch_commit_marker(STACKS_EPOCH_3_0_MARKER), } } @@ -780,6 +799,7 @@ impl LeaderBlockCommitOp { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { // correct behavior -- uses *sortition height* to find the intended sortition ID let sortition_height = self @@ -846,8 +866,8 @@ impl LeaderBlockCommitOp { epoch_id: StacksEpochId, tx: &mut SortitionHandleTx, ) -> Result<(), op_error> { - let leader_key_block_height = self.key_block_ptr as u64; - let parent_block_height = self.parent_block_ptr as u64; + let leader_key_block_height = u64::from(self.key_block_ptr); + let parent_block_height = u64::from(self.parent_block_ptr); let tx_tip = tx.context.chain_tip.clone(); let apparent_sender_repr = format!("{}", &self.apparent_sender); @@ -1789,6 
+1809,8 @@ mod tests { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ), peer_version: 0x012345678, network_id: 0x9abcdef0, @@ -2334,6 +2356,8 @@ mod tests { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ), peer_version: 0x012345678, network_id: 0x9abcdef0, @@ -3035,6 +3059,8 @@ mod tests { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ), peer_version: 0x012345678, network_id: 0x9abcdef0, diff --git a/stackslib/src/chainstate/burn/operations/leader_key_register.rs b/stackslib/src/chainstate/burn/operations/leader_key_register.rs index 953a6ce7da..03bf35afe1 100644 --- a/stackslib/src/chainstate/burn/operations/leader_key_register.rs +++ b/stackslib/src/chainstate/burn/operations/leader_key_register.rs @@ -83,16 +83,27 @@ impl LeaderKeyRegisterOp { self.memo.get(0..20).map(Hash160::from_bytes).flatten() } + /// Set the miner public key hash160 for block-signing + pub fn set_nakamoto_signing_key(&mut self, pubkey_hash160: &Hash160) { + if self.memo.len() < 20 { + let mut new_memo = vec![0; 20]; + new_memo[0..self.memo.len()].copy_from_slice(&self.memo); + self.memo = new_memo; + } + self.memo[0..20].copy_from_slice(&pubkey_hash160.0); + } + fn parse_data(data: &Vec) -> Option { /* Wire format: - 0 2 3 23 55 80 - |------|--|---------------|-----------------------|---------------------------| - magic op consensus hash proving public key memo - (ignored) (ignored) + 0 2 3 23 55 75 80 + |------|--|---------------|-----------------------|-----------------------|---------| + magic op consensus hash proving public key block-signing hash160 memo + (ignored) (ignored) - Note that `data` is missing the first 3 bytes -- the magic and op have been stripped + Note that `data` is missing the first 3 bytes -- the magic and op have been stripped. + `block-signing hash160` is new to Nakamoto. 
*/ // memo can be empty, and magic + op are omitted if data.len() < 52 { @@ -180,10 +191,13 @@ impl StacksMessageCodec for LeaderKeyRegisterOp { /* Wire format: - 0 2 3 23 55 80 - |------|--|---------------|-----------------------|---------------------------| - magic op consensus hash proving public key memo - (ignored) (ignored) + 0 2 3 23 55 75 80 + |------|--|---------------|-----------------------|-----------------------|---------| + magic op consensus hash proving public key block-signing hash160 memo + (ignored) (ignored) + + Note that `data` is missing the first 3 bytes -- the magic and op have been stripped. + `block-signing hash160` is new to Nakamoto, and is contained within the first 20 bytes of `memo` */ fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &(Opcodes::LeaderKeyRegister as u8))?; diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index ab7e46fb7b..c40ed4e83a 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -58,6 +58,7 @@ use crate::chainstate::coordinator::comm::{ }; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::boot::POX_3_NAME; +use crate::chainstate::stacks::boot::POX_4_NAME; use crate::chainstate::stacks::index::marf::MARFOpenOpts; use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::{ @@ -65,7 +66,9 @@ use crate::chainstate::stacks::{ accounts::MinerReward, ChainStateBootData, ClarityTx, MinerRewardInfo, StacksChainState, StacksEpochReceipt, StacksHeaderInfo, }, - events::{StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin}, + events::{ + StacksBlockEventData, StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin, + }, miner::{signal_mining_blocked, signal_mining_ready, MinerStatus}, Error as ChainstateError, StacksBlock, StacksBlockHeader, TransactionPayload, }; @@ -87,15 +90,42 @@ pub mod 
tests; /// The 3 different states for the current /// reward cycle's relationship to its PoX anchor -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub enum PoxAnchorBlockStatus { SelectedAndKnown(BlockHeaderHash, Txid, RewardSet), SelectedAndUnknown(BlockHeaderHash, Txid), NotSelected, } -#[derive(Debug, PartialEq)] +/// The possible outcomes of processing a burnchain block. +/// Indicates whether or not we're ready to process Stacks blocks, or if not, whether or not we're +/// blocked on a Stacks 2.x anchor block or a Nakamoto anchor block +pub enum NewBurnchainBlockStatus { + /// Ready to process Stacks blocks + Ready, + /// Missing 2.x PoX anchor block + WaitForPox2x(BlockHeaderHash), + /// Missing Nakamoto anchor block. Unlike 2.x, we won't know its hash. + WaitForPoxNakamoto, +} + +impl NewBurnchainBlockStatus { + /// Test helper to convert this status into the optional hash of the missing PoX anchor block. + /// Because there are unit tests that expect a Some(..) result if PoX cannot proceed, the + /// missing Nakamoto anchor block case is converted into a placeholder Some(..) 
value + #[cfg(test)] + pub fn into_missing_block_hash(self) -> Option { + match self { + Self::Ready => None, + Self::WaitForPox2x(block_hash) => Some(block_hash), + Self::WaitForPoxNakamoto => Some(BlockHeaderHash([0x00; 32])), + } + } +} + +#[derive(Debug, PartialEq, Serialize, Deserialize)] pub struct RewardCycleInfo { + pub reward_cycle: u64, pub anchor_status: PoxAnchorBlockStatus, } @@ -137,7 +167,7 @@ impl RewardCycleInfo { pub trait BlockEventDispatcher { fn announce_block( &self, - block: &StacksBlock, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent: &StacksBlockId, @@ -193,18 +223,18 @@ pub struct ChainsCoordinator< FE: FeeEstimator + ?Sized, B: BurnchainHeaderReader, > { - canonical_sortition_tip: Option, - burnchain_blocks_db: BurnchainDB, - chain_state_db: StacksChainState, - sortition_db: SortitionDB, - burnchain: Burnchain, - atlas_db: Option, - dispatcher: Option<&'a T>, - cost_estimator: Option<&'a mut CE>, - fee_estimator: Option<&'a mut FE>, - reward_set_provider: R, - notifier: N, - atlas_config: AtlasConfig, + pub canonical_sortition_tip: Option, + pub burnchain_blocks_db: BurnchainDB, + pub chain_state_db: StacksChainState, + pub sortition_db: SortitionDB, + pub burnchain: Burnchain, + pub atlas_db: Option, + pub dispatcher: Option<&'a T>, + pub cost_estimator: Option<&'a mut CE>, + pub fee_estimator: Option<&'a mut FE>, + pub reward_set_provider: R, + pub notifier: N, + pub atlas_config: AtlasConfig, config: ChainsCoordinatorConfig, burnchain_indexer: B, } @@ -220,6 +250,9 @@ pub enum Error { DBError(DBError), NotPrepareEndBlock, NotPoXAnchorBlock, + NotInPreparePhase, + RewardSetAlreadyProcessed, + PoXAnchorBlockRequired, } impl From for Error { @@ -266,6 +299,40 @@ impl RewardSetProvider for OnChainRewardSetProvider { let cur_epoch = SortitionDB::get_stacks_epoch(sortdb.conn(), current_burn_height)?.expect( &format!("FATAL: no epoch for burn height {}", current_burn_height), ); + if 
cur_epoch.epoch_id < StacksEpochId::Epoch30 { + // Stacks 2.x epoch + return self.get_reward_set_epoch2( + current_burn_height, + chainstate, + burnchain, + sortdb, + block_id, + cur_epoch, + ); + } else { + // Nakamoto epoch + return self.get_reward_set_nakamoto( + current_burn_height, + chainstate, + burnchain, + sortdb, + block_id, + ); + } + } +} + +impl OnChainRewardSetProvider { + fn get_reward_set_epoch2( + &self, + // Todo: `current_burn_height` is a misleading name: should be the `cycle_start_burn_height` + current_burn_height: u64, + chainstate: &mut StacksChainState, + burnchain: &Burnchain, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + cur_epoch: StacksEpoch, + ) -> Result { match cur_epoch.epoch_id { StacksEpochId::Epoch10 | StacksEpochId::Epoch20 @@ -277,7 +344,7 @@ impl RewardSetProvider for OnChainRewardSetProvider { info!("PoX reward cycle defaulting to burn in Epochs 2.2 and 2.3"); return Ok(RewardSet::empty()); } - StacksEpochId::Epoch24 | StacksEpochId::Epoch30 => { + StacksEpochId::Epoch24 => { // Epoch 2.4 computes reward sets, but *only* if PoX-3 is active if burnchain .pox_constants @@ -292,6 +359,21 @@ impl RewardSetProvider for OnChainRewardSetProvider { return Ok(RewardSet::empty()); } } + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + // Epoch 2.5 and 3.0 compute reward sets, but *only* if PoX-4 is active + if burnchain + .pox_constants + .active_pox_contract(current_burn_height) + != POX_4_NAME + { + // Note: this should not happen in mainnet or testnet, because the no reward cycle start height + // exists between Epoch 2.5's instantiation height and the pox-4 activation height. 
+ // However, this *will* happen in testing if Epoch 2.5's instantiation height is set == a reward cycle + // start height + info!("PoX reward cycle defaulting to burn in Epoch 2.5 because cycle start is before PoX-4 activation"); + return Ok(RewardSet::empty()); + } + } }; let registered_addrs = @@ -347,8 +429,8 @@ impl< dispatcher: &'a mut T, comms: CoordinatorReceivers, atlas_config: AtlasConfig, - cost_estimator: Option<&mut CE>, - fee_estimator: Option<&mut FE>, + cost_estimator: Option<&'a mut CE>, + fee_estimator: Option<&'a mut FE>, miner_status: Arc>, burnchain_indexer: B, atlas_db: AtlasDB, @@ -392,52 +474,81 @@ impl< burnchain_indexer, }; + let mut nakamoto_available = false; loop { - // timeout so that we handle Ctrl-C a little gracefully - let bits = comms.wait_on(); - if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { - signal_mining_blocked(miner_status.clone()); - debug!("Received new stacks block notice"); - match inst.handle_new_stacks_block() { - Ok(missing_block_opt) => { - if missing_block_opt.is_some() { - debug!( - "Missing affirmed anchor block: {:?}", - &missing_block_opt.as_ref().expect("unreachable") - ); - } - } - Err(e) => { - warn!("Error processing new stacks block: {:?}", e); - } + if nakamoto_available + || inst + .can_process_nakamoto() + .expect("FATAL: could not determine if Nakamoto is available") + { + // short-circuit to avoid gratuitous I/O + nakamoto_available = true; + if !inst.handle_comms_nakamoto(&comms, miner_status.clone()) { + return; + } + } else { + if !inst.handle_comms_epoch2(&comms, miner_status.clone()) { + return; } + } + } + } - signal_mining_ready(miner_status.clone()); + /// This is the Stacks 2.x coordinator loop body, which handles communications + /// from the given `comms`. It returns `true` if the coordinator is still running, and `false` + /// if not. 
+ pub fn handle_comms_epoch2( + &mut self, + comms: &CoordinatorReceivers, + miner_status: Arc>, + ) -> bool { + // timeout so that we handle Ctrl-C a little gracefully + let bits = comms.wait_on(); + if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received new stacks block notice"); + match self.handle_new_stacks_block() { + Ok(missing_block_opt) => { + if missing_block_opt.is_some() { + debug!( + "Missing affirmed anchor block: {:?}", + &missing_block_opt.as_ref().expect("unreachable") + ); + } + } + Err(e) => { + warn!("Error processing new stacks block: {:?}", e); + } } - if (bits & (CoordinatorEvents::NEW_BURN_BLOCK as u8)) != 0 { - signal_mining_blocked(miner_status.clone()); - debug!("Received new burn block notice"); - match inst.handle_new_burnchain_block() { - Ok(missing_block_opt) => { - if missing_block_opt.is_some() { - debug!( - "Missing canonical anchor block {}", - &missing_block_opt.clone().unwrap() - ); - } + + signal_mining_ready(miner_status.clone()); + } + if (bits & (CoordinatorEvents::NEW_BURN_BLOCK as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received new burn block notice"); + match self.handle_new_burnchain_block() { + Ok(burn_block_status) => match burn_block_status { + NewBurnchainBlockStatus::Ready => {} + NewBurnchainBlockStatus::WaitForPox2x(block_hash) => { + debug!("Missing canonical Stacks 2.x anchor block {}", &block_hash,); } - Err(e) => { - warn!("Error processing new burn block: {:?}", e); + NewBurnchainBlockStatus::WaitForPoxNakamoto => { + debug!("Missing canonical Nakamoto anchor block"); } + }, + Err(e) => { + warn!("Error processing new burn block: {:?}", e); } - signal_mining_ready(miner_status.clone()); - } - if (bits & (CoordinatorEvents::STOP as u8)) != 0 { - signal_mining_blocked(miner_status.clone()); - debug!("Received stop notice"); - return; } + signal_mining_ready(miner_status.clone()); + } + if (bits & 
(CoordinatorEvents::STOP as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received stop notice"); + return false; } + + return true; } } @@ -562,7 +673,7 @@ pub fn get_reward_cycle_info( burnchain: &Burnchain, burnchain_db: &BurnchainDB, chain_state: &mut StacksChainState, - sort_db: &SortitionDB, + sort_db: &mut SortitionDB, provider: &U, always_use_affirmation_maps: bool, ) -> Result, Error> { @@ -570,19 +681,21 @@ pub fn get_reward_cycle_info( &format!("FATAL: no epoch defined for burn height {}", burn_height), ); - if burnchain.is_reward_cycle_start(burn_height) { + let reward_cycle_info = if burnchain.is_reward_cycle_start(burn_height) { + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_height) + .expect("FATAL: no reward cycle for burn height"); + if burnchain .pox_constants .is_after_pox_sunset_end(burn_height, epoch_at_height.epoch_id) { return Ok(Some(RewardCycleInfo { + reward_cycle, anchor_status: PoxAnchorBlockStatus::NotSelected, })); } - let reward_cycle = burnchain - .block_height_to_reward_cycle(burn_height) - .expect("FATAL: no reward cycle for burn height"); debug!("Beginning reward cycle"; "burn_height" => burn_height, "reward_cycle" => reward_cycle, @@ -638,27 +751,65 @@ pub fn get_reward_cycle_info( ); PoxAnchorBlockStatus::SelectedAndUnknown(stacks_block_hash, txid) }; - Ok(Some(RewardCycleInfo { anchor_status })) + Ok(Some(RewardCycleInfo { + reward_cycle, + anchor_status, + })) } else { debug!( "PoX anchor block NOT chosen for reward cycle {} at burn height {}", reward_cycle, burn_height ); Ok(Some(RewardCycleInfo { + reward_cycle, anchor_status: PoxAnchorBlockStatus::NotSelected, })) } } else { Ok(None) + }; + + if let Ok(Some(reward_cycle_info)) = reward_cycle_info.as_ref() { + // cache the reward cycle info as of the first sortition in the prepare phase, so that + // the Nakamoto epoch can go find it later + let ic = sort_db.index_handle(sortition_tip); + let prev_reward_cycle = burnchain + 
.block_height_to_reward_cycle(burn_height) + .expect("FATAL: no reward cycle for burn height"); + + if prev_reward_cycle > 1 { + let prepare_phase_start = burnchain + .pox_constants + .prepare_phase_start(burnchain.first_block_height, prev_reward_cycle - 1); + let first_prepare_sn = + SortitionDB::get_ancestor_snapshot(&ic, prepare_phase_start, sortition_tip)? + .expect("FATAL: no start-of-prepare-phase sortition"); + + let mut tx = sort_db.tx_begin()?; + if SortitionDB::get_preprocessed_reward_set(&mut tx, &first_prepare_sn.sortition_id)? + .is_none() + { + SortitionDB::store_preprocessed_reward_set( + &mut tx, + &first_prepare_sn.sortition_id, + &reward_cycle_info, + )?; + } + tx.commit()?; + } } + reward_cycle_info } -struct PaidRewards { - pox: Vec<(PoxAddress, u64)>, - burns: u64, +/// PoX payout event to be sent to connected event observers +pub struct PaidRewards { + pub pox: Vec<(PoxAddress, u64)>, + pub burns: u64, } -fn calculate_paid_rewards(ops: &[BlockstackOperationType]) -> PaidRewards { +/// Determine the rewards paid for a given set of burnchain operations. All of these operations +/// ought to be from the same burnchain block. +pub fn calculate_paid_rewards(ops: &[BlockstackOperationType]) -> PaidRewards { let mut reward_recipients: HashMap<_, u64> = HashMap::new(); let mut burn_amt = 0; for op in ops.iter() { @@ -686,7 +837,7 @@ fn calculate_paid_rewards(ops: &[BlockstackOperationType]) -> PaidRewards { } } -fn dispatcher_announce_burn_ops( +pub fn dispatcher_announce_burn_ops( dispatcher: &T, burn_header: &BurnchainBlockHeader, paid_rewards: PaidRewards, @@ -2136,9 +2287,60 @@ impl< } /// Outermost call to process a burnchain block. + /// Will call the Stacks 2.x or Nakamoto handler, depending on whether or not /// Not called internally. 
- pub fn handle_new_burnchain_block(&mut self) -> Result, Error> { - self.inner_handle_new_burnchain_block(&mut HashSet::new()) + pub fn handle_new_burnchain_block(&mut self) -> Result { + let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; + let epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn())?; + let target_epoch_index = + StacksEpoch::find_epoch(&epochs, canonical_burnchain_tip.block_height) + .expect("FATAL: epoch not defined for burnchain height"); + let target_epoch = epochs + .get(target_epoch_index) + .expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); + if target_epoch.epoch_id < StacksEpochId::Epoch30 { + // burnchain has not yet advanced to epoch 3.0 + return self + .handle_new_epoch2_burnchain_block(&mut HashSet::new()) + .and_then(|block_hash_opt| { + if let Some(block_hash) = block_hash_opt { + Ok(NewBurnchainBlockStatus::WaitForPox2x(block_hash)) + } else { + Ok(NewBurnchainBlockStatus::Ready) + } + }); + } + + // burnchain has advanced to epoch 3.0, but has our sortition DB? + let canonical_snapshot = match self.canonical_sortition_tip.as_ref() { + Some(sn_tip) => SortitionDB::get_block_snapshot(self.sortition_db.conn(), sn_tip)? 
+ .expect(&format!( + "FATAL: do not have previously-calculated highest valid sortition tip {}", + sn_tip + )), + None => SortitionDB::get_canonical_burn_chain_tip(&self.sortition_db.conn())?, + }; + let target_epoch_index = StacksEpoch::find_epoch(&epochs, canonical_snapshot.block_height) + .expect("FATAL: epoch not defined for BlockSnapshot height"); + let target_epoch = epochs + .get(target_epoch_index) + .expect("FATAL: StacksEpoch::find_epoch() returned an invalid index"); + + if target_epoch.epoch_id < StacksEpochId::Epoch30 { + // need to catch the sortition DB up + self.handle_new_epoch2_burnchain_block(&mut HashSet::new())?; + } + + // proceed to process sortitions in epoch 3.0 + self.handle_new_nakamoto_burnchain_block() + .and_then(|can_proceed| { + if can_proceed { + Ok(NewBurnchainBlockStatus::Ready) + } else { + // missing PoX anchor block, but unlike in 2.x, we don't know what it is! + Ok(NewBurnchainBlockStatus::WaitForPoxNakamoto) + } + }) } /// Are affirmation maps active during the epoch? @@ -2156,7 +2358,7 @@ impl< /// this happens, *and* if re-processing the new affirmed history is *blocked on* the /// unavailability of a PoX anchor block that *must now* exist, then return the hash of this /// anchor block. - fn inner_handle_new_burnchain_block( + pub fn handle_new_epoch2_burnchain_block( &mut self, already_processed_burn_blocks: &mut HashSet, ) -> Result, Error> { @@ -2200,7 +2402,6 @@ impl< }; let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; - // let canonical_affirmation_map = self.get_canonical_affirmation_map()?; let canonical_affirmation_map = self.get_canonical_affirmation_map(&canonical_snapshot.sortition_id)?; @@ -2267,6 +2468,19 @@ impl< for unprocessed_block in sortitions_to_process.into_iter() { let BurnchainBlockData { header, ops } = unprocessed_block; + + // only evaluate epoch 2.x. 
+ // NOTE: epoch 3 starts _right after_ the first block in the first epoch3 reward cycle, + // so we use the 2.x rules to process the PoX reward set. + let sortition_epoch = + SortitionDB::get_stacks_epoch(self.sortition_db.conn(), header.block_height)? + .expect("FATAL: no epoch defined for a valid block height"); + + if sortition_epoch.epoch_id >= StacksEpochId::Epoch30 { + // stop processing + break; + } + if already_processed_burn_blocks.contains(&header.block_hash) { // don't re-process something we recursively processed already, by means of finding // a heretofore missing anchor block @@ -2348,7 +2562,6 @@ impl< ); stacks_blocks_to_reaccept.push(( sortition.consensus_hash.clone(), - stacks_block_header.anchored_header.parent().clone(), sortition.winning_stacks_block_hash.clone(), stacks_block_header.anchored_header.height(), )); @@ -2362,7 +2575,6 @@ impl< } sortition } else { - // new sortition -- go evaluate it. // bind a reference here to avoid tripping up the borrow-checker let dispatcher_ref = &self.dispatcher; let (next_snapshot, _) = self @@ -2400,7 +2612,7 @@ impl< { // get borrow checker to drop sort_tx let mut sort_tx = self.sortition_db.tx_begin()?; - for (ch, parent_bhh, bhh, height) in stacks_blocks_to_reaccept.into_iter() { + for (ch, bhh, height) in stacks_blocks_to_reaccept.into_iter() { debug!( "Check if Stacks block {}/{} height {} is compatible with `{}`", &ch, &bhh, height, &heaviest_am @@ -2422,7 +2634,7 @@ impl< "Stacks block {}/{} height {} is compatible with `{}`; will reaccept", &ch, &bhh, height, &heaviest_am ); - compatible_stacks_blocks.push((ch, parent_bhh, bhh, height)); + compatible_stacks_blocks.push((ch, bhh, height)); } else { debug!("Stacks block {}/{} height {} is NOT compatible with `{}`; will NOT reaccept", &ch, &bhh, height, &heaviest_am); } @@ -2433,7 +2645,7 @@ impl< let mut sortition_db_handle = SortitionHandleTx::begin(&mut self.sortition_db, &next_snapshot.sortition_id)?; - for (ch, _parent_bhh, bhh, height) in 
compatible_stacks_blocks.into_iter() { + for (ch, bhh, height) in compatible_stacks_blocks.into_iter() { debug!("Re-accept Stacks block {}/{} height {}", &ch, &bhh, height); revalidated_stacks_block = true; sortition_db_handle.set_stacks_block_accepted(&ch, &bhh, height)?; @@ -2597,14 +2809,14 @@ impl< &self.burnchain, &self.burnchain_blocks_db, &mut self.chain_state_db, - &self.sortition_db, + &mut self.sortition_db, &self.reward_set_provider, self.config.always_use_affirmation_maps, ) } /// Process any Atlas attachment events and forward them to the Atlas subsystem - fn process_atlas_attachment_events( + pub fn process_atlas_attachment_events( atlas_db: Option<&mut AtlasDB>, atlas_config: &AtlasConfig, block_receipt: &StacksEpochReceipt, @@ -3109,7 +3321,7 @@ impl< self.canonical_sortition_tip = Some(prep_end.sortition_id); // Start processing from the beginning of the new PoX reward set - self.inner_handle_new_burnchain_block(already_processed_burn_blocks) + self.handle_new_epoch2_burnchain_block(already_processed_burn_blocks) } } diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index e4d2df1380..0e3dda311e 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -72,6 +72,8 @@ use crate::chainstate::stacks::boot::POX_3_NAME; use crate::chainstate::stacks::db::{ accounts::MinerReward, ClarityTx, StacksChainState, StacksHeaderInfo, }; +use crate::chainstate::stacks::events::StacksBlockEventData; +use crate::chainstate::stacks::miner::BlockBuilder; use crate::chainstate::stacks::*; use crate::clarity_vm::clarity::ClarityConnection; use crate::core; @@ -427,7 +429,7 @@ pub struct NullEventDispatcher; impl BlockEventDispatcher for NullEventDispatcher { fn announce_block( &self, - _block: &StacksBlock, + _block: &StacksBlockEventData, _metadata: &StacksHeaderInfo, _receipts: &[StacksTransactionReceipt], _parent: &StacksBlockId, @@ -544,6 +546,8 @@ pub fn 
get_burnchain(path: &str, pox_consts: Option) -> Burnchain u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ) }); b @@ -622,7 +626,7 @@ fn make_genesis_block_with_recipients( let mut tx = StacksTransaction::new( TransactionVersion::Testnet, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx.chain_id = 0x80000000; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -854,7 +858,7 @@ fn make_stacks_block_with_input( let mut tx = StacksTransaction::new( TransactionVersion::Testnet, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx.chain_id = 0x80000000; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -991,6 +995,8 @@ fn missed_block_commits_2_05() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1311,6 +1317,8 @@ fn missed_block_commits_2_1() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -1655,6 +1663,8 @@ fn late_block_commits_2_1() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2712,6 +2722,7 @@ fn test_pox_btc_ops() { let sunset_ht = 8000; let pox_v1_unlock_ht = u32::MAX; let pox_v2_unlock_ht = u32::MAX; + let pox_v3_unlock_ht = u32::MAX; let pox_consts = Some(PoxConstants::new( 5, 3, @@ -2722,6 +2733,8 @@ fn test_pox_btc_ops() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + pox_v3_unlock_ht, + u32::MAX, u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -2903,7 +2916,8 @@ fn test_pox_btc_ops() { stacker_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), balance as u128, "No lock should be 
active" @@ -2994,6 +3008,7 @@ fn test_stx_transfer_btc_ops() { let pox_v1_unlock_ht = u32::MAX; let pox_v2_unlock_ht = u32::MAX; + let pox_v3_unlock_ht = u32::MAX; let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 5, @@ -3005,6 +3020,8 @@ fn test_stx_transfer_btc_ops() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + pox_v3_unlock_ht, + u32::MAX, u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3208,7 +3225,8 @@ fn test_stx_transfer_btc_ops() { sender_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), (balance as u128) - transfer_amt, "Transfer should have decremented balance" @@ -3217,7 +3235,8 @@ fn test_stx_transfer_btc_ops() { recipient_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), transfer_amt, "Recipient should have incremented balance" @@ -3227,7 +3246,8 @@ fn test_stx_transfer_btc_ops() { sender_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), balance as u128, ); @@ -3235,7 +3255,8 @@ fn test_stx_transfer_btc_ops() { recipient_balance.get_available_balance_at_burn_block( burn_height as u64, pox_v1_unlock_ht, - pox_v2_unlock_ht + pox_v2_unlock_ht, + pox_v3_unlock_ht, ), 0, ); @@ -3325,7 +3346,9 @@ fn test_sbtc_ops() { let pox_v1_unlock_ht = 12; let pox_v2_unlock_ht = 14; - let pox_3_activation_ht = 16; + let pox_v3_unlock_ht = 16; + let pox_3_activation_ht = 15; + let pox_4_activation_ht = 16; let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 100, @@ -3337,7 +3360,9 @@ fn test_sbtc_ops() { sunset_ht, pox_v1_unlock_ht, pox_v2_unlock_ht, + pox_v3_unlock_ht, pox_3_activation_ht, + pox_4_activation_ht, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3683,6 +3708,8 @@ fn test_delegate_stx_btc_ops() { 
pox_v1_unlock_ht, pox_v2_unlock_ht, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -3988,6 +4015,8 @@ fn test_initial_coinbase_reward_distributions() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4227,6 +4256,8 @@ fn test_epoch_switch_cost_contract_instantiation() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4428,6 +4459,8 @@ fn test_epoch_switch_pox_2_contract_instantiation() { 10, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -4621,7 +4654,20 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let _r = std::fs::remove_dir_all(path); let sunset_ht = 8000; - let pox_consts = Some(PoxConstants::new(6, 3, 3, 25, 5, 10, sunset_ht, 10, 14, 16)); + let pox_consts = Some(PoxConstants::new( + 6, + 3, + 3, + 25, + 5, + 10, + sunset_ht, + 10, + 14, + u32::MAX, + 16, + u32::MAX, + )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); let vrf_keys: Vec<_> = (0..25).map(|_| VRFPrivateKey::new()).collect(); @@ -4825,6 +4871,8 @@ fn atlas_stop_start() { 10, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5134,6 +5182,8 @@ fn test_epoch_verify_active_pox_contract() { pox_v1_unlock_ht, pox_v2_unlock_ht, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5425,6 +5475,8 @@ fn test_sortition_with_sunset() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -5734,6 +5786,8 @@ fn test_sortition_with_sunset_and_epoch_switch() { v1_unlock_ht, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let burnchain_conf = get_burnchain(path, pox_consts.clone()); @@ -6083,6 +6137,8 @@ fn 
test_pox_processable_block_in_different_pox_forks() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); let b = get_burnchain(path, pox_consts.clone()); let b_blind = get_burnchain(path_blinded, pox_consts.clone()); @@ -6202,7 +6258,10 @@ fn test_pox_processable_block_in_different_pox_forks() { ); loop { - let missing_anchor_opt = coord.handle_new_burnchain_block().unwrap(); + let missing_anchor_opt = coord + .handle_new_burnchain_block() + .unwrap() + .into_missing_block_hash(); if let Some(missing_anchor) = missing_anchor_opt { eprintln!( "Unblinded database reports missing anchor block {:?} (ix={})", diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs new file mode 100644 index 0000000000..4e38980fa5 --- /dev/null +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -0,0 +1,843 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::collections::VecDeque; +use std::sync::Arc; +use std::sync::Mutex; + +use clarity::vm::database::BurnStateDB; + +use crate::burnchains::db::BurnchainBlockData; +use crate::burnchains::db::BurnchainDB; +use crate::burnchains::db::BurnchainHeaderReader; +use crate::burnchains::Burnchain; +use crate::burnchains::BurnchainBlockHeader; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::operations::leader_block_commit::RewardSetInfo; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::coordinator::comm::{ + CoordinatorChannels, CoordinatorCommunication, CoordinatorEvents, CoordinatorNotices, + CoordinatorReceivers, +}; + +use crate::chainstate::coordinator::{ + calculate_paid_rewards, dispatcher_announce_burn_ops, BlockEventDispatcher, ChainsCoordinator, + Error, OnChainRewardSetProvider, PaidRewards, PoxAnchorBlockStatus, RewardCycleInfo, + RewardSetProvider, +}; +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::boot::RewardSet; +use crate::chainstate::stacks::db::StacksBlockHeaderTypes; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::miner::signal_mining_blocked; +use crate::chainstate::stacks::miner::signal_mining_ready; +use crate::chainstate::stacks::miner::MinerStatus; +use crate::chainstate::stacks::Error as ChainstateError; + +use crate::cost_estimates::CostEstimator; +use crate::cost_estimates::FeeEstimator; + +use crate::monitoring::increment_stx_blocks_processed_counter; + +use crate::net::Error as NetError; + +use crate::util_lib::db::Error as DBError; + +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::chainstate::SortitionId; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::StacksEpoch; +use stacks_common::types::StacksEpochId; + +#[cfg(test)] +pub mod tests; + +impl OnChainRewardSetProvider { + pub 
fn get_reward_set_nakamoto( + &self, + // NOTE: this value is the first burnchain block in the prepare phase which has a Stacks + // block (unlike in Stacks 2.x, where this is the first block of the reward phase) + current_burn_height: u64, + chainstate: &mut StacksChainState, + burnchain: &Burnchain, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + ) -> Result { + let registered_addrs = + chainstate.get_reward_addresses(burnchain, sortdb, current_burn_height, block_id)?; + + let liquid_ustx = chainstate.get_liquid_ustx(block_id); + + let (threshold, participation) = StacksChainState::get_reward_threshold_and_participation( + &burnchain.pox_constants, + ®istered_addrs[..], + liquid_ustx, + ); + + let cur_epoch = + SortitionDB::get_stacks_epoch(sortdb.conn(), current_burn_height)?.expect(&format!( + "FATAL: no epoch defined for burn height {}", + current_burn_height + )); + + if cur_epoch.epoch_id >= StacksEpochId::Epoch30 && participation == 0 { + // no one is stacking + error!("No PoX participation"); + return Err(Error::PoXAnchorBlockRequired); + } + + info!("PoX reward cycle threshold computed"; + "burn_height" => current_burn_height, + "threshold" => threshold, + "participation" => participation, + "liquid_ustx" => liquid_ustx, + "registered_addrs" => registered_addrs.len()); + + Ok(StacksChainState::make_reward_set( + threshold, + registered_addrs, + cur_epoch.epoch_id, + )) + } +} + +/// Find the ordered sequence of sortitions from a given burnchain block back to the start of +/// the burnchain block's reward cycle's prepare phase. If the burnchain block is not in a prepare +/// phase, then the returned list is empty. If the burnchain block is in a prepare phase, then all +/// consensus hashes back to the first block in the prepare phase are loaded and returned in +/// ascending height order. 
+fn find_prepare_phase_sortitions( + sort_db: &SortitionDB, + burnchain: &Burnchain, + sortition_tip: &SortitionId, +) -> Result, Error> { + let sn = SortitionDB::get_block_snapshot(sort_db.conn(), sortition_tip)? + .ok_or(DBError::NotFoundError)?; + + let mut height = sn.block_height; + let mut sns = vec![sn]; + + while burnchain.is_in_prepare_phase(height) && height > 0 { + let Some(sn) = SortitionDB::get_block_snapshot( + sort_db.conn(), + &sns.last() + .as_ref() + .expect("FATAL: unreachable: sns is never empty") + .parent_sortition_id, + )? + else { + break; + }; + height = sn.block_height.saturating_sub(1); + sns.push(sn); + } + + sns.reverse(); + Ok(sns) +} + +/// Try to get the reward cycle information for a Nakamoto reward cycle. +/// In Nakamoto, the PoX anchor block for reward cycle _R_ is the _first_ Stacks block mined in the +/// _last_ tenure of _R - 1_'s reward phase phase (i.e. which takes place toward the end of reward cycle). +/// The reason it must be this way is because its hash will be in the block-commit for the first +/// prepare-phase tenure of cycle _R_ (which is required for the PoX ancestry query in the +/// block-commit validation logic). +/// +/// If this method returns None, the caller should try again when there are more Stacks blocks. In +/// Nakamoto, every reward cycle _must_ have a PoX anchor block; otherwise, the chain halts. +/// +/// N.B. this method assumes that the prepare phase is comprised _solely_ of Nakamoto tenures. It +/// will not work if any of the prepare-phase tenures are from epoch 2.x. +/// +/// Returns Ok(Some(reward-cycle-info)) if we found the first sortition in the prepare phase. +/// Returns Ok(None) if we're still waiting for the PoX anchor block sortition +/// Returns Err(Error::NotInPreparePhase) if `burn_height` is not in the prepare phase +/// Returns Err(Error::RewardCycleAlreadyProcessed) if the reward set for this reward cycle has +/// already been processed. 
+pub fn get_nakamoto_reward_cycle_info( + burn_height: u64, + sortition_tip: &SortitionId, + burnchain: &Burnchain, + chain_state: &mut StacksChainState, + sort_db: &mut SortitionDB, + provider: &U, +) -> Result, Error> { + let epoch_at_height = SortitionDB::get_stacks_epoch(sort_db.conn(), burn_height)? + .expect(&format!( + "FATAL: no epoch defined for burn height {}", + burn_height + )) + .epoch_id; + + assert!( + epoch_at_height >= StacksEpochId::Epoch30, + "FATAL: called a nakamoto function outside of epoch 3" + ); + + if !burnchain.is_in_prepare_phase(burn_height) { + return Err(Error::NotInPreparePhase); + } + + // calculating the reward set for the _next_ reward cycle + let reward_cycle = burnchain + .block_height_to_reward_cycle(burn_height) + .expect("FATAL: no reward cycle for burn height") + + 1; + + debug!("Processing reward set for Nakamoto reward cycle"; + "burn_height" => burn_height, + "reward_cycle" => reward_cycle, + "reward_cycle_length" => burnchain.pox_constants.reward_cycle_length, + "prepare_phase_length" => burnchain.pox_constants.prepare_length); + + // find the last tenure-start Stacks block processed in the preceeding prepare phase + // (i.e. the first block in the tenure of the parent of the first Stacks block processed in the prepare phase). + // Note that we may not have processed it yet. But, if we do find it, then it's + // unique (and since Nakamoto Stacks blocks are processed in order, the anchor block + // cannot change later). + let prepare_phase_sortitions = + find_prepare_phase_sortitions(sort_db, burnchain, sortition_tip)?; + + // did we already calculate the reward cycle info? If so, then return it. + let first_sortition_id = if let Some(first_sn) = prepare_phase_sortitions.first() { + if let Some(persisted_reward_cycle_info) = + SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? 
+ { + return Ok(Some(persisted_reward_cycle_info)); + } + first_sn.sortition_id.clone() + } else { + // can't do anything + return Ok(None); + }; + + for sn in prepare_phase_sortitions.into_iter() { + if !sn.sortition { + continue; + } + + // find the first Stacks block processed in the prepare phase + let Some(prepare_start_block_header) = + NakamotoChainState::get_nakamoto_tenure_start_block_header( + chain_state.db(), + &sn.consensus_hash, + )? + else { + // no header for this snapshot (possibly invalid) + continue; + }; + + let parent_block_id = &prepare_start_block_header + .anchored_header + .as_stacks_nakamoto() + .expect("FATAL: queried non-Nakamoto tenure start header") + .parent_block_id; + + // find the tenure-start block of the tenure of the parent of this Stacks block. + // in epoch 2, this is the preceding anchor block + // in nakamoto, this is the tenure-start block of the preceding tenure + let parent_block_header = + NakamotoChainState::get_block_header(chain_state.db(), &parent_block_id)? + .expect("FATAL: no parent for processed Stacks block in prepare phase"); + + let anchor_block_header = match &parent_block_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(..) => parent_block_header, + StacksBlockHeaderTypes::Nakamoto(..) => { + NakamotoChainState::get_nakamoto_tenure_start_block_header( + chain_state.db(), + &parent_block_header.consensus_hash, + )? + .expect("FATAL: no parent for processed Stacks block in prepare phase") + } + }; + + let anchor_block_sn = SortitionDB::get_block_snapshot_consensus( + sort_db.conn(), + &anchor_block_header.consensus_hash, + )? 
+ .expect("FATAL: no snapshot for winning PoX anchor block"); + + // make sure the `anchor_block` field is the same as whatever goes into the block-commit, + // or PoX ancestry queries won't work + let (block_id, stacks_block_hash) = match anchor_block_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(header) => ( + StacksBlockId::new(&anchor_block_header.consensus_hash, &header.block_hash()), + header.block_hash(), + ), + StacksBlockHeaderTypes::Nakamoto(header) => { + (header.block_id(), BlockHeaderHash(header.block_id().0)) + } + }; + + let txid = anchor_block_sn.winning_block_txid; + + info!( + "Anchor block selected for cycle {}: (ch {}) {}", + reward_cycle, &anchor_block_header.consensus_hash, &block_id + ); + + let reward_set = + provider.get_reward_set(burn_height, chain_state, burnchain, sort_db, &block_id)?; + + debug!( + "Stacks anchor block (ch {}) {} cycle {} is processed", + &anchor_block_header.consensus_hash, &block_id, reward_cycle + ); + let anchor_status = + PoxAnchorBlockStatus::SelectedAndKnown(stacks_block_hash, txid, reward_set); + + let rc_info = RewardCycleInfo { + reward_cycle, + anchor_status, + }; + + // persist this + let mut tx = sort_db.tx_begin()?; + SortitionDB::store_preprocessed_reward_set(&mut tx, &first_sortition_id, &rc_info)?; + tx.commit()?; + + return Ok(Some(rc_info)); + } + + // no stacks block known yet + info!("No PoX anchor block known yet for cycle {}", reward_cycle); + return Ok(None); +} + +/// Get the next PoX recipients in the Nakamoto epoch. +/// This is a little different than epoch 2.x: +/// * we're guaranteed to have an anchor block +/// * we pre-compute the reward set at the start of the prepare phase, so we only need to load it +/// up here at the start of the reward phase. 
+pub fn get_nakamoto_next_recipients( + sortition_tip: &BlockSnapshot, + sort_db: &mut SortitionDB, + burnchain: &Burnchain, +) -> Result, Error> { + let reward_cycle_info = if burnchain.is_reward_cycle_start(sortition_tip.block_height + 1) { + // load up new reward cycle info so we can start using *that* + let prepare_phase_sortitions = + find_prepare_phase_sortitions(sort_db, burnchain, &sortition_tip.parent_sortition_id)?; + + // NOTE: this must panic because Nakamoto's first reward cycle has stackers + let first_sn = prepare_phase_sortitions + .first() + .expect("FATAL: unreachable: no prepare-phase sortitions at start of reward cycle"); + + debug!("Get pre-processed reward set"; + "sortition_id" => %first_sn.sortition_id); + + // NOTE: if we don't panic here, we'll panic later in a more obscure way + Some( + SortitionDB::get_preprocessed_reward_set(sort_db.conn(), &first_sn.sortition_id)? + .expect(&format!( + "No reward set for start of reward cycle beginning with block {}", + &sortition_tip.block_height + )), + ) + } else { + None + }; + sort_db + .get_next_block_recipients(burnchain, sortition_tip, reward_cycle_info.as_ref()) + .map_err(Error::from) +} + +impl< + 'a, + T: BlockEventDispatcher, + N: CoordinatorNotices, + U: RewardSetProvider, + CE: CostEstimator + ?Sized, + FE: FeeEstimator + ?Sized, + B: BurnchainHeaderReader, + > ChainsCoordinator<'a, T, N, U, CE, FE, B> +{ + /// Check to see if we're in the last of the 2.x epochs, and we have the first PoX anchor block + /// for epoch 3. + /// NOTE: the first block in epoch3 must be after the first block in the reward phase, so as + /// to ensure that the PoX stackers have been selected for this cycle. This means that we + /// don't proceed to process Nakamoto blocks until the reward cycle has begun. Also, the last + /// reward cycle of epoch2 _must_ be PoX so we have stackers who can sign. + /// + /// TODO: how do signers register their initial keys? Can we just deploy a pre-registration + /// contract? 
+ pub fn can_process_nakamoto(&mut self) -> Result { + let canonical_sortition_tip = self + .canonical_sortition_tip + .clone() + .expect("FAIL: checking epoch status, but we don't have a canonical sortition tip"); + + let canonical_sn = + SortitionDB::get_block_snapshot(self.sortition_db.conn(), &canonical_sortition_tip)? + .expect("FATAL: canonical sortition tip has no sortition"); + + // what epoch are we in? + let cur_epoch = + SortitionDB::get_stacks_epoch(self.sortition_db.conn(), canonical_sn.block_height)? + .expect(&format!( + "BUG: no epoch defined at height {}", + canonical_sn.block_height + )); + + if cur_epoch.epoch_id < StacksEpochId::Epoch30 { + return Ok(false); + } + + // in epoch3 + let all_epochs = SortitionDB::get_stacks_epochs(self.sortition_db.conn())?; + let epoch_3_idx = StacksEpoch::find_epoch_by_id(&all_epochs, StacksEpochId::Epoch30) + .expect("FATAL: epoch3 not defined"); + + let epoch3 = &all_epochs[epoch_3_idx]; + let first_epoch3_reward_cycle = self + .burnchain + .block_height_to_reward_cycle(epoch3.start_height) + .expect("FATAL: epoch3 block height has no reward cycle"); + + // only proceed if we have processed the _anchor block_ for this reward cycle + let handle_conn = self.sortition_db.index_handle(&canonical_sortition_tip); + let last_processed_rc = handle_conn.get_last_processed_reward_cycle()?; + Ok(last_processed_rc >= first_epoch3_reward_cycle) + } + + /// This is the main loop body for the coordinator in epoch 3. + /// Returns true if the coordinator is still running. + /// Returns false otherwise. 
+ pub fn handle_comms_nakamoto( + &mut self, + comms: &CoordinatorReceivers, + miner_status: Arc>, + ) -> bool { + // timeout so that we handle Ctrl-C a little gracefully + let bits = comms.wait_on(); + if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received new Nakamoto stacks block notice"); + match self.handle_new_nakamoto_stacks_block() { + Ok(new_anchor_block_opt) => { + if let Some(bhh) = new_anchor_block_opt { + debug!( + "Found next PoX anchor block, waiting for reward cycle processing"; + "pox_anchor_block_hash" => %bhh + ); + } + } + Err(e) => { + warn!("Error processing new stacks block: {:?}", e); + } + } + + signal_mining_ready(miner_status.clone()); + } + if (bits & (CoordinatorEvents::NEW_BURN_BLOCK as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received new burn block notice"); + match self.handle_new_nakamoto_burnchain_block() { + Ok(can_proceed) => { + if !can_proceed { + error!("Missing canonical anchor block",); + } + } + Err(e) => { + warn!("Error processing new burn block: {:?}", e); + } + } + signal_mining_ready(miner_status.clone()); + } + if (bits & (CoordinatorEvents::STOP as u8)) != 0 { + signal_mining_blocked(miner_status.clone()); + debug!("Received stop notice"); + return false; + } + + true + } + + /// Handle one or more new Nakamoto Stacks blocks. + /// If we process a PoX anchor block, then return its block hash. This unblocks processing the + /// next reward cycle's burnchain blocks. Subsequent calls to this function will terminate + /// with Some(pox-anchor-block-hash) until the reward cycle info is processed in the sortition + /// DB. 
+ pub fn handle_new_nakamoto_stacks_block(&mut self) -> Result, Error> { + let canonical_sortition_tip = self.canonical_sortition_tip.clone().expect( + "FAIL: processing a new Stacks block, but don't have a canonical sortition tip", + ); + + loop { + // process at most one block per loop pass + let mut sortdb_handle = self + .sortition_db + .tx_handle_begin(&canonical_sortition_tip)?; + + let mut processed_block_receipt = match NakamotoChainState::process_next_nakamoto_block( + &mut self.chain_state_db, + &mut sortdb_handle, + self.dispatcher, + ) { + Ok(receipt_opt) => receipt_opt, + Err(ChainstateError::InvalidStacksBlock(msg)) => { + warn!("Encountered invalid block: {}", &msg); + + // try again + self.notifier.notify_stacks_block_processed(); + increment_stx_blocks_processed_counter(); + continue; + } + Err(ChainstateError::NetError(NetError::DeserializeError(msg))) => { + // happens if we load a zero-sized block (i.e. an invalid block) + warn!("Encountered invalid block (codec error): {}", &msg); + + // try again + self.notifier.notify_stacks_block_processed(); + increment_stx_blocks_processed_counter(); + continue; + } + Err(e) => { + // something else happened + return Err(e.into()); + } + }; + + sortdb_handle.commit()?; + + let Some(block_receipt) = processed_block_receipt.take() else { + // out of blocks + debug!("No more blocks to process (no receipts)"); + break; + }; + + let block_hash = block_receipt.header.anchored_header.block_hash(); + let ( + canonical_stacks_block_id, + canonical_stacks_block_height, + canonical_stacks_consensus_hash, + ) = { + let nakamoto_header = block_receipt + .header + .anchored_header + .as_stacks_nakamoto() + .expect("FATAL: unreachable: processed a non-Nakamoto block"); + + ( + nakamoto_header.block_id(), + nakamoto_header.chain_length, + nakamoto_header.consensus_hash.clone(), + ) + }; + + debug!("Bump blocks processed ({})", &canonical_stacks_block_id); + + self.notifier.notify_stacks_block_processed(); + 
increment_stx_blocks_processed_counter(); + + // process Atlas events + Self::process_atlas_attachment_events( + self.atlas_db.as_mut(), + &self.atlas_config, + &block_receipt, + canonical_stacks_block_height, + ); + + // update cost estimator + if let Some(ref mut estimator) = self.cost_estimator { + let stacks_epoch = self + .sortition_db + .index_conn() + .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) + .expect("Could not find a stacks epoch."); + estimator.notify_block( + &block_receipt.tx_receipts, + &stacks_epoch.block_limit, + &stacks_epoch.epoch_id, + ); + } + + // update fee estimator + if let Some(ref mut estimator) = self.fee_estimator { + let stacks_epoch = self + .sortition_db + .index_conn() + .get_stacks_epoch_by_epoch_id(&block_receipt.evaluated_epoch) + .expect("Could not find a stacks epoch."); + if let Err(e) = estimator.notify_block(&block_receipt, &stacks_epoch.block_limit) { + warn!("FeeEstimator failed to process block receipt"; + "stacks_block" => %block_hash, + "stacks_height" => %block_receipt.header.stacks_block_height, + "error" => %e); + } + } + + let stacks_sn = SortitionDB::get_block_snapshot_consensus( + &self.sortition_db.conn(), + &canonical_stacks_consensus_hash, + )? + .expect(&format!( + "FATAL: unreachable: consensus hash {} has no snapshot", + &canonical_stacks_consensus_hash + )); + + // are we in the prepare phase? + if !self.burnchain.is_in_prepare_phase(stacks_sn.block_height) { + // next ready stacks block + continue; + } + + // is the upcoming reward cycle processed yet? + let current_reward_cycle = self + .burnchain + .block_height_to_reward_cycle(stacks_sn.block_height) + .expect(&format!( + "FATAL: unreachable: burnchain block height has no reward cycle" + )); + + let last_processed_reward_cycle = { + let ic = self.sortition_db.index_handle(&canonical_sortition_tip); + ic.get_last_processed_reward_cycle()? 
+ }; + + if last_processed_reward_cycle > current_reward_cycle { + // already processed upcoming reward cycle + continue; + } + + // This is the first Stacks block in the prepare phase for the next reward cycle. + // Pause here and process the next sortitions + debug!("Process next reward cycle's sortitions"); + self.handle_new_nakamoto_burnchain_block()?; + debug!("Processed next reward cycle's sortitions"); + } + + // no PoX anchor block found + Ok(None) + } + + /// Given a burnchain header, find the PoX reward cycle info + fn get_nakamoto_reward_cycle_info( + &mut self, + block_height: u64, + ) -> Result, Error> { + let sortition_tip_id = self + .canonical_sortition_tip + .as_ref() + .expect("FATAL: Processing anchor block, but no known sortition tip"); + + get_nakamoto_reward_cycle_info( + block_height, + sortition_tip_id, + &self.burnchain, + &mut self.chain_state_db, + &mut self.sortition_db, + &self.reward_set_provider, + ) + } + + /// Find sortitions to process. + /// Returns the last processed ancestor of `cursor`, and any unprocessed burnchain blocks + fn find_sortitions_to_process( + &self, + mut cursor: BurnchainHeaderHash, + ) -> Result<(SortitionId, VecDeque), Error> { + let mut sortitions_to_process = VecDeque::new(); + let last_processed_ancestor = loop { + if let Some(found_sortition) = self.sortition_db.is_sortition_processed(&cursor)? 
{ + debug!( + "Ancestor sortition {} of block {} is processed", + &found_sortition, &cursor + ); + break found_sortition; + } + + let current_block = + BurnchainDB::get_burnchain_block(&self.burnchain_blocks_db.conn(), &cursor) + .map_err(|e| { + warn!( + "ChainsCoordinator: could not retrieve block burnhash={}", + &cursor + ); + Error::NonContiguousBurnchainBlock(e) + })?; + + debug!( + "Unprocessed block: ({}, {})", + ¤t_block.header.block_hash.to_string(), + current_block.header.block_height + ); + + let parent = current_block.header.parent_block_hash.clone(); + sortitions_to_process.push_front(current_block); + cursor = parent; + }; + Ok((last_processed_ancestor, sortitions_to_process)) + } + + /// Process the next-available burnchain block, if possible. + /// Burnchain blocks can only be processed for the last-known PoX reward set, which is to say, + /// burnchain block processing can be blocked on the unavailability of the next PoX anchor + /// block. If the next PoX anchor block is not available, then no burnchain block processing + /// happens, and this function returns false. It returns true otherwise. + /// + /// Returns Err(..) if an error occurred while processing (i.e. a DB error). 
+ pub fn handle_new_nakamoto_burnchain_block(&mut self) -> Result { + // highest burnchain block we've downloaded + let canonical_burnchain_tip = self.burnchain_blocks_db.get_canonical_chain_tip()?; + + debug!("Handle new canonical burnchain tip"; + "height" => %canonical_burnchain_tip.block_height, + "block_hash" => %canonical_burnchain_tip.block_hash.to_string()); + + // Retrieve all the direct ancestors of this block with an unprocessed sortition + let (mut last_processed_ancestor, sortitions_to_process) = + self.find_sortitions_to_process(canonical_burnchain_tip.block_hash.clone())?; + let dbg_burn_header_hashes: Vec<_> = sortitions_to_process + .iter() + .map(|block| { + format!( + "({}, {})", + &block.header.block_hash.to_string(), + block.header.block_height + ) + }) + .collect(); + + debug!( + "Unprocessed burn chain blocks: {:?}", + &dbg_burn_header_hashes + ); + + // Unlike in Stacks 2.x, there can be neither chain reorgs nor PoX reorgs unless Bitcoin itself + // reorgs. But if this happens, then we will have already found the set of + // (newly-canonical) burnchain blocks which lack sortitions -- they'll be in + // `sortitions_to_process`. So, we can proceed to process all outstanding sortitions until + // we come across a PoX anchor block that we don't have yet. 
+ for unprocessed_block in sortitions_to_process.into_iter() { + let BurnchainBlockData { header, ops } = unprocessed_block; + let reward_cycle = self + .burnchain + .block_height_to_reward_cycle(header.block_height) + .unwrap_or(u64::MAX); + + debug!( + "Process burn block {} reward cycle {} in {}", + header.block_height, reward_cycle, &self.burnchain.working_dir, + ); + + // calculate paid rewards during this burnchain block if we announce + // to an events dispatcher + let paid_rewards = if self.dispatcher.is_some() { + calculate_paid_rewards(&ops) + } else { + PaidRewards { + pox: vec![], + burns: 0, + } + }; + + if self.burnchain.is_in_prepare_phase(header.block_height) { + // try to eagerly load up the reward cycle information, so we can persist it and + // make it available to signers. If we're at the _end_ of the prepare phase, then + // we have no choice but to block. + let reward_cycle_info = self.get_nakamoto_reward_cycle_info(header.block_height)?; + if let Some(rc_info) = reward_cycle_info { + // in nakamoto, if we have any reward cycle info at all, it will be known. + assert!( + rc_info.known_selected_anchor_block().is_some(), + "FATAL: unknown PoX anchor block in Nakamoto" + ); + } + } + + let reward_cycle_info = if self.burnchain.is_reward_cycle_start(header.block_height) { + // we're at the end of the prepare phase, so we'd better have obtained the reward + // cycle info of we must block. + // N.B. it's `- 2` because `is_reward_cycle_start` implies that `block_height % reward_cycle_length == 1`, + // but this call needs `block_height % reward_cycle_length == reward_cycle_length - 1` -- i.e. `block_height` + // must be the last block height in the last reward cycle. + let reward_cycle_info = + self.get_nakamoto_reward_cycle_info(header.block_height - 2)?; + if let Some(rc_info) = reward_cycle_info.as_ref() { + // in nakamoto, if we have any reward cycle info at all, it will be known. 
+ assert!( + rc_info.known_selected_anchor_block().is_some(), + "FATAL: unknown PoX anchor block in Nakamoto" + ); + } else { + // have to block -- we don't have the reward cycle information + debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; + "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), + "reward_cycle_end" => header.block_height - 2 + ); + return Ok(false); + } + reward_cycle_info + } else { + // not starting a reward cycle anyway + None + }; + + // process next sortition + let dispatcher_ref = &self.dispatcher; + let (next_snapshot, _) = self + .sortition_db + .evaluate_sortition( + &header, + ops, + &self.burnchain, + &last_processed_ancestor, + reward_cycle_info, + |reward_set_info| { + if let Some(dispatcher) = dispatcher_ref { + dispatcher_announce_burn_ops( + *dispatcher, + &header, + paid_rewards, + reward_set_info, + ); + } + }, + ) + .map_err(|e| { + error!("ChainsCoordinator: unable to evaluate sortition: {:?}", e); + Error::FailedToProcessSortition(e) + })?; + + // mark this burn block as processed in the nakamoto chainstate + let tx = self.chain_state_db.staging_db_tx_begin()?; + NakamotoChainState::set_burn_block_processed(&tx, &next_snapshot.consensus_hash)?; + tx.commit().map_err(DBError::SqliteError)?; + + let sortition_id = next_snapshot.sortition_id; + + self.notifier.notify_sortition_processed(); + + debug!( + "Sortition processed"; + "sortition_id" => &sortition_id.to_string(), + "burn_header_hash" => &next_snapshot.burn_header_hash.to_string(), + "burn_height" => next_snapshot.block_height + ); + + // always bump canonical sortition tip: + // if this code path is invoked, the canonical burnchain tip + // has moved, so we should move our canonical sortition tip as well. 
+ self.canonical_sortition_tip = Some(sortition_id.clone()); + last_processed_ancestor = sortition_id; + } + + Ok(true) + } +} diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs new file mode 100644 index 0000000000..2c453c894f --- /dev/null +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -0,0 +1,570 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use crate::net::test::{TestPeer, TestPeerConfig}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::types::PrincipalData; + +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::coordinator::tests::p2pkh_from; +use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::boot::test::make_pox_4_lockup; +use crate::chainstate::stacks::db::StacksAccount; +use crate::chainstate::stacks::db::StacksChainState; +use crate::chainstate::stacks::CoinbasePayload; +use crate::chainstate::stacks::StacksTransaction; +use crate::chainstate::stacks::StacksTransactionSigner; +use crate::chainstate::stacks::TenureChangeCause; +use crate::chainstate::stacks::TokenTransferMemo; +use crate::chainstate::stacks::TransactionAnchorMode; +use crate::chainstate::stacks::TransactionAuth; +use crate::chainstate::stacks::TransactionPayload; +use crate::chainstate::stacks::TransactionVersion; + +use crate::net::relay::Relayer; + +use crate::clarity::vm::types::StacksAddressExtensions; + +use stacks_common::address::AddressHashMode; +use stacks_common::address::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::Address; +use stacks_common::types::StacksEpoch; +use stacks_common::util::vrf::VRFProof; + +use crate::core::StacksEpochExtension; + +use rand::prelude::SliceRandom; +use rand::thread_rng; +use rand::RngCore; + +/// Bring a TestPeer into the Nakamoto Epoch +fn advance_to_nakamoto(peer: &mut TestPeer) { + let mut peer_nonce = 0; + let private_key = 
peer.config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + // advance through cycle 6 + for _ in 0..5 { + peer.tenure_with_txs(&[], &mut peer_nonce); + } + + // stack to pox-3 in cycle 7 + for sortition_height in 0..6 { + let txs = if sortition_height == 0 { + // stack them all + let stack_tx = make_pox_4_lockup( + &private_key, + 0, + 1_000_000_000_000_000_000, + PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()), + 12, + 34, + ); + vec![stack_tx] + } else { + vec![] + }; + + peer.tenure_with_txs(&txs, &mut peer_nonce); + } + + // peer is at the start of cycle 8 +} + +/// Make a peer and transition it into the Nakamoto epoch. +/// The node needs to be stacking; otherwise, Nakamoto won't activate. +fn boot_nakamoto(test_name: &str, mut initial_balances: Vec<(PrincipalData, u64)>) -> TestPeer { + let mut peer_config = TestPeerConfig::new(test_name, 0, 0); + let private_key = peer_config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + // reward cycles are 5 blocks long + // first 25 blocks are boot-up + // reward cycle 6 instantiates pox-3 + // we stack in reward cycle 7 so pox-3 is evaluated to find reward set participation + peer_config.epochs = Some(StacksEpoch::unit_test_3_0_only(37)); + peer_config.initial_balances = vec![(addr.to_account_principal(), 1_000_000_000_000_000_000)]; + peer_config.initial_balances.append(&mut initial_balances); + peer_config.burnchain.pox_constants.v2_unlock_height = 21; + peer_config.burnchain.pox_constants.pox_3_activation_height = 26; + peer_config.burnchain.pox_constants.v3_unlock_height = 27; + peer_config.burnchain.pox_constants.pox_4_activation_height = 
31; + + let mut peer = TestPeer::new(peer_config); + advance_to_nakamoto(&mut peer); + peer +} + +/// Make a replay peer, used for replaying the blockchain +fn make_replay_peer<'a>(peer: &'a mut TestPeer<'a>) -> TestPeer<'a> { + let mut replay_config = peer.config.clone(); + replay_config.test_name = format!("{}.replay", &peer.config.test_name); + + let mut replay_peer = TestPeer::new(replay_config); + advance_to_nakamoto(&mut replay_peer); + + // sanity check + let replay_tip = { + let sort_db = replay_peer.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + tip + }; + let tip = { + let sort_db = peer.sortdb.as_ref().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); + let sort_ic = sort_db.index_conn(); + let ancestor_tip = SortitionDB::get_ancestor_snapshot( + &sort_ic, + replay_tip.block_height, + &tip.sortition_id, + ) + .unwrap() + .unwrap(); + ancestor_tip + }; + + assert_eq!(tip, replay_tip); + replay_peer +} + +/// Make a token-transfer from a private key +fn make_token_transfer( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + private_key: &StacksPrivateKey, + nonce: u64, + amt: u64, + fee: u64, + recipient_addr: &StacksAddress, +) -> StacksTransaction { + let mut stx_transfer = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(private_key).unwrap(), + TransactionPayload::TokenTransfer( + recipient_addr.clone().to_account_principal(), + amt, + TokenTransferMemo([0x00; 34]), + ), + ); + stx_transfer.chain_id = 0x80000000; + stx_transfer.anchor_mode = TransactionAnchorMode::OnChainOnly; + stx_transfer.set_tx_fee(fee); + stx_transfer.auth.set_origin_nonce(nonce); + + let mut tx_signer = StacksTransactionSigner::new(&stx_transfer); + tx_signer.sign_origin(&private_key).unwrap(); + let stx_transfer_signed = tx_signer.get_tx().unwrap(); + + stx_transfer_signed +} + +/// Given the blocks and block-commits for a reward 
cycle, replay the sortitions on the given +/// TestPeer but submit the blocks in random order. +fn replay_reward_cycle( + peer: &mut TestPeer, + burn_ops: &[Vec], + stacks_blocks: &[NakamotoBlock], +) { + eprintln!("\n\n=============================================\nBegin replay\n==============================================\n"); + + let mut indexes: Vec<_> = (0..stacks_blocks.len()).collect(); + indexes.shuffle(&mut thread_rng()); + + for burn_ops in burn_ops.iter() { + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + } + + let sortdb = peer.sortdb.take().unwrap(); + let mut node = peer.stacks_node.take().unwrap(); + + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&sort_tip); + + for i in indexes.into_iter() { + let block: &NakamotoBlock = &stacks_blocks[i]; + let block_id = block.block_id(); + debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header); + + let accepted = + Relayer::process_new_nakamoto_block(&sort_handle, &mut node.chainstate, block.clone()) + .unwrap(); + if accepted { + test_debug!("Accepted Nakamoto block {}", &block_id); + peer.coord.handle_new_nakamoto_stacks_block().unwrap(); + } else { + test_debug!("Did NOT accept Nakamoto block {}", &block_id); + } + } + + peer.sortdb = Some(sortdb); + peer.stacks_node = Some(node); +} + +/// Mine a single Nakamoto tenure with a single Nakamoto block +#[test] +fn test_simple_nakamoto_coordinator_bootup() { + let mut peer = boot_nakamoto(function_name!(), vec![]); + + let (burn_ops, tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + let blocks_and_sizes = peer.make_nakamoto_tenure( + &consensus_hash, + tenure_change, + vrf_proof, + |_miner, _chainstate, _sort_dbconn, _count| vec![], + ); + let blocks: Vec<_> = blocks_and_sizes + 
.into_iter() + .map(|(block, _, _)| block) + .collect(); + + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap(); + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 12 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &blocks.last().unwrap().header + ); +} + +/// Mine a single Nakamoto tenure with 10 Nakamoto blocks +#[test] +fn test_simple_nakamoto_coordinator_1_tenure_10_blocks() { + let mut peer = boot_nakamoto(function_name!(), vec![]); + let private_key = peer.config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let (burn_ops, tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + let blocks_and_sizes = peer.make_nakamoto_tenure( + &consensus_hash, + tenure_change, + vrf_proof, + |miner, chainstate, sortdb, count| { + if count < 10 { + debug!("\n\nProduce block {}\n\n", count); + + let account = get_account(chainstate, sortdb, &addr); + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + ); + + let blocks: Vec<_> = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + let tip = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let 
sort_db = peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 21 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &blocks.last().unwrap().header + ); + + // replay the blocks and sortitions in random order, and verify that we still reach the chain + // tip + let mut replay_peer = make_replay_peer(&mut peer); + replay_reward_cycle(&mut replay_peer, &[burn_ops], &blocks); + + let tip = { + let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 21 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &blocks.last().unwrap().header + ); +} + +/// Mine a 10 Nakamoto tenures with 10 Nakamoto blocks +#[test] +fn test_simple_nakamoto_coordinator_10_tenures_10_blocks() { + let mut peer = boot_nakamoto(function_name!(), vec![]); + let private_key = peer.config.private_key.clone(); + let addr = StacksAddress::from_public_keys( + C32_ADDRESS_VERSION_TESTNET_SINGLESIG, + &AddressHashMode::SerializeP2PKH, + 1, + &vec![StacksPublicKey::from_private(&private_key)], + ) + .unwrap(); + + let mut all_blocks = vec![]; + let mut all_burn_ops = vec![]; + let mut rc_blocks = vec![]; + let mut rc_burn_ops = vec![]; + let mut consensus_hashes = vec![]; + let stx_miner_key = peer.miner.nakamoto_miner_key(); + + for i in 0..10 { + let (burn_ops, tenure_change, miner_key) = + peer.begin_nakamoto_tenure(TenureChangeCause::BlockFound); + let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); + let vrf_proof = peer.make_nakamoto_vrf_proof(miner_key); + + debug!("Next burnchain block: {}", 
&consensus_hash); + + // do a stx transfer in each block to a given recipient + let recipient_addr = + StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); + + let blocks_and_sizes = peer.make_nakamoto_tenure( + &consensus_hash, + tenure_change, + vrf_proof, + |miner, chainstate, sortdb, count| { + if count < 10 { + debug!("\n\nProduce block {}\n\n", all_blocks.len()); + + let account = get_account(chainstate, sortdb, &addr); + + let stx_transfer = make_token_transfer( + chainstate, + sortdb, + &private_key, + account.nonce, + 100, + 1, + &recipient_addr, + ); + + vec![stx_transfer] + } else { + vec![] + } + }, + ); + + consensus_hashes.push(consensus_hash); + let mut blocks: Vec = blocks_and_sizes + .into_iter() + .map(|(block, _, _)| block) + .collect(); + + // if we're starting a new reward cycle, then save the current one + let tip = { + let sort_db = peer.sortdb.as_mut().unwrap(); + SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap() + }; + if peer + .config + .burnchain + .is_reward_cycle_start(tip.block_height) + { + rc_blocks.push(all_blocks.clone()); + rc_burn_ops.push(all_burn_ops.clone()); + + all_burn_ops.clear(); + all_blocks.clear(); + } + + all_blocks.append(&mut blocks); + all_burn_ops.push(burn_ops); + } + + rc_blocks.push(all_blocks.clone()); + rc_burn_ops.push(all_burn_ops.clone()); + + all_burn_ops.clear(); + all_blocks.clear(); + + // in nakamoto, tx fees are rewarded by the next tenure, so the + // scheduled rewards come 1 tenure after the coinbase reward matures + let miner = p2pkh_from(&stx_miner_key); + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + + // this is sortition height 12, and this miner has earned all 12 of the coinbases + // plus the initial per-block mining bonus of 2600 STX, but minus the last three rewards (since + // the miner rewards take three sortitions to confirm). 
+ // + // This is (1000 + 2600) * 10 + 1000 - (3600 * 2 + 1000) + // first 10 block unmatured rewards + // blocks 11 + let mut expected_coinbase_rewards: u128 = 28800000000; + for (i, ch) in consensus_hashes.into_iter().enumerate() { + let sn = SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &ch) + .unwrap() + .unwrap(); + + if !sn.sortition { + continue; + } + let block_id = StacksBlockId(sn.winning_stacks_block_hash.0); + + let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); + let sort_db_tx = sort_db.tx_begin_at_tip(); + + let stx_balance = clarity_instance + .read_only_connection(&block_id, &chainstate_tx, &sort_db_tx) + .with_clarity_db_readonly(|db| db.get_account_stx_balance(&miner.clone().into())); + + // it's 1 * 10 because it's 1 uSTX per token-transfer, and 10 per tenure + let expected_total_tx_fees = 1 * 10 * (i as u128).saturating_sub(3); + let expected_total_coinbase = expected_coinbase_rewards; + + if i == 0 { + // first tenure awards the last of the initial mining bonus + expected_coinbase_rewards += (1000 + 2600) * 1000000; + } else { + // subsequent tenures award normal coinbases + expected_coinbase_rewards += 1000 * 1000000; + } + + eprintln!( + "Checking block #{} ({},{}): {} =?= {} + {}", + i, + &ch, + &sn.block_height, + stx_balance.amount_unlocked(), + expected_total_coinbase, + expected_total_tx_fees + ); + assert_eq!( + stx_balance.amount_unlocked(), + expected_total_coinbase + expected_total_tx_fees + ); + } + + let tip = { + let chainstate = &mut peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 111 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &rc_blocks.last().unwrap().last().unwrap().header + ); + + // replay the blocks and 
sortitions in random order, and verify that we still reach the chain + // tip + let mut replay_peer = make_replay_peer(&mut peer); + for (burn_ops, blocks) in rc_burn_ops.iter().zip(rc_blocks.iter()) { + replay_reward_cycle(&mut replay_peer, burn_ops, blocks); + } + + let tip = { + let chainstate = &mut replay_peer.stacks_node.as_mut().unwrap().chainstate; + let sort_db = replay_peer.sortdb.as_mut().unwrap(); + NakamotoChainState::get_canonical_block_header(chainstate.db(), sort_db) + .unwrap() + .unwrap() + }; + + assert_eq!( + tip.anchored_header + .as_stacks_nakamoto() + .unwrap() + .chain_length, + 111 + ); + assert_eq!( + tip.anchored_header.as_stacks_nakamoto().unwrap(), + &rc_blocks.last().unwrap().last().unwrap().header + ); +} diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs new file mode 100644 index 0000000000..3de982a16e --- /dev/null +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -0,0 +1,814 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::cmp; +use std::collections::HashMap; +use std::collections::HashSet; +use std::convert::From; +use std::fs; +use std::mem; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::sync::Mutex; +use std::thread::ThreadId; + +use clarity::vm::analysis::{CheckError, CheckErrors}; +use clarity::vm::ast::errors::ParseErrors; +use clarity::vm::ast::ASTRules; +use clarity::vm::clarity::TransactionConnection; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::database::BurnStateDB; +use clarity::vm::errors::Error as InterpreterError; +use clarity::vm::types::TypeSignature; + +use serde::Deserialize; +use stacks_common::util::get_epoch_time_ms; +use stacks_common::util::hash::MerkleTree; +use stacks_common::util::hash::Sha512Trunc256Sum; +use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; +use stacks_common::util::vrf::*; + +use crate::burnchains::PrivateKey; +use crate::burnchains::PublicKey; +use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx}; +use crate::chainstate::burn::operations::*; +use crate::chainstate::burn::*; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::nakamoto::NakamotoBlockHeader; +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::nakamoto::SetupBlockResult; +use crate::chainstate::stacks::address::StacksAddressExtensions; +use crate::chainstate::stacks::db::accounts::MinerReward; +use crate::chainstate::stacks::db::transactions::{ + handle_clarity_runtime_error, ClarityRuntimeTxError, +}; +use crate::chainstate::stacks::db::StacksHeaderInfo; +use crate::chainstate::stacks::db::{ + blocks::MemPoolRejection, ChainstateTx, ClarityTx, MinerRewardInfo, StacksChainState, + MINER_REWARD_MATURITY, +}; +use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt}; +use crate::chainstate::stacks::miner::BlockBuilder; +use 
crate::chainstate::stacks::miner::BlockBuilderSettings; +use crate::chainstate::stacks::miner::BlockLimitFunction; +use crate::chainstate::stacks::miner::TransactionError; +use crate::chainstate::stacks::miner::TransactionProblematic; +use crate::chainstate::stacks::miner::TransactionResult; +use crate::chainstate::stacks::miner::TransactionSkipped; +use crate::chainstate::stacks::Error; +use crate::chainstate::stacks::StacksBlockHeader; +use crate::chainstate::stacks::*; +use crate::clarity_vm::clarity::{ClarityConnection, ClarityInstance, Error as clarity_error}; +use crate::core::mempool::*; +use crate::core::*; +use crate::cost_estimates::metrics::CostMetric; +use crate::cost_estimates::CostEstimator; + +use crate::net::relay::Relayer; +use crate::net::Error as net_error; +use stacks_common::types::StacksPublicKeyBuffer; + +use crate::monitoring::{ + set_last_mined_block_transaction_count, set_last_mined_execution_cost_observed, +}; +use stacks_common::codec::{read_next, write_next, StacksMessageCodec}; +use stacks_common::types::chainstate::BurnchainHeaderHash; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::TrieHash; +use stacks_common::types::chainstate::{BlockHeaderHash, ConsensusHash, StacksAddress}; +use stacks_common::util::hash::Hash160; + +/// New tenure information +pub struct NakamotoTenureStart { + /// coinbase transaction for this miner + pub coinbase_tx: StacksTransaction, + /// VRF proof for this miner + pub vrf_proof: VRFProof, +} + +pub struct NakamotoBlockBuilder { + /// if this is building atop an epoch 2 block, then this is that block's header + epoch2_parent_header: Option<(StacksBlockHeader, ConsensusHash)>, + /// if this is building atop an epoch 3 block, then this is that block's header + nakamoto_parent_header: Option, + /// VRF proof, if needed + vrf_proof: Option, + /// Total burn this block represents + total_burn: u64, + /// parent block-commit hash value + parent_commit_hash_value: 
BlockHeaderHash, + /// Matured miner rewards to process, if any. + /// If given, this is (parent-miner-reward, this-miner-reward, reward-info) + matured_miner_rewards_opt: Option<(MinerReward, MinerReward, MinerRewardInfo)>, + /// bytes of space consumed so far + bytes_so_far: u64, + /// transactions selected + txs: Vec, + /// header we're filling in + header: NakamotoBlockHeader, +} + +pub struct MinerTenureInfo<'a> { + pub chainstate_tx: ChainstateTx<'a>, + pub clarity_instance: &'a mut ClarityInstance, + pub burn_tip: BurnchainHeaderHash, + /// This is the expected burn tip height (i.e., the current burnchain tip + 1) + /// of the mined block + pub burn_tip_height: u32, + pub mainnet: bool, + pub parent_consensus_hash: ConsensusHash, + pub parent_header_hash: BlockHeaderHash, + pub parent_stacks_block_height: u64, + pub parent_burn_block_height: u32, + pub tenure_start: bool, + pub tenure_height: u64, +} + +impl NakamotoBlockBuilder { + /// Make a block builder atop a Nakamoto parent for a new tenure + pub fn new_tenure_from_nakamoto_parent( + parent_tenure_id: &StacksBlockId, + parent: &NakamotoBlockHeader, + consensus_hash: &ConsensusHash, + total_burn: u64, + proof: &VRFProof, + ) -> NakamotoBlockBuilder { + let parent_commit_hash_value = BlockHeaderHash(parent_tenure_id.0.clone()); + NakamotoBlockBuilder { + epoch2_parent_header: None, + nakamoto_parent_header: Some(parent.clone()), + total_burn, + vrf_proof: Some(proof.clone()), + parent_commit_hash_value, + matured_miner_rewards_opt: None, + bytes_so_far: 0, + txs: vec![], + header: NakamotoBlockHeader::from_parent_empty( + parent.chain_length + 1, + total_burn, + consensus_hash.clone(), + parent.block_id(), + ), + } + } + + /// Make a block builder atop a Nakamoto parent for a new block within a tenure + pub fn continue_tenure_from_nakamoto_parent( + parent: &NakamotoBlockHeader, + consensus_hash: &ConsensusHash, + total_burn: u64, + ) -> NakamotoBlockBuilder { + let parent_commit_hash_value = 
BlockHeaderHash(parent.block_id().0.clone()); + NakamotoBlockBuilder { + epoch2_parent_header: None, + nakamoto_parent_header: Some(parent.clone()), + total_burn, + vrf_proof: None, + parent_commit_hash_value, + matured_miner_rewards_opt: None, + bytes_so_far: 0, + txs: vec![], + header: NakamotoBlockHeader::from_parent_empty( + parent.chain_length + 1, + total_burn, + consensus_hash.clone(), + parent.block_id(), + ), + } + } + + /// Make a block builder atop an epoch 2 parent for a new tenure + pub fn new_tenure_from_epoch2_parent( + parent: &StacksBlockHeader, + parent_consensus_hash: &ConsensusHash, + consensus_hash: &ConsensusHash, + total_burn: u64, + proof: &VRFProof, + ) -> NakamotoBlockBuilder { + NakamotoBlockBuilder { + epoch2_parent_header: Some((parent.clone(), parent_consensus_hash.clone())), + nakamoto_parent_header: None, + total_burn, + vrf_proof: Some(proof.clone()), + parent_commit_hash_value: parent.block_hash(), + matured_miner_rewards_opt: None, + bytes_so_far: 0, + txs: vec![], + header: NakamotoBlockHeader::from_parent_empty( + parent.total_work.work + 1, + total_burn, + consensus_hash.clone(), + StacksBlockId::new(parent_consensus_hash, &parent.block_hash()), + ), + } + } + + /// Make a block builder from genesis (testing only) + pub fn new_tenure_from_genesis(proof: &VRFProof) -> NakamotoBlockBuilder { + NakamotoBlockBuilder { + epoch2_parent_header: None, + nakamoto_parent_header: None, + total_burn: 0, + vrf_proof: Some(proof.clone()), + parent_commit_hash_value: FIRST_STACKS_BLOCK_HASH.clone(), + matured_miner_rewards_opt: None, + bytes_so_far: 0, + txs: vec![], + header: NakamotoBlockHeader::genesis(), + } + } + + /// Make a Nakamoto block builder appropriate for building atop the given block header + pub fn new_from_parent( + // tenure ID -- this is the index block hash of the start block of the last tenure (i.e. + // the data we committed to in the block-commit). 
If this is an epoch 2.x parent, then + // this is just the index block hash of the parent Stacks block. + parent_tenure_id: &StacksBlockId, + // Stacks header we're building off of. + parent_stacks_header: &StacksHeaderInfo, + // consensus hash of this tenure's burnchain block + consensus_hash: &ConsensusHash, + // total BTC burn so far + total_burn: u64, + // VRF proof, if we're starting a _new_ tenure (instead of continuing an existing one) + vrf_proof_opt: Option, + ) -> Result { + let builder = if let Some(parent_nakamoto_header) = + parent_stacks_header.anchored_header.as_stacks_nakamoto() + { + // building atop a nakamoto block + // new tenure? + if let Some(vrf_proof) = vrf_proof_opt.as_ref() { + NakamotoBlockBuilder::new_tenure_from_nakamoto_parent( + parent_tenure_id, + parent_nakamoto_header, + consensus_hash, + total_burn, + vrf_proof, + ) + } else { + NakamotoBlockBuilder::continue_tenure_from_nakamoto_parent( + parent_nakamoto_header, + consensus_hash, + total_burn, + ) + } + } else if let Some(parent_epoch2_header) = + parent_stacks_header.anchored_header.as_stacks_epoch2() + { + // building atop a stacks 2.x block. + // we are necessarily starting a new tenure + if let Some(vrf_proof) = vrf_proof_opt.as_ref() { + NakamotoBlockBuilder::new_tenure_from_epoch2_parent( + parent_epoch2_header, + &parent_stacks_header.consensus_hash, + consensus_hash, + total_burn, + vrf_proof, + ) + } else { + // not allowed + warn!("Failed to start a Nakamoto tenure atop a Stacks 2.x block -- missing a VRF proof"); + return Err(Error::ExpectedTenureChange); + } + } else { + // not reachable -- no other choices + return Err(Error::InvalidStacksBlock( + "Parent is neither a Nakamoto block nor a Stacks 2.x block".into(), + )); + }; + + Ok(builder) + } + + /// This function should be called before `tenure_begin`. 
+ /// It creates a MinerTenureInfo struct which owns connections to the chainstate and sortition + /// DBs, so that block-processing is guaranteed to terminate before the lives of these handles + /// expire. + pub fn load_tenure_info<'a>( + &self, + chainstate: &'a mut StacksChainState, + burn_dbconn: &'a SortitionDBConn, + tenure_start: bool, + ) -> Result, Error> { + debug!("Nakamoto miner tenure begin"); + + let burn_tip = SortitionDB::get_canonical_chain_tip_bhh(burn_dbconn.conn())?; + let burn_tip_height = u32::try_from( + SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height, + ) + .expect("block height overflow"); + + let mainnet = chainstate.config().mainnet; + + let (chain_tip, parent_consensus_hash, parent_header_hash) = + if let Some(nakamoto_parent_header) = self.nakamoto_parent_header.as_ref() { + // parent is a nakamoto block + let parent_header_info = NakamotoChainState::get_block_header( + chainstate.db(), + &StacksBlockId::new( + &nakamoto_parent_header.consensus_hash, + &nakamoto_parent_header.block_hash(), + ), + )? + .ok_or(Error::NoSuchBlockError) + .map_err(|e| { + warn!( + "No such Nakamoto parent block {}/{} ({})", + &nakamoto_parent_header.consensus_hash, + &nakamoto_parent_header.block_hash(), + &nakamoto_parent_header.block_id() + ); + e + })?; + + ( + parent_header_info, + nakamoto_parent_header.consensus_hash.clone(), + nakamoto_parent_header.block_hash(), + ) + } else if let Some((stacks_header, consensus_hash)) = self.epoch2_parent_header.as_ref() + { + // parent is a Stacks epoch2 block + let parent_header_info = NakamotoChainState::get_block_header( + chainstate.db(), + &StacksBlockId::new(consensus_hash, &stacks_header.block_hash()), + )? 
+ .ok_or(Error::NoSuchBlockError) + .map_err(|e| { + warn!( + "No such Stacks 2.x parent block {}/{} ({})", + &consensus_hash, + &stacks_header.block_hash(), + &StacksBlockId::new(&consensus_hash, &stacks_header.block_hash()) + ); + e + })?; + + ( + parent_header_info, + consensus_hash.clone(), + stacks_header.block_hash(), + ) + } else { + // parent is genesis (testing only) + ( + StacksHeaderInfo::regtest_genesis(), + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ) + }; + + let tenure_height = if let Ok(Some(parent_tenure_height)) = + NakamotoChainState::get_tenure_height( + chainstate.db(), + &StacksBlockId::new(&parent_consensus_hash, &parent_header_hash), + ) { + parent_tenure_height + .checked_add(1) + .expect("Blockchain overflow") + } else { + 0 + }; + + // data won't be committed, so do a concurrent transaction + let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin()?; + + Ok(MinerTenureInfo { + chainstate_tx, + clarity_instance, + burn_tip, + burn_tip_height, + mainnet, + parent_consensus_hash, + parent_header_hash, + parent_stacks_block_height: chain_tip.stacks_block_height, + parent_burn_block_height: chain_tip.burn_header_height, + tenure_start, + tenure_height, + }) + } + + /// Begin/resume mining a tenure's transactions. + /// Returns an open ClarityTx for mining the block. + /// NOTE: even though we don't yet know the block hash, the Clarity VM ensures that a + /// transaction can't query information about the _current_ block (i.e. information that is not + /// yet known). + pub fn tenure_begin<'a, 'b>( + &mut self, + burn_dbconn: &'a SortitionDBConn, + info: &'b mut MinerTenureInfo<'a>, + ) -> Result, Error> { + let SetupBlockResult { + clarity_tx, + matured_miner_rewards_opt, + .. 
+ } = NakamotoChainState::setup_block( + &mut info.chainstate_tx, + info.clarity_instance, + burn_dbconn, + &burn_dbconn.context.pox_constants, + info.parent_consensus_hash, + info.parent_header_hash, + info.parent_stacks_block_height, + info.parent_burn_block_height, + info.burn_tip, + info.burn_tip_height, + info.mainnet, + info.tenure_start, + info.tenure_height, + )?; + self.matured_miner_rewards_opt = matured_miner_rewards_opt; + Ok(clarity_tx) + } + + /// Finish up mining an epoch's transactions. + /// Return the ExecutionCost consumed so far. + pub fn tenure_finish(self, tx: ClarityTx) -> ExecutionCost { + let new_consensus_hash = MINER_BLOCK_CONSENSUS_HASH.clone(); + let new_block_hash = MINER_BLOCK_HEADER_HASH.clone(); + + let index_block_hash = + StacksBlockHeader::make_index_block_hash(&new_consensus_hash, &new_block_hash); + + // write out the trie... + let consumed = tx.commit_mined_block(&index_block_hash); + + test_debug!("\n\nFinished mining. Trie is in mined_blocks table.\n",); + + consumed + } + + /// Finish constructing a Nakamoto block. + /// The block will not be signed yet. + /// Returns the unsigned Nakamoto block + fn finalize_block(&mut self, clarity_tx: &mut ClarityTx) -> NakamotoBlock { + // done! 
Calculate state root and tx merkle root + let txid_vecs = self + .txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + let merkle_tree = MerkleTree::::new(&txid_vecs); + let tx_merkle_root = merkle_tree.root(); + let state_root_hash = clarity_tx.seal(); + + self.header.tx_merkle_root = tx_merkle_root; + self.header.state_index_root = state_root_hash; + + let block = NakamotoBlock { + header: self.header.clone(), + txs: self.txs.clone(), + }; + + test_debug!( + "\n\nMined Nakamoto block {}, {} transactions, state root is {}\n", + block.header.block_hash(), + block.txs.len(), + state_root_hash + ); + + info!( + "Miner: mined Nakamoto block"; + "consensus_hash" => %block.header.consensus_hash, + "block_hash" => %block.header.block_hash(), + "block_height" => block.header.chain_length, + "num_txs" => block.txs.len(), + "parent_block" => %block.header.parent_block_id, + "state_root" => %state_root_hash + ); + + block + } + + /// Finish building the Nakamoto block + pub fn mine_nakamoto_block(&mut self, clarity_tx: &mut ClarityTx) -> NakamotoBlock { + NakamotoChainState::finish_block(clarity_tx, self.matured_miner_rewards_opt.as_ref()) + .expect("FATAL: call to `finish_block` failed"); + self.finalize_block(clarity_tx) + } + + /// Given access to the mempool, mine a nakamoto block. + /// It will not be signed. + pub fn build_nakamoto_block( + // not directly used; used as a handle to open other chainstates + chainstate_handle: &StacksChainState, + burn_dbconn: &SortitionDBConn, + mempool: &mut MemPoolDB, + // tenure ID -- this is the index block hash of the start block of the last tenure (i.e. + // the data we committed to in the block-commit) + parent_tenure_id: &StacksBlockId, + // Stacks header we're building off of. + parent_stacks_header: &StacksHeaderInfo, + // consensus hash of this block + consensus_hash: &ConsensusHash, + // the burn so far on the burnchain (i.e. 
from the last burnchain block) + total_burn: u64, + new_tenure_info: Option, + settings: BlockBuilderSettings, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Result<(NakamotoBlock, ExecutionCost, u64), Error> { + let (tip_consensus_hash, tip_block_hash, tip_height) = ( + parent_stacks_header.consensus_hash.clone(), + parent_stacks_header.anchored_header.block_hash(), + parent_stacks_header.stacks_block_height, + ); + + debug!( + "Build Nakamoto block off of {}/{} height {}", + &tip_consensus_hash, &tip_block_hash, tip_height + ); + + let (mut chainstate, _) = chainstate_handle.reopen()?; + + let mut builder = NakamotoBlockBuilder::new_from_parent( + parent_tenure_id, + parent_stacks_header, + consensus_hash, + total_burn, + new_tenure_info.as_ref().map(|info| info.vrf_proof.clone()), + )?; + + let ts_start = get_epoch_time_ms(); + + let mut miner_tenure_info = + builder.load_tenure_info(&mut chainstate, burn_dbconn, new_tenure_info.is_some())?; + let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; + + let block_limit = tenure_tx + .block_limit() + .expect("Failed to obtain block limit from miner's block connection"); + + let (blocked, tx_events) = match StacksBlockBuilder::select_and_apply_transactions( + &mut tenure_tx, + &mut builder, + mempool, + parent_stacks_header, + new_tenure_info.as_ref().map(|info| &info.coinbase_tx), + settings, + event_observer, + ASTRules::PrecheckSize, + ) { + Ok(x) => x, + Err(e) => { + warn!("Failure building block: {}", e); + tenure_tx.rollback_block(); + return Err(e); + } + }; + + if blocked { + debug!( + "Miner: block transaction selection aborted (child of {})", + &parent_stacks_header.anchored_header.block_hash() + ); + return Err(Error::MinerAborted); + } + + // save the block so we can build microblocks off of it + let block = builder.mine_nakamoto_block(&mut tenure_tx); + let size = builder.bytes_so_far; + let consumed = builder.tenure_finish(tenure_tx); + + let ts_end = 
get_epoch_time_ms(); + + if let Some(observer) = event_observer { + observer.mined_nakamoto_block_event( + SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height + 1, + &block, + size, + &consumed, + tx_events, + ); + } + + set_last_mined_block_transaction_count(block.txs.len() as u64); + set_last_mined_execution_cost_observed(&consumed, &block_limit); + + info!( + "Miner: mined Nakamoto block"; + "block_hash" => %block.header.block_hash(), + "block_id" => %block.header.block_id(), + "height" => block.header.chain_length, + "tx_count" => block.txs.len(), + "parent_block_id" => %block.header.parent_block_id, + "block_size" => size, + "execution_consumed" => %consumed, + "%-full" => block_limit.proportion_largest_dimension(&consumed), + "assembly_time_ms" => ts_end.saturating_sub(ts_start), + ); + + Ok((block, consumed, size)) + } + + #[cfg(test)] + pub fn make_nakamoto_block_from_txs( + mut self, + chainstate_handle: &StacksChainState, + burn_dbconn: &SortitionDBConn, + mut txs: Vec, + ) -> Result<(NakamotoBlock, u64, ExecutionCost), Error> { + debug!("Build Nakamoto block from {} transactions", txs.len()); + let (mut chainstate, _) = chainstate_handle.reopen()?; + + let new_tenure = txs + .iter() + .find(|txn| { + if let TransactionPayload::TenureChange(..) = txn.payload { + true + } else { + false + } + }) + .is_some(); + + let mut miner_tenure_info = + self.load_tenure_info(&mut chainstate, burn_dbconn, new_tenure)?; + let mut tenure_tx = self.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; + for tx in txs.drain(..) { + let tx_len = tx.tx_len(); + match self.try_mine_tx_with_len( + &mut tenure_tx, + &tx, + tx_len, + &BlockLimitFunction::NO_LIMIT_HIT, + ASTRules::PrecheckSize, + ) { + TransactionResult::Success(..) => { + debug!("Included {}", &tx.txid()); + } + TransactionResult::Skipped(TransactionSkipped { error, .. }) + | TransactionResult::ProcessingError(TransactionError { error, .. 
}) => { + match error { + Error::BlockTooBigError => { + // done mining -- our execution budget is exceeded. + // Make the block from the transactions we did manage to get + debug!("Block budget exceeded on tx {}", &tx.txid()); + } + Error::InvalidStacksTransaction(_emsg, true) => { + // if we have an invalid transaction that was quietly ignored, don't warn here either + test_debug!( + "Failed to apply tx {}: InvalidStacksTransaction '{:?}'", + &tx.txid(), + &_emsg + ); + continue; + } + Error::ProblematicTransaction(txid) => { + test_debug!("Encountered problematic transaction. Aborting"); + return Err(Error::ProblematicTransaction(txid)); + } + e => { + warn!("Failed to apply tx {}: {:?}", &tx.txid(), &e); + continue; + } + } + } + TransactionResult::Problematic(TransactionProblematic { tx, .. }) => { + // drop from the mempool + debug!("Encountered problematic transaction {}", &tx.txid()); + return Err(Error::ProblematicTransaction(tx.txid())); + } + } + } + let block = self.mine_nakamoto_block(&mut tenure_tx); + let size = self.bytes_so_far; + let cost = self.tenure_finish(tenure_tx); + Ok((block, size, cost)) + } +} + +impl BlockBuilder for NakamotoBlockBuilder { + /// Append a transaction if doing so won't exceed the epoch data size. + /// Errors out if we exceed budget, or the transaction is invalid. 
+ fn try_mine_tx_with_len( + &mut self, + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + tx_len: u64, + limit_behavior: &BlockLimitFunction, + ast_rules: ASTRules, + ) -> TransactionResult { + if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { + return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); + } + + match limit_behavior { + BlockLimitFunction::CONTRACT_LIMIT_HIT => { + match &tx.payload { + TransactionPayload::ContractCall(cc) => { + // once we've hit the runtime limit once, allow boot code contract calls, but do not try to eval + // other contract calls + if !cc.address.is_boot_code_addr() { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); + } + } + TransactionPayload::SmartContract(..) => { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); + } + _ => {} + } + } + BlockLimitFunction::LIMIT_REACHED => { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::LIMIT_REACHED".to_string(), + ) + } + BlockLimitFunction::NO_LIMIT_HIT => {} + }; + + let quiet = !cfg!(test); + let result = { + // preemptively skip problematic transactions + if let Err(e) = Relayer::static_check_problematic_relayed_tx( + clarity_tx.config.mainnet, + clarity_tx.get_epoch(), + &tx, + ast_rules, + ) { + info!( + "Detected problematic tx {} while mining; dropping from mempool", + tx.txid() + ); + return TransactionResult::problematic(&tx, Error::NetError(e)); + } + let (fee, receipt) = match StacksChainState::process_transaction( + clarity_tx, tx, quiet, ast_rules, + ) { + Ok((fee, receipt)) => (fee, receipt), + Err(e) => { + let (is_problematic, e) = + TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + if is_problematic { + return TransactionResult::problematic(&tx, e); + } else { + match e { + Error::CostOverflowError(cost_before, cost_after, total_budget) => { + clarity_tx.reset_cost(cost_before.clone()); + 
if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { + warn!( + "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", + tx.txid(), + 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, + &total_budget + ); + return TransactionResult::error( + &tx, + Error::TransactionTooBigError, + ); + } else { + warn!( + "Transaction {} reached block cost {}; budget was {}", + tx.txid(), + &cost_after, + &total_budget + ); + return TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + ); + } + } + _ => return TransactionResult::error(&tx, e), + } + } + } + }; + info!("Include tx"; + "tx" => %tx.txid(), + "payload" => tx.payload.name(), + "origin" => %tx.origin_address()); + + // save + self.txs.push(tx.clone()); + TransactionResult::success(&tx, fee, receipt) + }; + + self.bytes_so_far += tx_len; + result + } +} diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index a182293424..0ee3c7732c 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::collections::HashSet; use std::ops::DerefMut; use clarity::vm::ast::ASTRules; @@ -31,16 +32,21 @@ use stacks_common::codec::{ use stacks_common::consts::{ FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, MINER_REWARD_MATURITY, }; +use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::chainstate::StacksPublicKey; +use stacks_common::types::chainstate::VRFSeed; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksBlockId, TrieHash, }; +use stacks_common::types::PrivateKey; use stacks_common::types::StacksEpochId; use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::hash::{Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::retry::BoundReader; -use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; +use stacks_common::util::secp256k1::MessageSignature; +use stacks_common::util::vrf::{VRFProof, VRF}; -use super::burn::db::sortdb::{SortitionHandleConn, SortitionHandleTx}; +use super::burn::db::sortdb::{get_block_commit_by_txid, SortitionHandleConn, SortitionHandleTx}; use super::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp}; use super::stacks::db::accounts::MinerReward; use super::stacks::db::blocks::StagingUserBurnSupport; @@ -54,17 +60,36 @@ use super::stacks::{ TenureChangeError, TenureChangePayload, TransactionPayload, }; use crate::burnchains::PoxConstants; +use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::operations::LeaderBlockCommitOp; +use crate::chainstate::burn::operations::LeaderKeyRegisterOp; +use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::stacks::db::DBConfig as ChainstateConfig; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{MINER_BLOCK_CONSENSUS_HASH, 
MINER_BLOCK_HEADER_HASH}; use crate::clarity_vm::clarity::{ClarityInstance, PreCommitClarityBlock}; use crate::clarity_vm::database::SortitionDBRef; use crate::monitoring; -use crate::util_lib::db::{query_row_panic, query_rows, u64_to_sql, Error as DBError, FromRow}; +use crate::util_lib::db::{ + query_row, query_row_panic, query_rows, u64_to_sql, DBConn, Error as DBError, FromRow, +}; + +use crate::core::BOOT_BLOCK_HASH; + +use crate::chainstate::coordinator::BlockEventDispatcher; +use crate::chainstate::coordinator::Error; + +use crate::net::Error as net_error; + +pub mod coordinator; +pub mod miner; #[cfg(test)] pub mod tests; +pub const NAKAMOTO_BLOCK_VERSION: u8 = 0; + define_named_enum!(HeaderTypeNames { Nakamoto("nakamoto"), Epoch2("epoch2"), @@ -88,17 +113,18 @@ lazy_static! { pub static ref NAKAMOTO_CHAINSTATE_SCHEMA_1: Vec = vec![ r#" -- Table for staging nakamoto blocks + -- TODO: this goes into its own DB at some point CREATE TABLE nakamoto_staging_blocks ( + -- SHA512/256 hash of this block block_hash TEXT NOT NULL, -- the consensus hash of the burnchain block that selected this block's **tenure** consensus_hash TEXT NOT NULL, - burn_view TEXT NOT NULL, -- the parent index_block_hash parent_block_id TEXT NOT NULL, - -- has the burnchain view that this block depends on been processed? + -- has the burnchain block with this block's `consensus_hash` been processed? burn_attachable INT NOT NULL, - -- has the parent stacks block that this block depends on been processed? + -- has the parent Stacks block been processed? stacks_attachable INT NOT NULL, -- set to 1 if this block can never be attached orphaned INT NOT NULL, @@ -106,11 +132,15 @@ lazy_static! 
{ processed INT NOT NULL, height INT NOT NULL, - - index_block_hash TEXT NOT NULL, -- used internally; hash of consensus hash and anchored_block_hash - download_time INT NOT NULL, -- how long the block was in-flight - arrival_time INT NOT NULL, -- when this block was stored - processed_time INT NOT NULL, -- when this block was processed + + -- used internally -- this is the StacksBlockId of this block's consensus hash and block hash + index_block_hash TEXT NOT NULL, + -- how long the block was in-flight + download_time INT NOT NULL, + -- when this block was stored + arrival_time INT NOT NULL, + -- when this block was processed + processed_time INT NOT NULL, -- block data data BLOB NOT NULL, @@ -118,7 +148,7 @@ lazy_static! { PRIMARY KEY(block_hash,consensus_hash) );"#.into(), r#" - -- Table for Nakamoto Block Headers + -- Table for Nakamoto block headers CREATE TABLE nakamoto_block_headers ( -- The following fields all correspond to entries in the StacksHeaderInfo struct block_height INTEGER NOT NULL, @@ -131,6 +161,8 @@ lazy_static! { burn_header_height INT NOT NULL, -- timestamp from burnchain block header that generated this consensus hash burn_header_timestamp INT NOT NULL, + -- size of this block, in bytes. + -- encoded as TEXT for compatibility block_size TEXT NOT NULL, -- The following fields all correspond to entries in the NakamotoBlockHeader struct version INTEGER NOT NULL, @@ -138,40 +170,42 @@ lazy_static! 
{ chain_length INTEGER NOT NULL, -- this field is the total amount of BTC spent in the chain history (including this block) burn_spent INTEGER NOT NULL, - -- the parent BlockHeaderHash - parent TEXT NOT NULL, - -- the latest bitcoin block whose data is viewable from this stacks block - burn_view TEXT NOT NULL, + -- the consensus hash of the burnchain block that selected this block's tenure + consensus_hash TEXT NOT NULL, + -- the parent StacksBlockId + parent_block_id TEXT NOT NULL, + -- Merkle root of a Merkle tree constructed out of all the block's transactions + tx_merkle_root TEXT NOT NULL, + -- root hash of the Stacks chainstate MARF + state_index_root TEXT NOT NULL, -- miner's signature over the block miner_signature TEXT NOT NULL, -- stackers' signature over the block stacker_signature TEXT NOT NULL, - tx_merkle_root TEXT NOT NULL, - state_index_root TEXT NOT NULL, - -- the consensus hash of the burnchain block that selected this block's tenure - consensus_hash TEXT NOT NULL, - -- the consensus hash of the burnchain block that selected **parent**'s tenure - parent_consensus_hash TEXT NOT NULL, -- The following fields are not part of either the StacksHeaderInfo struct -- or its contained NakamotoBlockHeader struct, but are used for querying + -- what kind of header this is (nakamoto or stacks 2.x) header_type TEXT NOT NULL, + -- hash of the block block_hash TEXT NOT NULL, -- index_block_hash is the hash of the block hash and consensus hash of the burn block that selected it, -- and is guaranteed to be globally unique (across all Stacks forks and across all PoX forks). -- index_block_hash is the block hash fed into the MARF index. 
index_block_hash TEXT NOT NULL, - -- the total cost of the block + -- the ExecutionCost of the block cost TEXT NOT NULL, -- the total cost up to and including this block in the current tenure total_tenure_cost TEXT NOT NULL, - -- the parent index_block_hash - parent_block_id TEXT NOT NULL, - -- this field is the total number of *tenures* in the chain history (including this tenure) + -- this field is the total number of *tenures* in the chain history (including this tenure), + -- as of the _end_ of this block. A block can contain multiple TenureChanges; if so, then this + -- is the height of the _last_ TenureChange. tenure_height INTEGER NOT NULL, -- this field is true if this is the first block of a new tenure tenure_changed INTEGER NOT NULL, -- this field tracks the total tx fees so far in this tenure. it is a text-serialized u128 tenure_tx_fees TEXT NOT NULL, + -- nakamoto block's VRF proof, if this is a tenure-start block + vrf_proof TEXT, PRIMARY KEY(consensus_hash,block_hash) ); "#.into(), @@ -186,17 +220,26 @@ lazy_static! 
{ ]; } +/// Result of preparing to produce or validate a block pub struct SetupBlockResult<'a, 'b> { + /// Handle to the ClarityVM pub clarity_tx: ClarityTx<'a, 'b>, + /// Transaction receipts from any Stacks-on-Bitcoin transactions and epoch transition events pub tx_receipts: Vec, - pub matured_miner_rewards_opt: - Option<(MinerReward, Vec, MinerReward, MinerRewardInfo)>, + /// Miner rewards that can be paid now: (this-miner-reward, parent-miner-reward, miner-info) + pub matured_miner_rewards_opt: Option<(MinerReward, MinerReward, MinerRewardInfo)>, + /// Epoch in which this block was set up pub evaluated_epoch: StacksEpochId, + /// Whether or not we applied an epoch transition in this block pub applied_epoch_transition: bool, + /// stack-stx Stacks-on-Bitcoin txs pub burn_stack_stx_ops: Vec, + /// transfer-stx Stacks-on-Bitcoin txs pub burn_transfer_stx_ops: Vec, - pub auto_unlock_events: Vec, + /// delegate-stx Stacks-on-Bitcoin txs pub burn_delegate_stx_ops: Vec, + /// STX auto-unlock events from PoX + pub auto_unlock_events: Vec, } #[derive(Debug, Clone, PartialEq)] @@ -208,11 +251,12 @@ pub struct NakamotoBlockHeader { /// Total amount of BTC spent producing the sortition that /// selected this block's miner. pub burn_spent: u64, - /// The block hash of the immediate parent of this block. - pub parent: BlockHeaderHash, - /// The bitcoin block whose data has been handled most recently by - /// the Stacks chain as of this block. - pub burn_view: BurnchainHeaderHash, + /// The consensus hash of the burnchain block that selected this tenure. The consensus hash + /// uniquely identifies this tenure, including across all Bitcoin forks. + pub consensus_hash: ConsensusHash, + /// The index block hash of the immediate parent of this block. + /// This is the hash of the parent block's hash and consensus hash. 
+ pub parent_block_id: StacksBlockId, /// The root of a SHA512/256 merkle tree over all this block's /// contained transactions pub tx_merkle_root: Sha512Trunc256Sum, @@ -221,15 +265,11 @@ pub struct NakamotoBlockHeader { /// Recoverable ECDSA signature from the tenure's miner. pub miner_signature: MessageSignature, /// Recoverable ECDSA signature from the stacker set active during the tenure. + /// TODO: This is a placeholder pub stacker_signature: MessageSignature, - /// The consensus hash of the burnchain block that selected this tenure. - pub consensus_hash: ConsensusHash, - /// The consensus hash of the burnchain block that selected the tenure of this block's parent. - /// (note: nakamoto blocks produced in the same tenure as their parent will have the same consensus hash) - pub parent_consensus_hash: ConsensusHash, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct NakamotoBlock { pub header: NakamotoBlockHeader, pub txs: Vec, @@ -246,27 +286,23 @@ impl FromRow for NakamotoBlockHeader { .map_err(|_| DBError::ParseError)?; let burn_spent_i64: i64 = row.get("burn_spent")?; let burn_spent = burn_spent_i64.try_into().map_err(|_| DBError::ParseError)?; - let parent = row.get("parent")?; - let burn_view = row.get("burn_view")?; - let stacker_signature = row.get("stacker_signature")?; - let miner_signature = row.get("miner_signature")?; + let consensus_hash = row.get("consensus_hash")?; + let parent_block_id = row.get("parent_block_id")?; let tx_merkle_root = row.get("tx_merkle_root")?; let state_index_root = row.get("state_index_root")?; - let consensus_hash = row.get("consensus_hash")?; - let parent_consensus_hash = row.get("parent_consensus_hash")?; + let stacker_signature = row.get("stacker_signature")?; + let miner_signature = row.get("miner_signature")?; Ok(NakamotoBlockHeader { version, chain_length, burn_spent, - parent, - burn_view, - stacker_signature, - miner_signature, + consensus_hash, + parent_block_id, tx_merkle_root, 
state_index_root, - consensus_hash, - parent_consensus_hash, + stacker_signature, + miner_signature, }) } } @@ -276,14 +312,12 @@ impl StacksMessageCodec for NakamotoBlockHeader { write_next(fd, &self.version)?; write_next(fd, &self.chain_length)?; write_next(fd, &self.burn_spent)?; - write_next(fd, &self.parent)?; - write_next(fd, &self.burn_view)?; + write_next(fd, &self.consensus_hash)?; + write_next(fd, &self.parent_block_id)?; write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; write_next(fd, &self.miner_signature)?; write_next(fd, &self.stacker_signature)?; - write_next(fd, &self.consensus_hash)?; - write_next(fd, &self.parent_consensus_hash)?; Ok(()) } @@ -293,39 +327,36 @@ impl StacksMessageCodec for NakamotoBlockHeader { version: read_next(fd)?, chain_length: read_next(fd)?, burn_spent: read_next(fd)?, - parent: read_next(fd)?, - burn_view: read_next(fd)?, + consensus_hash: read_next(fd)?, + parent_block_id: read_next(fd)?, tx_merkle_root: read_next(fd)?, state_index_root: read_next(fd)?, miner_signature: read_next(fd)?, stacker_signature: read_next(fd)?, - consensus_hash: read_next(fd)?, - parent_consensus_hash: read_next(fd)?, }) } } impl NakamotoBlockHeader { + /// Calculate the message digest to sign. + /// This includes all fields _except_ the signatures. 
pub fn signature_hash(&self) -> Result { let mut hasher = Sha512_256::new(); let fd = &mut hasher; write_next(fd, &self.version)?; write_next(fd, &self.chain_length)?; write_next(fd, &self.burn_spent)?; - write_next(fd, &self.parent)?; - write_next(fd, &self.burn_view)?; + write_next(fd, &self.consensus_hash)?; + write_next(fd, &self.parent_block_id)?; write_next(fd, &self.tx_merkle_root)?; write_next(fd, &self.state_index_root)?; - write_next(fd, &self.consensus_hash)?; - write_next(fd, &self.parent_consensus_hash)?; Ok(Sha512Trunc256Sum::from_hasher(hasher)) } - pub fn recover_miner_pk(&self) -> Option { + pub fn recover_miner_pk(&self) -> Option { let signed_hash = self.signature_hash().ok()?; let recovered_pk = - Secp256k1PublicKey::recover_to_pubkey(signed_hash.bits(), &self.miner_signature) - .ok()?; + StacksPublicKey::recover_to_pubkey(signed_hash.bits(), &self.miner_signature).ok()?; Some(recovered_pk) } @@ -334,108 +365,516 @@ impl NakamotoBlockHeader { BlockHeaderHash::from_serializer(self) .expect("BUG: failed to serialize block header hash struct") } + + pub fn block_id(&self) -> StacksBlockId { + StacksBlockId::new(&self.consensus_hash, &self.block_hash()) + } + + pub fn is_first_mined(&self) -> bool { + self.parent_block_id == StacksBlockId::first_mined() + } + + /// Sign the block header by the miner + pub fn sign_miner(&mut self, privk: &StacksPrivateKey) -> Result<(), ChainstateError> { + let sighash = self.signature_hash()?.0; + let sig = privk + .sign(&sighash) + .map_err(|se| net_error::SigningError(se.to_string()))?; + self.miner_signature = sig; + Ok(()) + } + + /// Make an "empty" header whose block data needs to be filled in. + /// This is used by the miner code. 
+ pub fn from_parent_empty( + chain_length: u64, + burn_spent: u64, + consensus_hash: ConsensusHash, + parent_block_id: StacksBlockId, + ) -> NakamotoBlockHeader { + NakamotoBlockHeader { + version: NAKAMOTO_BLOCK_VERSION, + chain_length, + burn_spent, + consensus_hash, + parent_block_id, + tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), + state_index_root: TrieHash([0u8; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty(), + } + } + + /// Make a completely empty header + pub fn empty() -> NakamotoBlockHeader { + NakamotoBlockHeader { + version: 0, + chain_length: 0, + burn_spent: 0, + consensus_hash: ConsensusHash([0u8; 20]), + parent_block_id: StacksBlockId([0u8; 32]), + tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), + state_index_root: TrieHash([0u8; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty(), + } + } + + /// Make a genesis header (testing only) + pub fn genesis() -> NakamotoBlockHeader { + NakamotoBlockHeader { + version: 0, + chain_length: 0, + burn_spent: 0, + consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + parent_block_id: StacksBlockId(BOOT_BLOCK_HASH.0.clone()), + tx_merkle_root: Sha512Trunc256Sum([0u8; 32]), + state_index_root: TrieHash([0u8; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty(), + } + } } impl NakamotoBlock { - /// Did the stacks tenure change on this nakamoto block? i.e., does this block - /// include a TenureChange transaction? - pub fn tenure_changed(&self, parent: &StacksBlockId) -> bool { + /// Find all positionally-valid tenure changes in this block. + /// They must be the first transactions. + /// Return their indexes into self.txs + fn find_tenure_changes(&self) -> Vec { + let mut ret = vec![]; + for (i, tx) in self.txs.iter().enumerate() { + if let TransactionPayload::TenureChange(..) 
= &tx.payload { + ret.push(i); + } else { + break; + } + } + ret + } + + /// Does this block contain one or more well-formed and valid tenure change transactions? + /// Return Some(true) if it does contain at least one, and they're all valid + /// Return Some(false) if it does contain at least one, but at least one is invalid + /// Return None if it contains none. + pub fn tenure_changed(&self) -> Option { + let wellformed = self.is_wellformed_first_tenure_block(); + if wellformed.is_none() { + // block isn't a first-tenure block, so no valid tenure changes + return None; + } else if let Some(false) = wellformed { + // this block is malformed + info!("Block is malformed"; + "block_id" => %self.block_id()); + return Some(false); + } + // Find all txs that have TenureChange payload let tenure_changes = self - .txs + .find_tenure_changes() .iter() - .filter_map(|tx| match &tx.payload { - TransactionPayload::TenureChange(payload) => Some(payload), - _ => None, - }) + .map(|i| &self.txs[*i]) .collect::>(); if tenure_changes.len() > 1 { - warn!( + debug!( "Block contains multiple TenureChange transactions"; "tenure_change_txs" => tenure_changes.len(), - "parent_block_id" => %self.header.parent, - "burn_view" => %self.header.burn_view, + "parent_block_id" => %self.header.parent_block_id, + "consensus_hash" => %self.header.consensus_hash, ); } - let validate = |tc: &TenureChangePayload| -> Result<(), TenureChangeError> { - if tc.previous_tenure_end != *parent { - return Err(TenureChangeError::PreviousTenureInvalid); - } + let validate = |tc: &StacksTransaction| -> Result<(), TenureChangeError> { + if let TransactionPayload::TenureChange(tc) = &tc.payload { + if tc.previous_tenure_end != self.header.parent_block_id { + return Err(TenureChangeError::PreviousTenureInvalid); + } - tc.validate() + // TODO: check number of blocks in previous tenure + // TODO: check tenure change cause + tc.validate() + } else { + // placeholder error + Err(TenureChangeError::NotNakamoto) + } }; 
- // Return true if there is a valid TenureChange - tenure_changes - .iter() - .find(|tc| validate(tc).is_ok()) - .is_some() + // Return true if all of the following are true: + // (1) there is at least one tenure change + // (2) all tenure changes are valid + Some( + tenure_changes.len() > 0 + && tenure_changes.len() + == tenure_changes + .iter() + .filter(|tc| validate(tc).is_ok()) + .collect::>() + .len(), + ) } pub fn is_first_mined(&self) -> bool { - StacksBlockHeader::is_first_block_hash(&self.header.parent) + self.header.is_first_mined() } + /// Get the coinbase transaction in Nakamoto. + /// It's the first non-TenureChange transaction + /// (and, all preceding transactions _must_ be TenureChanges) pub fn get_coinbase_tx(&self) -> Option<&StacksTransaction> { - match self.txs.get(0).map(|x| &x.payload) { - Some(TransactionPayload::Coinbase(..)) => Some(&self.txs[0]), - _ => None, + let wellformed = self.is_wellformed_first_tenure_block(); + if wellformed.is_none() { + // block isn't a first-tenure block, so no coinbase + return None; + } + if let Some(false) = wellformed { + // block isn't well-formed + return None; } + + // there is one coinbase. + // go find it. + self.txs.iter().find(|tx| { + if let TransactionPayload::Coinbase(..) = &tx.payload { + true + } else { + false + } + }) } -} -impl NakamotoChainState { - pub fn get_chain_tip( + /// Get the VRF proof from this block. + /// It's Some(..) only if there's a coinbase + pub fn get_vrf_proof(&self) -> Option<&VRFProof> { + self.get_coinbase_tx() + .map(|coinbase_tx| { + if let TransactionPayload::Coinbase(_, _, vrf_proof) = &coinbase_tx.payload { + vrf_proof.as_ref() + } else { + // actually unreachable + None + } + }) + .flatten() + } + + /// Determine if this is a well-formed first block in a tenure. 
+ /// * It has one or more TenureChange transactions + /// * It then has a coinbase + /// * Coinbases and TenureChanges do not occur anywhere else + /// + /// Returns Some(true) if the above are true + /// Returns Some(false) if this block has at least one coinbase or TenureChange tx, but one of + /// the above checks are false + /// Returns None if this block has no coinbase or TenureChange txs + pub fn is_wellformed_first_tenure_block(&self) -> Option { + // sanity check -- this may contain no coinbases or tenure-changes + let coinbase_positions = self + .txs + .iter() + .enumerate() + .filter_map(|(i, tx)| { + if let TransactionPayload::Coinbase(..) = &tx.payload { + Some(i) + } else { + None + } + }) + .collect::>(); + + let tenure_change_positions = self + .txs + .iter() + .enumerate() + .filter_map(|(i, tx)| { + if let TransactionPayload::TenureChange(..) = &tx.payload { + Some(i) + } else { + None + } + }) + .collect::>(); + + if coinbase_positions.len() == 0 && tenure_change_positions.len() == 0 { + // can't be a first block in a tenure + return None; + } + + if coinbase_positions.len() > 1 { + // has more than one coinbase + return Some(false); + } + + if coinbase_positions.len() == 1 && tenure_change_positions.len() == 0 { + // has a coinbase but no tenure change + return Some(false); + } + + if coinbase_positions.len() == 0 && tenure_change_positions.len() > 0 { + // has tenure-changes but no coinbase + return Some(false); + } + + // tenure-changes must all come first, and must be in order + for (i, pos) in tenure_change_positions.iter().enumerate() { + if &i != pos { + // tenure-change is out of place + return Some(false); + } + } + + let coinbase_idx = *coinbase_positions + .first() + .expect("FATAL: coinbase_positions.len() == 1"); + if coinbase_idx != tenure_change_positions.len() { + // coinbase is not the next transaction after tenure changes + return Some(false); + } + + let TransactionPayload::Coinbase(_, _, vrf_proof_opt) = 
&self.txs[coinbase_idx].payload + else { + // this transaction is not a coinbase (but this should be unreachable) + return Some(false); + }; + if vrf_proof_opt.is_none() { + // no a Nakamoto coinbase + return Some(false); + } + + return Some(true); + } + + /// Verify that the VRF seed of this block's block-commit is the hash of the parent tenure's + /// VRF seed. + pub fn validate_vrf_seed( + &self, + sortdb_conn: &Connection, chainstate_conn: &Connection, - sortition_tip: &ConsensusHash, - sortition_db_conn: &Connection, - ) -> Result<(BlockHeaderHash, ConsensusHash), ChainstateError> { - let block_sql = - "SELECT block_hash, consensus_hash, block_height FROM nakamoto_block_headers - WHERE consensus_hash = ? - UNION - SELECT block_hash, consensus_hash, block_height FROM block_headers - WHERE consensus_hash = ? - ORDER BY block_height DESC LIMIT 1"; - let mut cur_sortition_tip = sortition_tip.clone(); - loop { - if let Some(tip_pair) = chainstate_conn - .query_row( - block_sql, - params![&cur_sortition_tip, &cur_sortition_tip], - |row| { - let block_header_hash = row.get(0)?; - let consensus_hash = row.get(1)?; - Ok((block_header_hash, consensus_hash)) - }, - ) - .optional()? - { - return Ok(tip_pair); - } else { - // no blocks are processed that were produced at `cur_sortition_tip`, check `cur_sortition_tip`'s parent. - let parent_sortition_id = SortitionDB::get_block_snapshot_consensus( - sortition_db_conn, - &cur_sortition_tip, - )? - .ok_or_else(|| ChainstateError::NoSuchBlockError)? - .parent_sortition_id; + block_commit: &LeaderBlockCommitOp, + ) -> Result<(), ChainstateError> { + // the block-commit from the miner who created this coinbase must have a VRF seed that + // is the hash of the parent tenure's VRF proof. 
+ let parent_vrf_proof = NakamotoChainState::get_parent_vrf_proof( + chainstate_conn, + sortdb_conn, + &self.header.consensus_hash, + &block_commit.txid, + )?; + if !block_commit.new_seed.is_from_proof(&parent_vrf_proof) { + warn!("Invalid Nakamoto block-commit: seed does not match parent VRF proof"; + "block_id" => %self.block_id(), + "commit_seed" => %block_commit.new_seed, + "proof_seed" => %VRFSeed::from_proof(&parent_vrf_proof), + "parent_vrf_proof" => %parent_vrf_proof.to_hex(), + "block_commit" => format!("{:?}", &block_commit) + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: bad VRF proof".into(), + )); + } + Ok(()) + } + + pub fn block_id(&self) -> StacksBlockId { + self.header.block_id() + } + + /// Validate this Nakamoto block header against burnchain state. + /// Used to determine whether or not we'll keep a block around (even if we don't yet have its parent). + /// + /// Arguments + /// -- `burn_chain_tip` is the BlockSnapshot containing the block-commit for this block's + /// tenure + /// -- `leader_key` is the miner's leader key registration transaction + /// -- `block_commit` is the block-commit for this tenure + /// + /// Verifies the following: + /// -- that this block falls into this block-commit's tenure + /// -- that this miner signed this block + /// -- if this block has a coinbase, then that its VRF proof was generated by this miner + /// -- that this block's burn total matches `burn_chain_tip`'s total burn + pub fn validate_against_burnchain( + &self, + burn_chain_tip: &BlockSnapshot, + leader_key: &LeaderKeyRegisterOp, + ) -> Result<(), ChainstateError> { + // this block's consensus hash must match the sortition that selected it + if burn_chain_tip.consensus_hash != self.header.consensus_hash { + warn!("Invalid Nakamoto block: consensus hash does not match sortition"; + "consensus_hash" => %self.header.consensus_hash, + "sortition.consensus_hash" => %burn_chain_tip.consensus_hash + ); + return 
Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: invalid consensus hash".into(), + )); + } + + // miner must have signed this block + let miner_pubkey_hash160 = leader_key + .interpret_nakamoto_signing_key() + .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!( + "Leader key did not contain a hash160 of the miner signing public key"; + "leader_key" => format!("{:?}", &leader_key), + ); + e + })?; + + let recovered_miner_pubk = self.header.recover_miner_pk().ok_or_else(|| { + warn!( + "Nakamoto Stacks block downloaded with unrecoverable miner public key"; + "block_hash" => %self.header.block_hash(), + "block_id" => %self.header.block_id(), + ); + return ChainstateError::InvalidStacksBlock("Unrecoverable miner public key".into()); + })?; + + let recovered_miner_hash160 = Hash160::from_node_public_key(&recovered_miner_pubk); + if recovered_miner_hash160 != miner_pubkey_hash160 { + warn!( + "Nakamoto Stacks block signature from {recovered_miner_pubk:?} mismatch: {recovered_miner_hash160} != {miner_pubkey_hash160} from leader-key"; + "block_hash" => %self.header.block_hash(), + "block_id" => %self.header.block_id(), + "leader_key" => format!("{:?}", &leader_key), + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid miner signature".into(), + )); + } + + // If this block has a coinbase, then verify that its VRF proof was generated by this + // block's miner. We'll verify that the seed of this block-commit was generated from the + // parent tenure's VRF proof via the `validate_vrf_seed()` method, which requires that we + // already have the parent block. 
+ if let Some(coinbase_tx) = self.get_coinbase_tx() { + let (_, _, vrf_proof_opt) = coinbase_tx + .try_as_coinbase() + .expect("FATAL: `get_coinbase_tx()` did not return a coinbase"); + let vrf_proof = vrf_proof_opt.ok_or(ChainstateError::InvalidStacksBlock( + "Nakamoto coinbase must have a VRF proof".into(), + ))?; + + // this block's VRF proof must have been generated from the last sortition's sortition + // hash (which includes the last commit's VRF seed) + let valid = match VRF::verify( + &leader_key.public_key, + vrf_proof, + burn_chain_tip.sortition_hash.as_bytes(), + ) { + Ok(v) => v, + Err(e) => { + warn!( + "Invalid Stacks block header {}: failed to verify VRF proof: {}", + self.header.block_hash(), + e + ); + false + } + }; + + if !valid { + warn!("Invalid Nakamoto block: leader VRF key did not produce a valid proof"; + "block_id" => %self.block_id(), + "leader_public_key" => %leader_key.public_key.to_hex(), + "sortition_hash" => %burn_chain_tip.sortition_hash + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: leader VRF key did not produce a valid proof".into(), + )); + } + } - cur_sortition_tip = - SortitionDB::get_block_snapshot(sortition_db_conn, &parent_sortition_id)? - .ok_or_else(|| ChainstateError::NoSuchBlockError)? 
- .consensus_hash; + // this block must commit to all of the work seen so far + if self.header.burn_spent != burn_chain_tip.total_burn { + warn!("Invalid Nakamoto block header: invalid total burns"; + "header.burn_spent" => self.header.burn_spent, + "burn_chain_tip.total_burn" => burn_chain_tip.total_burn + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: invalid total burns".into(), + )); + } + // not verified by this method: + // * chain_length (need parent block header) + // * parent_block_id (need parent block header) + // * block-commit seed (need parent block) + // * tx_merkle_root (already verified; validated on deserialization) + // * state_index_root (validated on process_block()) + Ok(()) + } - // this is true at the "genesis" sortition: this means that there is no stacks block produced yet. - if cur_sortition_tip == FIRST_BURNCHAIN_CONSENSUS_HASH { - return Err(ChainstateError::NoSuchBlockError); + /// Static sanity checks on transactions. + /// Verifies: + /// * the block is non-empty + /// * that all txs are unique + /// * that all txs use the given network + /// * that all txs use the given chain ID + /// * if this is a tenure-start tx, that: + /// * it has a well-formed coinbase + /// * all TenureChange transactions are present and in the right order, starting with + /// `stacks_tip` and leading up to this block + /// * that only epoch-permitted transactions are present + pub fn validate_transactions_static( + &self, + mainnet: bool, + chain_id: u32, + epoch_id: StacksEpochId, + ) -> bool { + if self.txs.is_empty() { + return false; + } + if !StacksBlock::validate_transactions_unique(&self.txs) { + return false; + } + if !StacksBlock::validate_transactions_network(&self.txs, mainnet) { + return false; + } + if !StacksBlock::validate_transactions_chain_id(&self.txs, chain_id) { + return false; + } + if let Some(valid) = self.tenure_changed() { + if !valid { + // bad tenure change + return false; + } + if 
self.get_coinbase_tx().is_none() { + return false; + } + } + if !StacksBlock::validate_transactions_static_epoch(&self.txs, epoch_id) { + return false; + } + match self.is_wellformed_first_tenure_block() { + Some(true) => match self.tenure_changed() { + Some(false) | None => { + // either the tenure_changed() check failed, or this is a tenure change that is + // not in a well-formed tenure block. Either way, this block is invalid. + return false; } + _ => {} + }, + Some(false) => { + // tenure_change() check failed + return false; } + None => {} } + return true; } +} + +impl StacksChainState { + /// Begin a transaction against the staging blocks DB. + /// Note that this DB is (or will eventually be) in a separate database from the headers. + pub fn staging_db_tx_begin<'a>( + &'a mut self, + ) -> Result, ChainstateError> { + // TODO: this should be against a separate DB! + self.db_tx_begin() + } +} +impl NakamotoChainState { /// Notify the staging database that a given stacks block has been processed. /// This will update the attachable status for children blocks, as well as marking the stacks /// block itself as processed. @@ -458,22 +897,47 @@ impl NakamotoChainState { Ok(()) } + /// Modify the staging database that a given stacks block can never be processed. + /// This will update the attachable status for children blocks, as well as marking the stacks + /// block itself as orphaned. 
+ pub fn set_block_orphaned( + staging_db_tx: &rusqlite::Transaction, + block: &StacksBlockId, + ) -> Result<(), ChainstateError> { + let update_dependents = + "UPDATE nakamoto_staging_blocks SET stacks_attachable = 0, orphaned = 1 + WHERE parent_block_id = ?"; + staging_db_tx.execute(&update_dependents, &[&block])?; + + let clear_staged_block = + "UPDATE nakamoto_staging_blocks SET processed = 1, processed_time = ?2, orphaned = 1 + WHERE index_block_hash = ?1"; + staging_db_tx.execute( + &clear_staged_block, + params![&block, &u64_to_sql(get_epoch_time_secs())?], + )?; + + Ok(()) + } + /// Notify the staging database that a given burn block has been processed. /// This is required for staged blocks to be eligible for processing. pub fn set_burn_block_processed( staging_db_tx: &rusqlite::Transaction, - block: &BurnchainHeaderHash, + consensus_hash: &ConsensusHash, ) -> Result<(), ChainstateError> { let update_dependents = "UPDATE nakamoto_staging_blocks SET burn_attachable = 1 - WHERE burn_view = ?"; - staging_db_tx.execute(&update_dependents, &[&block])?; + WHERE consensus_hash = ?"; + staging_db_tx.execute(&update_dependents, &[consensus_hash])?; Ok(()) } - pub fn next_ready_block( + /// Find the next ready-to-process Nakamoto block, given a connection to the staging blocks DB. 
+ /// Returns (the block, the size of the block) + pub fn next_ready_nakamoto_block( staging_db_conn: &Connection, - ) -> Result, ChainstateError> { + ) -> Result, ChainstateError> { let query = "SELECT data FROM nakamoto_staging_blocks WHERE burn_attachable = 1 AND stacks_attachable = 1 @@ -484,7 +948,10 @@ impl NakamotoChainState { .query_row_and_then(query, NO_PARAMS, |row| { let data: Vec = row.get("data")?; let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?; - Ok(Some(block)) + Ok(Some(( + block, + u64::try_from(data.len()).expect("FATAL: block is bigger than a u64"), + ))) }) .or_else(|e| { if let ChainstateError::DBError(DBError::SqliteError( @@ -498,55 +965,320 @@ impl NakamotoChainState { }) } - pub fn accept_block( - block: NakamotoBlock, - sortdb: &SortitionHandleConn, - staging_db_tx: &rusqlite::Transaction, - ) -> Result<(), ChainstateError> { - let recovered_miner_pk = block.header.recover_miner_pk().ok_or_else(|| { - warn!( - "Stacks block downloaded with unrecoverable miner public key"; - "block_hash" => %block.header.block_hash(), + /// Extract and parse a nakamoto block from the DB, and verify its integrity. 
+ pub fn load_nakamoto_block( + staging_db_conn: &Connection, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + ) -> Result, ChainstateError> { + let query = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; + staging_db_conn + .query_row_and_then( + query, + rusqlite::params![consensus_hash, block_hash], + |row| { + let data: Vec = row.get("data")?; + let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice()) + .map_err(|_| DBError::ParseError)?; + if &block.header.block_hash() != block_hash { + error!( + "Staging DB corruption: expected {}, got {}", + &block_hash, + &block.header.block_hash() + ); + return Err(DBError::Corruption.into()); + } + Ok(Some(block)) + }, + ) + .or_else(|e| { + if let ChainstateError::DBError(DBError::SqliteError( + rusqlite::Error::QueryReturnedNoRows, + )) = e + { + Ok(None) + } else { + Err(e.into()) + } + }) + } + + /// Process the next ready block. + /// If there exists a ready Nakamoto block, then this method returns Ok(Some(..)) with the + /// receipt. Otherwise, it returns Ok(None). + /// + /// It returns Err(..) on DB error, or if the child block does not connect to the parent. + /// The caller should keep calling this until it gets Ok(None) + pub fn process_next_nakamoto_block<'a, T: BlockEventDispatcher>( + stacks_chain_state: &mut StacksChainState, + sort_tx: &mut SortitionHandleTx, + dispatcher_opt: Option<&'a T>, + ) -> Result, ChainstateError> { + let (mut chainstate_tx, clarity_instance) = stacks_chain_state.chainstate_tx_begin()?; + let Some((next_ready_block, block_size)) = + Self::next_ready_nakamoto_block(&chainstate_tx.tx)? + else { + // no more blocks + return Ok(None); + }; + + let block_id = next_ready_block.block_id(); + + // find corresponding snapshot + let next_ready_block_snapshot = SortitionDB::get_block_snapshot_consensus( + sort_tx, + &next_ready_block.header.consensus_hash, + )? 
+ .expect(&format!( + "CORRUPTION: staging Nakamoto block {}/{} does not correspond to a burn block", + &next_ready_block.header.consensus_hash, + &next_ready_block.header.block_hash() + )); + + debug!("Process staging Nakamoto block"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "burn_block_hash" => %next_ready_block_snapshot.burn_header_hash + ); + + // find parent header + let Some(parent_header_info) = + Self::get_block_header(&chainstate_tx.tx, &next_ready_block.header.parent_block_id)? + else { + // no parent; cannot process yet + debug!("Cannot process Nakamoto block: missing parent header"; + "consensus_hash" => %next_ready_block.header.consensus_hash, + "block_hash" => %next_ready_block.header.block_hash(), + "parent_block_id" => %next_ready_block.header.parent_block_id ); - return ChainstateError::InvalidStacksBlock("Unrecoverable miner public key".into()); - })?; + return Ok(None); + }; - if sortdb - .expects_blocks_from_tenure(&recovered_miner_pk)? 
- .is_none() - { - let msg = format!("Received block, signed by {recovered_miner_pk:?}, but this pubkey was not associated with recent tenures"); - warn!("{}", msg); - return Err(ChainstateError::InvalidStacksBlock(msg)); + // sanity check -- must attach to parent + let parent_block_id = StacksBlockId::new( + &parent_header_info.consensus_hash, + &parent_header_info.anchored_header.block_hash(), + ); + if parent_block_id != next_ready_block.header.parent_block_id { + let msg = "Discontinuous Nakamoto Stacks block"; + warn!("{}", &msg; + "child parent_block_id" => %next_ready_block.header.parent_block_id, + "expected parent_block_id" => %parent_block_id + ); + let _ = Self::set_block_orphaned(&chainstate_tx.tx, &block_id); + chainstate_tx.commit()?; + return Err(ChainstateError::InvalidStacksBlock(msg.into())); + } + + // find commit and sortition burns if this is a tenure-start block + // TODO: store each *tenure* + let tenure_changed = if let Some(tenure_valid) = next_ready_block.tenure_changed() { + if !tenure_valid { + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: invalid tenure change tx(s)".into(), + )); + } + true + } else { + false }; - if !sortdb.expects_stacker_signature(&block.header.stacker_signature)? { - let msg = format!("Received block, signed by {recovered_miner_pk:?}, but the stacker signature does not match the active stacking cycle"); - warn!("{}", msg); - return Err(ChainstateError::InvalidStacksBlock(msg)); + let (commit_burn, sortition_burn) = if tenure_changed { + // find block-commit to get commit-burn + let block_commit = sort_tx + .get_block_commit( + &next_ready_block_snapshot.winning_block_txid, + &next_ready_block_snapshot.sortition_id, + )? 
+ .expect("FATAL: no block-commit for tenure-start block"); + + let sort_burn = SortitionDB::get_block_burn_amount( + sort_tx.deref().deref(), + &next_ready_block_snapshot, + )?; + (block_commit.burn_fee, sort_burn) + } else { + (0, 0) + }; + + // attach the block to the chain state and calculate the next chain tip. + let pox_constants = sort_tx.context.pox_constants.clone(); + let (receipt, clarity_commit) = match NakamotoChainState::append_block( + &mut chainstate_tx, + clarity_instance, + sort_tx, + &pox_constants, + &parent_header_info, + &next_ready_block_snapshot.burn_header_hash, + next_ready_block_snapshot + .block_height + .try_into() + .expect("Failed to downcast u64 to u32"), + next_ready_block_snapshot.burn_header_timestamp, + &next_ready_block, + block_size, + commit_burn, + sortition_burn, + ) { + Ok(next_chain_tip_info) => next_chain_tip_info, + Err(e) => { + test_debug!( + "Failed to append {}/{}: {:?}", + &next_ready_block.header.consensus_hash, + &next_ready_block.header.block_hash(), + &e + ); + let _ = Self::set_block_orphaned(&chainstate_tx.tx, &block_id); + chainstate_tx.commit()?; + return Err(e); + } + }; + + assert_eq!( + receipt.header.anchored_header.block_hash(), + next_ready_block.header.block_hash() + ); + assert_eq!( + receipt.header.consensus_hash, + next_ready_block.header.consensus_hash + ); + + // set stacks block accepted + sort_tx.set_stacks_block_accepted( + &next_ready_block.header.consensus_hash, + &next_ready_block.header.block_hash(), + next_ready_block.header.chain_length, + )?; + + // announce the block, if we're connected to an event dispatcher + if let Some(dispatcher) = dispatcher_opt { + let block_event = ( + next_ready_block, + parent_header_info.anchored_header.block_hash(), + ) + .into(); + dispatcher.announce_block( + &block_event, + &receipt.header.clone(), + &receipt.tx_receipts, + &parent_block_id, + next_ready_block_snapshot.winning_block_txid, + &receipt.matured_rewards, + 
receipt.matured_rewards_info.as_ref(), + receipt.parent_burn_block_hash, + receipt.parent_burn_block_height, + receipt.parent_burn_block_timestamp, + &receipt.anchored_block_cost, + &receipt.parent_microblocks_cost, + &pox_constants, + ); } - let parent_block_id = - StacksBlockId::new(&block.header.parent_consensus_hash, &block.header.parent); + // this will panic if the Clarity commit fails. + clarity_commit.commit(); + chainstate_tx.commit() + .unwrap_or_else(|e| { + error!("Failed to commit chainstate transaction after committing Clarity block. The chainstate database is now corrupted."; + "error" => ?e); + panic!() + }); - // if the burnview of this block has been processed, then it - // is ready to be processed from the perspective of the - // burnchain - let burn_attachable = sortdb.processed_block(&block.header.burn_view)?; - // check if the parent Stacks Block ID has been processed. if so, then this block is stacks_attachable - let stacks_attachable = block.is_first_mined() || staging_db_tx.query_row( - "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ? AND processed = 1", - rusqlite::params![&parent_block_id], - |_row| Ok(()) - ).optional()?.is_some(); + Ok(Some(receipt)) + } + /// Validate that a Nakamoto block attaches to the burn chain state. + /// Called before inserting the block into the staging DB. + /// Wraps `NakamotoBlock::validate_against_burnchain()`, and + /// verifies that all transactions in the block are allowed in this epoch. 
+ pub fn validate_nakamoto_block_burnchain( + db_handle: &SortitionHandleConn, + block: &NakamotoBlock, + mainnet: bool, + chain_id: u32, + ) -> Result<(), ChainstateError> { + // find the sortition-winning block commit for this block, as well as the block snapshot + // containing the parent block-commit let block_hash = block.header.block_hash(); - let block_id = StacksBlockId::new(&block.header.consensus_hash, &block_hash); + let consensus_hash = &block.header.consensus_hash; + + // burn chain tip that selected this commit's block + let Some(burn_chain_tip) = + SortitionDB::get_block_snapshot_consensus(db_handle, &consensus_hash)? + else { + warn!("No sortition for {}", &consensus_hash); + return Err(ChainstateError::InvalidStacksBlock( + "No sortition for block's consensus hash".into(), + )); + }; + + // the block-commit itself + let Some(block_commit) = db_handle.get_block_commit_by_txid( + &burn_chain_tip.sortition_id, + &burn_chain_tip.winning_block_txid, + )? + else { + warn!( + "No block commit for {} in sortition for {}", + &burn_chain_tip.winning_block_txid, &consensus_hash + ); + return Err(ChainstateError::InvalidStacksBlock( + "No block-commit in sortition for block's consensus hash".into(), + )); + }; + + // key register of the winning miner + let leader_key = db_handle + .get_leader_key_at( + u64::from(block_commit.key_block_ptr), + u32::from(block_commit.key_vtxindex), + )? 
+ .expect("FATAL: have block commit but no leader key"); + + // attaches to burn chain + if let Err(e) = block.validate_against_burnchain(&burn_chain_tip, &leader_key) { + warn!( + "Invalid Nakamoto block, could not validate on burnchain"; + "consensus_hash" => %consensus_hash, + "block_hash" => %block_hash, + "error" => format!("{:?}", &e) + ); + + return Err(e); + } + + // check the _next_ block's tenure, since when Nakamoto's miner activates, the current chain tip + // will be in epoch 2.5 (the next block will be epoch 3.0) + let cur_epoch = + SortitionDB::get_stacks_epoch(db_handle.deref(), burn_chain_tip.block_height + 1)? + .expect("FATAL: no epoch defined for current Stacks block"); + + // static checks on transactions all pass + let valid = block.validate_transactions_static(mainnet, chain_id, cur_epoch.epoch_id); + if !valid { + warn!( + "Invalid Nakamoto block, transactions failed static checks: {}/{} (epoch {})", + consensus_hash, block_hash, cur_epoch.epoch_id + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: failed static transaction checks".into(), + )); + } + + Ok(()) + } + + /// Insert a Nakamoto block into the staging blocks DB + pub(crate) fn store_block( + staging_db_tx: &rusqlite::Transaction, + block: NakamotoBlock, + burn_attachable: bool, + stacks_attachable: bool, + ) -> Result<(), ChainstateError> { + let block_id = block.block_id(); staging_db_tx.execute( "INSERT INTO nakamoto_staging_blocks ( block_hash, consensus_hash, - burn_view, parent_block_id, burn_attachable, stacks_attachable, @@ -558,12 +1290,12 @@ impl NakamotoChainState { download_time, arrival_time, processed_time, - data ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14)", + data + ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", params![ - &block_hash, + &block.header.block_hash(), &block.header.consensus_hash, - &block.header.burn_view, - &parent_block_id, + &block.header.parent_block_id, if 
burn_attachable { 1 } else { 0 }, if stacks_attachable { 1 } else { 0 }, 0, @@ -576,10 +1308,93 @@ impl NakamotoChainState { block.serialize_to_vec(), ], )?; - Ok(()) } + /// Accept a Nakamoto block into the staging blocks DB. + /// Fails if: + /// * the public key cannot be recovered from the miner's signature + /// * the stackers during the tenure didn't sign it + /// * a DB error occurs + /// Does nothing if: + /// * we already have the block + /// Returns true if we stored the block; false if not. + pub fn accept_block( + config: &ChainstateConfig, + block: NakamotoBlock, + sortdb: &SortitionHandleConn, + staging_db_tx: &rusqlite::Transaction, + ) -> Result { + // do nothing if we already have this block + if let Some(_) = Self::get_block_header(&staging_db_tx, &block.header.block_id())? { + debug!("Already have block {}", &block.header.block_id()); + return Ok(false); + } + + // if this is the first tenure block, then make sure it's well-formed + if let Some(false) = block.is_wellformed_first_tenure_block() { + warn!( + "Block {} is not a well-formed first tenure block", + &block.block_id() + ); + return Err(ChainstateError::InvalidStacksBlock( + "Not a well-formed first block".into(), + )); + } + + // this block must be consistent with its miner's leader-key and block-commit, and must + // contain only transactions that are valid in this epoch. + if let Err(e) = + Self::validate_nakamoto_block_burnchain(sortdb, &block, config.mainnet, config.chain_id) + { + warn!("Unacceptable Nakamoto block; will not store"; + "block_id" => %block.block_id(), + "error" => format!("{:?}", &e) + ); + return Ok(false); + }; + + if !sortdb.expects_stacker_signature( + &block.header.consensus_hash, + &block.header.stacker_signature, + )? 
{ + let msg = format!("Received block, but the stacker signature does not match the active stacking cycle"); + warn!("{}", msg); + return Err(ChainstateError::InvalidStacksBlock(msg)); + } + + // if the burnchain block of this Stacks block's tenure has been processed, then it + // is ready to be processed from the perspective of the burnchain + let burn_attachable = sortdb.processed_block(&block.header.consensus_hash)?; + + // check if the parent Stacks Block ID has been processed. if so, then this block is stacks_attachable + let stacks_attachable = + // block is the first-ever mined (test only) + block.is_first_mined() + // block attaches to a processed nakamoto block + || staging_db_tx.query_row( + "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ? AND processed = 1 AND orphaned = 0", + rusqlite::params![&block.header.parent_block_id], + |_row| Ok(()) + ).optional()?.is_some() + // block attaches to a Stacks epoch 2.x block, and there are no nakamoto blocks at all + || ( + staging_db_tx.query_row( + "SELECT 1 FROM block_headers WHERE index_block_hash = ?", + rusqlite::params![&block.header.parent_block_id], + |_row| Ok(()) + ).optional()?.is_some() + && staging_db_tx.query_row( + "SELECT 1 FROM nakamoto_block_headers LIMIT 1", + rusqlite::NO_PARAMS, + |_row| Ok(()) + ).optional()?.is_none() + ); + + Self::store_block(staging_db_tx, block, burn_attachable, stacks_attachable)?; + Ok(true) + } + /// Create the block reward for a NakamotoBlock /// `coinbase_reward_ustx` is the total coinbase reward for this block, including any /// accumulated rewards from missed sortitions or initial mining rewards. 
@@ -589,21 +1404,21 @@ impl NakamotoChainState { parent_block_hash: &BlockHeaderHash, parent_consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, - coinbase_tx: &StacksTransaction, block_consensus_hash: &ConsensusHash, block_height: u64, + coinbase_tx: &StacksTransaction, parent_fees: u128, burnchain_commit_burn: u64, burnchain_sortition_burn: u64, coinbase_reward_ustx: u128, - ) -> Result { + ) -> MinerPaymentSchedule { let miner_auth = coinbase_tx.get_origin(); let miner_addr = miner_auth.get_address(mainnet); let recipient = if epoch_id >= StacksEpochId::Epoch21 { // pay to tx-designated recipient, or if there is none, pay to the origin match coinbase_tx.try_as_coinbase() { - Some((_, recipient_opt)) => recipient_opt + Some((_, recipient_opt, _)) => recipient_opt .cloned() .unwrap_or(miner_addr.to_account_principal()), None => miner_addr.to_account_principal(), @@ -632,17 +1447,18 @@ impl NakamotoChainState { vtxindex: 0, }; - Ok(miner_reward) + miner_reward } /// Return the total ExecutionCost consumed during the tenure up to and including /// `block` pub fn get_total_tenure_cost_at( - conn: &Connection, + chainstate_conn: &Connection, block: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT total_tenure_cost FROM nakamoto_block_headers WHERE index_block_hash = ?"; - conn.query_row(qry, &[block], |row| row.get(0)) + chainstate_conn + .query_row(qry, &[block], |row| row.get(0)) .optional() .map_err(ChainstateError::from) } @@ -650,21 +1466,22 @@ impl NakamotoChainState { /// Return the total transactions fees during the tenure up to and including /// `block` pub fn get_total_tenure_tx_fees_at( - conn: &Connection, + chainstate_conn: &Connection, block: &StacksBlockId, ) -> Result, ChainstateError> { let qry = "SELECT tenure_tx_fees FROM nakamoto_block_headers WHERE index_block_hash = ?"; - let tx_fees_str: Option = - conn.query_row(qry, &[block], |row| row.get(0)).optional()?; + let tx_fees_str: Option = chainstate_conn + .query_row(qry, 
&[block], |row| row.get(0)) + .optional()?; tx_fees_str .map(|x| x.parse()) .transpose() .map_err(|_| ChainstateError::DBError(DBError::ParseError)) } - /// Return a Nakamoto StacksHeaderInfo at a given tenure height in the fork identified by `tip_index_hash` - /// Prior to Nakamoto, `tenure_height` is equivalent to stacks block height. - /// This returns the first Stacks block header in the tenure. + /// Return a Nakamoto StacksHeaderInfo at a given tenure height in the fork identified by `tip_index_hash`. + /// * For Stacks 2.x, this is the Stacks block's header + /// * For Stacks 3.x (Nakamoto), this is the first block in the miner's tenure. pub fn get_header_by_tenure_height( tx: &mut StacksDBTx, tip_index_hash: &StacksBlockId, @@ -699,7 +1516,7 @@ impl NakamotoChainState { for candidate in candidate_headers.into_iter() { let Ok(Some(ancestor_at_height)) = - tx.get_ancestor_block_hash(tenure_height, tip_index_hash) + tx.get_ancestor_block_hash(candidate.stacks_block_height, tip_index_hash) else { // if there's an error or no result, this candidate doesn't match, so try next candidate continue; @@ -719,11 +1536,11 @@ impl NakamotoChainState { /// in the single Bitcoin-anchored Stacks block they produce, as /// well as the microblock stream they append to it. 
pub fn get_tenure_height( - conn: &Connection, + chainstate_conn: &Connection, block: &StacksBlockId, ) -> Result, ChainstateError> { let nak_qry = "SELECT tenure_height FROM nakamoto_block_headers WHERE index_block_hash = ?"; - let opt_height: Option = conn + let opt_height: Option = chainstate_conn .query_row(nak_qry, &[block], |row| row.get(0)) .optional()?; if let Some(height) = opt_height { @@ -733,7 +1550,7 @@ impl NakamotoChainState { } let epoch_2_qry = "SELECT block_height FROM block_headers WHERE index_block_hash = ?"; - let opt_height: Option = conn + let opt_height: Option = chainstate_conn .query_row(epoch_2_qry, &[block], |row| row.get(0)) .optional()?; opt_height @@ -744,11 +1561,11 @@ impl NakamotoChainState { /// Load block header (either Epoch-2 rules or Nakamoto) by `index_block_hash` pub fn get_block_header( - conn: &Connection, + chainstate_conn: &Connection, index_block_hash: &StacksBlockId, ) -> Result, ChainstateError> { let sql = "SELECT * FROM nakamoto_block_headers WHERE index_block_hash = ?1"; - let result = query_row_panic(conn, sql, &[&index_block_hash], || { + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { "FATAL: multiple rows for the same block hash".to_string() })?; if result.is_some() { @@ -756,23 +1573,225 @@ impl NakamotoChainState { } let sql = "SELECT * FROM block_headers WHERE index_block_hash = ?1"; - let result = query_row_panic(conn, sql, &[&index_block_hash], || { + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { "FATAL: multiple rows for the same block hash".to_string() })?; Ok(result) } + /// Load the canonical Stacks block header (either epoch-2 rules or Nakamoto) + pub fn get_canonical_block_header( + chainstate_conn: &Connection, + sortdb: &SortitionDB, + ) -> Result, ChainstateError> { + let (consensus_hash, block_hash) = + SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; + Self::get_block_header( + chainstate_conn, + 
&StacksBlockId::new(&consensus_hash, &block_hash), + ) + } + + /// Get the tenure-start block header of a given consensus hash. + /// It might be an epoch 2.x block header + pub fn get_block_header_by_consensus_hash( + chainstate_conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let nakamoto_header_info = + Self::get_nakamoto_tenure_start_block_header(chainstate_conn, consensus_hash)?; + if nakamoto_header_info.is_some() { + return Ok(nakamoto_header_info); + } + + // parent might be epoch 2 + let epoch2_header_info = StacksChainState::get_stacks_block_header_info_by_consensus_hash( + chainstate_conn, + consensus_hash, + )?; + Ok(epoch2_header_info) + } + + /// Get the VRF proof for a Stacks block. + /// This works for either Nakamoto or epoch 2.x + pub fn get_block_vrf_proof( + chainstate_conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let Some(start_header) = NakamotoChainState::get_block_header_by_consensus_hash( + chainstate_conn, + consensus_hash, + )? + else { + return Ok(None); + }; + + let vrf_proof = match start_header.anchored_header { + StacksBlockHeaderTypes::Epoch2(epoch2_header) => Some(epoch2_header.proof), + StacksBlockHeaderTypes::Nakamoto(..) => { + NakamotoChainState::get_nakamoto_tenure_vrf_proof(chainstate_conn, consensus_hash)? + } + }; + + Ok(vrf_proof) + } + + /// Get the VRF proof of the parent tenure (either Nakamoto or epoch 2.x) of the block + /// identified by the given consensus hash. + /// The parent must already have been processed. + /// + /// `consensus_hash` identifies the child block. + /// `block_commit_txid` identifies the child block's tenure's block-commit tx + /// + /// Returns the proof of this block's parent tenure on success. + /// + /// Returns InvalidStacksBlock if the sortition for `consensus_hash` does not exist, or if its + /// parent sortition doesn't exist (i.e. 
the sortition DB is missing something) + /// + /// Returns NoSuchBlockError if the block header for `consensus_hash` does not exist, or if the + /// parent block header info does not exist (i.e. the chainstate DB is missing something) + pub fn get_parent_vrf_proof( + chainstate_conn: &Connection, + sortdb_conn: &Connection, + consensus_hash: &ConsensusHash, + block_commit_txid: &Txid, + ) -> Result { + let sn = SortitionDB::get_block_snapshot_consensus(sortdb_conn, consensus_hash)?.ok_or( + ChainstateError::InvalidStacksBlock("No sortition for consensus hash".into()), + )?; + + let parent_sortition_id = SortitionDB::get_block_commit_parent_sortition_id( + sortdb_conn, + &block_commit_txid, + &sn.sortition_id, + )? + .ok_or(ChainstateError::InvalidStacksBlock( + "Parent block-commit is not in this block's sortition history".into(), + ))?; + + let parent_sn = SortitionDB::get_block_snapshot(sortdb_conn, &parent_sortition_id)?.ok_or( + ChainstateError::InvalidStacksBlock( + "Parent block-commit does not have a sortition".into(), + ), + )?; + + let parent_vrf_proof = + Self::get_block_vrf_proof(chainstate_conn, &parent_sn.consensus_hash)? 
+ .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!("Nakamoto block has no parent"; + "block consensus_hash" => %consensus_hash); + e + })?; + + Ok(parent_vrf_proof) + } + + /// Get the first block header in a Nakamoto tenure + pub fn get_nakamoto_tenure_start_block_header( + chainstate_conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height ASC LIMIT 1"; + query_row_panic(chainstate_conn, sql, &[&consensus_hash], || { + "FATAL: multiple rows for the same consensus hash".to_string() + }) + .map_err(ChainstateError::DBError) + } + + /// Get the last block header in a Nakamoto tenure + pub fn get_nakamoto_tenure_finish_block_header( + chainstate_conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let sql = "SELECT * FROM nakamoto_block_headers WHERE consensus_hash = ?1 ORDER BY block_height DESC LIMIT 1"; + query_row_panic(chainstate_conn, sql, &[&consensus_hash], || { + "FATAL: multiple rows for the same consensus hash".to_string() + }) + .map_err(ChainstateError::DBError) + } + + /// Get the status of a Nakamoto block. + /// Returns Some(accepted?, orphaned?) on success + /// Returns None if there's no such block + /// Returns Err on DBError + pub fn get_nakamoto_block_status( + staging_blocks_conn: &Connection, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + ) -> Result, ChainstateError> { + let sql = "SELECT processed, orphaned FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 AND block_hash = ?2"; + let args: &[&dyn ToSql] = &[consensus_hash, block_hash]; + Ok(query_row_panic(staging_blocks_conn, sql, args, || { + "FATAL: multiple rows for the same consensus hash and block hash".to_string() + }) + .map_err(ChainstateError::DBError)? 
+ .map(|(processed, orphaned): (u32, u32)| (processed != 0, orphaned != 0))) + } + + /// Get the VRF proof for a Nakamoto block, if it exists. + /// Returns None if the Nakamoto block's VRF proof is not found (e.g. because there is no + /// Nakamoto block) + pub fn get_nakamoto_tenure_vrf_proof( + chainstate_conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, ChainstateError> { + let sql = "SELECT vrf_proof FROM nakamoto_block_headers WHERE consensus_hash = ?1 AND tenure_changed = 1"; + let args: &[&dyn ToSql] = &[consensus_hash]; + let proof_bytes: Option = query_row(chainstate_conn, sql, args)?; + if let Some(bytes) = proof_bytes { + let proof = VRFProof::from_hex(&bytes) + .ok_or(DBError::Corruption) + .map_err(|e| { + warn!("Failed to load VRF proof: could not decode"; + "vrf_proof" => %bytes, + "consensus_hash" => %consensus_hash + ); + e + })?; + Ok(Some(proof)) + } else { + Ok(None) + } + } + + /// Verify that a nakamoto block's block-commit's VRF seed is consistent with the VRF proof + fn check_block_commit_vrf_seed( + chainstate_conn: &Connection, + sortdb_conn: &Connection, + block: &NakamotoBlock, + ) -> Result<(), ChainstateError> { + // get the block-commit for this block + let sn = + SortitionDB::get_block_snapshot_consensus(sortdb_conn, &block.header.consensus_hash)? + .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!("No block-commit for block"; "block_id" => %block.block_id()); + e + })?; + + let block_commit = + get_block_commit_by_txid(sortdb_conn, &sn.sortition_id, &sn.winning_block_txid)? + .ok_or(ChainstateError::NoSuchBlockError) + .map_err(|e| { + warn!("No block-commit for block"; "block_id" => %block.block_id()); + e + })?; + + block.validate_vrf_seed(sortdb_conn, chainstate_conn, &block_commit) + } + /// Insert a nakamoto block header that is paired with an /// already-existing block commit and snapshot /// /// `header` should be a pointer to the header in `tip_info`. 
- pub fn insert_stacks_block_header( - tx: &Connection, - parent_id: &StacksBlockId, + pub(crate) fn insert_stacks_block_header( + chainstate_tx: &Connection, tip_info: &StacksHeaderInfo, header: &NakamotoBlockHeader, - anchored_block_cost: &ExecutionCost, + vrf_proof: Option<&VRFProof>, + block_cost: &ExecutionCost, total_tenure_cost: &ExecutionCost, tenure_height: u64, tenure_changed: bool, @@ -795,11 +1814,12 @@ impl NakamotoChainState { let block_hash = header.block_hash(); - let index_block_hash = - StacksBlockHeader::make_index_block_hash(&consensus_hash, &block_hash); + let index_block_hash = StacksBlockId::new(&consensus_hash, &block_hash); assert!(*stacks_block_height < u64::try_from(i64::MAX).unwrap()); + let vrf_proof_bytes = vrf_proof.map(|proof| proof.to_hex()); + let args: &[&dyn ToSql] = &[ &u64_to_sql(*stacks_block_height)?, &index_root, @@ -812,32 +1832,30 @@ impl NakamotoChainState { &header.version, &u64_to_sql(header.chain_length)?, &u64_to_sql(header.burn_spent)?, - &header.parent, - &header.parent_consensus_hash, - &header.burn_view, &header.miner_signature, &header.stacker_signature, &header.tx_merkle_root, &header.state_index_root, &block_hash, &index_block_hash, - anchored_block_cost, + block_cost, total_tenure_cost, &tenure_tx_fees.to_string(), - parent_id, + &header.parent_block_id, &u64_to_sql(tenure_height)?, if tenure_changed { &1i64 } else { &0 }, + &vrf_proof_bytes.as_ref(), ]; - tx.execute( + chainstate_tx.execute( "INSERT INTO nakamoto_block_headers (block_height, index_root, consensus_hash, burn_header_hash, burn_header_height, burn_header_timestamp, block_size, header_type, - version, chain_length, burn_spent, parent, parent_consensus_hash, - burn_view, miner_signature, stacker_signature, tx_merkle_root, state_index_root, + version, chain_length, burn_spent, + miner_signature, stacker_signature, tx_merkle_root, state_index_root, block_hash, index_block_hash, @@ -846,8 +1864,9 @@ impl NakamotoChainState { tenure_tx_fees, 
parent_block_id, tenure_height, - tenure_changed) - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24, ?25, ?26)", + tenure_changed, + vrf_proof) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)", args )?; @@ -856,19 +1875,20 @@ impl NakamotoChainState { /// Append a Stacks block to an existing Stacks block, and grant the miner the block reward. /// Return the new Stacks header info. - pub fn advance_tip( + fn advance_tip( headers_tx: &mut StacksDBTx, parent_tip: &StacksBlockHeaderTypes, parent_consensus_hash: &ConsensusHash, new_tip: &NakamotoBlockHeader, + new_vrf_proof: Option<&VRFProof>, new_burn_header_hash: &BurnchainHeaderHash, new_burnchain_height: u32, new_burnchain_timestamp: u64, block_reward: Option<&MinerPaymentSchedule>, - mature_miner_payouts: Option<(MinerReward, Vec, MinerReward, MinerRewardInfo)>, // (miner, [users], parent, matured rewards) + mature_miner_payouts: Option<(MinerReward, MinerReward, MinerRewardInfo)>, // (miner, parent, matured rewards) anchor_block_cost: &ExecutionCost, total_tenure_cost: &ExecutionCost, - anchor_block_size: u64, + block_size: u64, applied_epoch_transition: bool, burn_stack_stx_ops: Vec, burn_transfer_stx_ops: Vec, @@ -877,10 +1897,22 @@ impl NakamotoChainState { tenure_changed: bool, block_fees: u128, ) -> Result { - if new_tip.parent != FIRST_STACKS_BLOCK_HASH { + if new_tip.parent_block_id + != StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH) + { // not the first-ever block, so linkage must occur - assert_eq!(new_tip.parent, parent_tip.block_hash()); - assert_eq!(&new_tip.parent_consensus_hash, parent_consensus_hash); + match parent_tip { + StacksBlockHeaderTypes::Epoch2(..) 
=> { + assert_eq!( + new_tip.parent_block_id, + StacksBlockId::new(&parent_consensus_hash, &parent_tip.block_hash()) + ); + } + StacksBlockHeaderTypes::Nakamoto(nakamoto_header) => { + // nakamoto blocks link to their parent via index block hashes + assert_eq!(new_tip.parent_block_id, nakamoto_header.block_id()); + } + } } assert_eq!( @@ -891,9 +1923,9 @@ impl NakamotoChainState { new_tip.chain_length ); - let parent_hash = StacksBlockId::new(parent_consensus_hash, &parent_tip.block_hash()); + let parent_hash = new_tip.parent_block_id.clone(); let new_block_hash = new_tip.block_hash(); - let index_block_hash = StacksBlockId::new(&new_tip.consensus_hash, &new_block_hash); + let index_block_hash = new_tip.block_id(); // store each indexed field test_debug!("Headers index_put_begin {parent_hash}-{index_block_hash}"); @@ -910,7 +1942,7 @@ impl NakamotoChainState { burn_header_hash: new_burn_header_hash.clone(), burn_header_height: new_burnchain_height, burn_header_timestamp: new_burnchain_timestamp, - anchored_block_size: anchor_block_size, + anchored_block_size: block_size, }; let tenure_fees = block_fees @@ -929,9 +1961,9 @@ impl NakamotoChainState { Self::insert_stacks_block_header( headers_tx.deref_mut(), - &parent_hash, &new_tip_info, &new_tip, + new_vrf_proof, anchor_block_cost, total_tenure_cost, tenure_height, @@ -953,8 +1985,7 @@ impl NakamotoChainState { burn_delegate_stx_ops, )?; - if let Some((miner_payout, user_payouts, parent_payout, reward_info)) = mature_miner_payouts - { + if let Some((miner_payout, parent_payout, reward_info)) = mature_miner_payouts { let rewarded_miner_block_id = StacksBlockId::new( &reward_info.from_block_consensus_hash, &reward_info.from_stacks_block_hash, @@ -970,14 +2001,6 @@ impl NakamotoChainState { &rewarded_miner_block_id, &miner_payout, )?; - for user_payout in user_payouts.into_iter() { - StacksChainState::insert_matured_child_user_reward( - headers_tx.deref_mut(), - &rewarded_parent_miner_block_id, - 
&rewarded_miner_block_id, - &user_payout, - )?; - } StacksChainState::insert_matured_parent_miner_reward( headers_tx.deref_mut(), &rewarded_parent_miner_block_id, @@ -1007,15 +2030,15 @@ impl NakamotoChainState { /// Returns stx lockup events. pub fn finish_block( clarity_tx: &mut ClarityTx, - miner_payouts: Option<&(MinerReward, Vec, MinerReward, MinerRewardInfo)>, + miner_payouts: Option<&(MinerReward, MinerReward, MinerRewardInfo)>, ) -> Result, ChainstateError> { // add miner payments - if let Some((ref miner_reward, ref user_rewards, ref parent_reward, _)) = miner_payouts { + if let Some((ref miner_reward, ref parent_reward, _)) = miner_payouts { // grant in order by miner, then users let matured_ustx = StacksChainState::process_matured_miner_rewards( clarity_tx, miner_reward, - user_rewards, + &[], parent_reward, )?; @@ -1030,33 +2053,46 @@ impl NakamotoChainState { Ok(lockup_events) } + /// Begin block-processing and return all of the pre-processed state within a + /// `SetupBlockResult`. + /// + /// * Find the matured miner rewards that must be applied in this block + /// * Begin the Clarity transaction + /// * Load up the tenure's execution cost thus far + /// * Apply an epoch transition, if necessary + /// * Handle auto-unlock for PoX + /// * Process any new Stacks-on-Bitcoin transactions + /// /// Called in both follower and miner block assembly paths. 
+ /// Arguments: + /// * chainstate_tx: transaction against the chainstate MARF + /// * clarity_instance: connection to the chainstate Clarity instance + /// * sortition_dbconn: connection to the sortition DB MARF + /// * pox_constants: PoX parameters + /// * parent_consensus_hash, parent_header_hash, parent_stacks_height, parent_burn_height: + /// pointer to the already-processed parent Stacks block + /// * burn_header_hash, burn_header_height: pointer to the Bitcoin block that identifies the + /// tenure of this block to be processed + /// * mainnet: whether or not we're in mainnet + /// * tenure_changed: whether or not this block represents a tenure change + /// * tenure_height: the number of tenures that this block confirms /// /// Returns clarity_tx, list of receipts, microblock execution cost, /// microblock fees, microblock burns, list of microblock tx receipts, /// miner rewards tuples, the stacks epoch id, and a boolean that /// represents whether the epoch transition has been applied. - /// - /// The `burn_dbconn`, `sortition_dbconn`, and `conn` arguments - /// all reference the same sortition database through different - /// interfaces. `burn_dbconn` and `sortition_dbconn` should - /// reference the same object. The reason to provide both is that - /// `SortitionDBRef` captures trait functions that Clarity does - /// not need, and Rust does not support trait upcasting (even - /// though it would theoretically be safe) 
pub fn setup_block<'a, 'b>( chainstate_tx: &'b mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, sortition_dbconn: &'b dyn SortitionDBRef, pox_constants: &PoxConstants, - burn_view: BurnchainHeaderHash, - burn_view_height: u32, parent_consensus_hash: ConsensusHash, parent_header_hash: BlockHeaderHash, parent_stacks_height: u64, parent_burn_height: u32, + burn_header_hash: BurnchainHeaderHash, + burn_header_height: u32, mainnet: bool, - miner_id_opt: Option, tenure_changed: bool, tenure_height: u64, ) -> Result, ChainstateError> { @@ -1117,13 +2153,14 @@ impl NakamotoChainState { } }; + // TODO: only need to do this if this is a tenure-start block let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops) = StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( chainstate_tx, &parent_index_hash, sortition_dbconn.sqlite_conn(), - &burn_view, - burn_view_height.into(), + &burn_header_hash, + burn_header_height.into(), )?; let mut clarity_tx = StacksChainState::chainstate_block_begin( @@ -1147,17 +2184,16 @@ impl NakamotoChainState { ) }); let matured_miner_rewards_opt = match matured_miner_rewards_result { - Some(Ok(x)) => x, + Some(Ok(Some((miner, _user_burns, parent, reward_info)))) => { + Some((miner, parent, reward_info)) + } + Some(Ok(None)) => None, Some(Err(e)) => { - if miner_id_opt.is_some() { - return Err(e); - } else { - let msg = format!("Failed to load miner rewards: {:?}", &e); - warn!("{}", &msg); + let msg = format!("Failed to load miner rewards: {:?}", &e); + warn!("{}", &msg); - clarity_tx.rollback_block(); - return Err(ChainstateError::InvalidStacksBlock(msg)); - } + clarity_tx.rollback_block(); + return Err(ChainstateError::InvalidStacksBlock(msg)); } None => None, }; @@ -1181,7 +2217,7 @@ impl NakamotoChainState { // is this stacks block the first of a new epoch? 
let (applied_epoch_transition, mut tx_receipts) = - StacksChainState::process_epoch_transition(&mut clarity_tx, burn_view_height)?; + StacksChainState::process_epoch_transition(&mut clarity_tx, burn_header_height)?; debug!( "Setup block: Processed epoch transition"; @@ -1193,7 +2229,7 @@ impl NakamotoChainState { let auto_unlock_events = if evaluated_epoch >= StacksEpochId::Epoch21 { let unlock_events = StacksChainState::check_and_handle_reward_start( - burn_view_height.into(), + burn_header_height.into(), sortition_dbconn.as_burn_state_db(), sortition_dbconn, &mut clarity_tx, @@ -1210,7 +2246,7 @@ impl NakamotoChainState { vec![] }; - let active_pox_contract = pox_constants.active_pox_contract(burn_view_height.into()); + let active_pox_contract = pox_constants.active_pox_contract(burn_header_height.into()); // process stacking & transfer operations from burnchain ops tx_receipts.extend(StacksChainState::process_stacking_ops( @@ -1263,6 +2299,7 @@ impl NakamotoChainState { }) } + /// Append a Nakamoto Stacks block to the Stacks chain state. 
pub fn append_block<'a>( chainstate_tx: &mut ChainstateTx, clarity_instance: &'a mut ClarityInstance, @@ -1284,7 +2321,6 @@ impl NakamotoChainState { ); let ast_rules = ASTRules::PrecheckSize; - let mainnet = chainstate_tx.get_config().mainnet; let next_block_height = block.header.chain_length; @@ -1300,35 +2336,57 @@ impl NakamotoChainState { ) }; - if parent_ch != block.header.parent_consensus_hash { + let parent_block_id = StacksChainState::get_index_hash(&parent_ch, &parent_block_hash); + if parent_block_id != block.header.parent_block_id { warn!("Error processing nakamoto block: Parent consensus hash does not match db view"; - "db_view" => %parent_ch, - "block_view" => %block.header.parent_consensus_hash); + "db.parent_block_id" => %parent_block_id, + "header.parent_block_id" => %block.header.parent_block_id); return Err(ChainstateError::InvalidStacksBlock( - "Parent consensus hash does not match".into(), + "Parent block does not match".into(), )); } - // check that the burnchain block that this block is associated with has been processed - let burn_view_hash = block.header.burn_view.clone(); + // check that the burnchain block that this block is associated with has been processed. + // N.B. we must first get its hash, and then verify that it's in the same Bitcoin fork as + // our `burn_dbconn` indicates. + let burn_header_hash = SortitionDB::get_burnchain_header_hash_by_consensus( + burn_dbconn, + &block.header.consensus_hash, + )? + .ok_or_else(|| { + warn!( + "Unrecognized consensus hash"; + "block_hash" => %block.header.block_hash(), + "consensus_hash" => %block.header.consensus_hash, + ); + ChainstateError::NoSuchBlockError + })?; + let sortition_tip = burn_dbconn.context.chain_tip.clone(); - let burn_view_height = burn_dbconn - .get_block_snapshot(&burn_view_hash, &sortition_tip)? + let burn_header_height = burn_dbconn + .get_block_snapshot(&burn_header_hash, &sortition_tip)? 
.ok_or_else(|| { warn!( "Tried to process Nakamoto block before its burn view was processed"; "block_hash" => block.header.block_hash(), - "burn_view" => %burn_view_hash, + "burn_header_hash" => %burn_header_hash, ); ChainstateError::NoSuchBlockError })? .block_height; let block_hash = block.header.block_hash(); + let tenure_changed = if let Some(tenures_valid) = block.tenure_changed() { + if !tenures_valid { + return Err(ChainstateError::InvalidStacksBlock( + "Invalid tenure changes in nakamoto block".into(), + )); + } + true + } else { + false + }; - let parent_block_id = StacksChainState::get_index_hash(&parent_ch, &parent_block_hash); - - let tenure_changed = block.tenure_changed(&parent_block_id); if !tenure_changed && (block.is_first_mined() || parent_ch != block.header.consensus_hash) { return Err(ChainstateError::ExpectedTenureChange); } @@ -1348,11 +2406,28 @@ impl NakamotoChainState { }; let tenure_height = if tenure_changed { + // TODO: this should be + ${num_tenures_passed_since_parent} parent_tenure_height + 1 } else { parent_tenure_height }; + // verify VRF proof, if present + // only need to do this once per tenure + // get the resulting vrf proof bytes + let vrf_proof_opt = if tenure_changed { + Self::check_block_commit_vrf_seed(chainstate_tx.deref(), burn_dbconn, block)?; + Some( + block + .get_vrf_proof() + .ok_or(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: has coinbase but no VRF proof".into(), + ))?, + ) + } else { + None + }; + let SetupBlockResult { mut clarity_tx, mut tx_receipts, @@ -1361,23 +2436,22 @@ impl NakamotoChainState { applied_epoch_transition, burn_stack_stx_ops, burn_transfer_stx_ops, - mut auto_unlock_events, burn_delegate_stx_ops, + mut auto_unlock_events, } = Self::setup_block( chainstate_tx, clarity_instance, burn_dbconn, pox_constants, - burn_view_hash, - burn_view_height.try_into().map_err(|_| { - ChainstateError::InvalidStacksBlock("Burn block height exceeded u32".into()) - })?, parent_ch, 
parent_block_hash, parent_chain_tip.stacks_block_height, parent_chain_tip.burn_header_height, + burn_header_hash, + burn_header_height.try_into().map_err(|_| { + ChainstateError::InvalidStacksBlock("Burn block height exceeded u32".into()) + })?, mainnet, - None, tenure_changed, tenure_height, )?; @@ -1387,7 +2461,7 @@ impl NakamotoChainState { debug!( "Append nakamoto block"; "block" => format!("{}/{block_hash}", block.header.consensus_hash), - "parent_block" => format!("{parent_ch}/{parent_block_hash}"), + "parent_block" => %block.header.parent_block_id, "stacks_height" => next_block_height, "total_burns" => block.header.burn_spent, "evaluated_epoch" => %evaluated_epoch @@ -1420,22 +2494,18 @@ impl NakamotoChainState { // obtain reward info for receipt -- consolidate miner, user, and parent rewards into a // single list, but keep the miner/user/parent/info tuple for advancing the chain tip - let (matured_rewards, miner_payouts_opt) = if let Some(matured_miner_rewards) = - matured_miner_rewards_opt - { - let (miner_reward, mut user_rewards, parent_reward, reward_ptr) = matured_miner_rewards; - - let mut ret = vec![]; - ret.push(miner_reward.clone()); - ret.append(&mut user_rewards); - ret.push(parent_reward.clone()); - ( - ret, - Some((miner_reward, user_rewards, parent_reward, reward_ptr)), - ) - } else { - (vec![], None) - }; + // TODO: drop user burn support + let (matured_rewards, miner_payouts_opt) = + if let Some(matured_miner_rewards) = matured_miner_rewards_opt { + let (miner_reward, parent_reward, reward_ptr) = matured_miner_rewards; + + let mut ret = vec![]; + ret.push(miner_reward.clone()); + ret.push(parent_reward.clone()); + (ret, Some((miner_reward, parent_reward, reward_ptr))) + } else { + (vec![], None) + }; let mut lockup_events = match Self::finish_block(&mut clarity_tx, miner_payouts_opt.as_ref()) { @@ -1519,6 +2589,7 @@ impl NakamotoChainState { .ok_or_else(|| { warn!("While processing tenure change, failed to look up parent tenure"; 
"parent_tenure_height" => parent_tenure_height, + "parent_block_id" => %parent_block_id, "block_hash" => %block_hash, "block_consensus_hash" => %block.header.consensus_hash); ChainstateError::NoSuchBlockError @@ -1546,40 +2617,36 @@ impl NakamotoChainState { 0 }; - Some( - Self::make_scheduled_miner_reward( - mainnet, - evaluated_epoch, - &parent_tenure_header.anchored_header.block_hash(), - &parent_tenure_header.consensus_hash, - &block_hash, - block - .get_coinbase_tx() - .ok_or(ChainstateError::InvalidStacksBlock( - "No coinbase transaction in tenure changing block".into(), - ))?, - &block.header.consensus_hash, - next_block_height, - parent_tenure_fees, - burnchain_commit_burn, - burnchain_sortition_burn, - total_coinbase, - ) - .expect("FATAL: parsed and processed a block without a coinbase"), - ) + Some(Self::make_scheduled_miner_reward( + mainnet, + evaluated_epoch, + &parent_tenure_header.anchored_header.block_hash(), + &parent_tenure_header.consensus_hash, + &block_hash, + &block.header.consensus_hash, + next_block_height, + block + .get_coinbase_tx() + .ok_or(ChainstateError::InvalidStacksBlock( + "No coinbase transaction in tenure changing block".into(), + ))?, + parent_tenure_fees, + burnchain_commit_burn, + burnchain_sortition_burn, + total_coinbase, + )) } else { None }; - let matured_rewards_info = miner_payouts_opt - .as_ref() - .map(|(_, _, _, info)| info.clone()); + let matured_rewards_info = miner_payouts_opt.as_ref().map(|(_, _, info)| info.clone()); let new_tip = Self::advance_tip( &mut chainstate_tx.tx, &parent_chain_tip.anchored_header, &parent_chain_tip.consensus_hash, &block.header, + vrf_proof_opt, chain_tip_burn_header_hash, chain_tip_burn_header_height, chain_tip_burn_header_timestamp, diff --git a/stackslib/src/chainstate/nakamoto/tests.rs b/stackslib/src/chainstate/nakamoto/tests.rs deleted file mode 100644 index ac3a86acb9..0000000000 --- a/stackslib/src/chainstate/nakamoto/tests.rs +++ /dev/null @@ -1,617 +0,0 @@ -// Copyright (C) 
2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::borrow::BorrowMut; -use std::fs; - -use clarity::types::chainstate::{PoxId, SortitionId, StacksBlockId}; -use clarity::vm::clarity::ClarityConnection; -use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksPrivateKey, StacksWorkScore, - TrieHash, -}; -use stacks_common::types::{PrivateKey, StacksEpoch, StacksEpochId}; -use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; -use stacks_common::util::vrf::{VRFPrivateKey, VRFProof}; -use stdext::prelude::Integer; -use stx_genesis::GenesisData; - -use crate::burnchains::{PoxConstants, Txid}; -use crate::chainstate::burn::db::sortdb::SortitionDB; -use crate::chainstate::burn::{BlockSnapshot, OpsHash, SortitionHash}; -use crate::chainstate::coordinator::tests::{ - get_burnchain, get_burnchain_db, get_chainstate, get_rw_sortdb, get_sortition_db, p2pkh_from, - pox_addr_from, setup_states_with_epochs, -}; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use crate::chainstate::stacks::db::{ - ChainStateBootData, 
ChainstateAccountBalance, ChainstateAccountLockup, ChainstateBNSName, - ChainstateBNSNamespace, StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo, -}; -use crate::chainstate::stacks::{ - CoinbasePayload, SchnorrThresholdSignature, StacksBlockHeader, StacksTransaction, - StacksTransactionSigner, TenureChangeCause, TenureChangePayload, TokenTransferMemo, - TransactionAuth, TransactionPayload, TransactionVersion, -}; -use crate::core; -use crate::core::StacksEpochExtension; - -fn test_path(name: &str) -> String { - format!("/tmp/stacks-node-tests/nakamoto-tests/{}", name) -} - -#[test] -pub fn nakamoto_advance_tip_simple() { - let path = test_path(function_name!()); - let _r = std::fs::remove_dir_all(&path); - - let burnchain_conf = get_burnchain(&path, None); - - let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); - - let stacker_sk = StacksPrivateKey::from_seed(&[0]); - let stacker = p2pkh_from(&stacker_sk); - let balance = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let stacked_amt = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); - let initial_balances = vec![(stacker.clone().into(), balance)]; - - let pox_constants = PoxConstants::mainnet_default(); - - setup_states_with_epochs( - &[&path], - &vrf_keys, - &committers, - None, - Some(initial_balances), - StacksEpochId::Epoch21, - Some(StacksEpoch::all(0, 0, 1000000)), - ); - - let mut sort_db = get_rw_sortdb(&path, None); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - - let b = get_burnchain(&path, None); - let burnchain = get_burnchain_db(&path, None); - let mut chainstate = get_chainstate(&path); - let chainstate_chain_id = chainstate.chain_id; - let (mut chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - - let mut sortdb_tx = sort_db.tx_handle_begin(&tip.sortition_id).unwrap(); - - let chain_tip_burn_header_hash = 
BurnchainHeaderHash([0; 32]); - let chain_tip_burn_header_height = 1; - let chain_tip_burn_header_timestamp = 100; - let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None); - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&stacker_sk).unwrap(), - coinbase_tx_payload, - ); - coinbase_tx.chain_id = chainstate_chain_id; - let txid = coinbase_tx.txid(); - coinbase_tx.sign_next_origin(&txid, &stacker_sk).unwrap(); - - let parent_block_id = - StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH); - let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload { - previous_tenure_end: parent_block_id, - previous_tenure_blocks: 0, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0; 20]), - signature: SchnorrThresholdSignature {}, - signers: vec![], - }); - let mut tenure_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&stacker_sk).unwrap(), - tenure_change_tx_payload, - ); - tenure_tx.chain_id = chainstate_chain_id; - tenure_tx.set_origin_nonce(1); - let txid = tenure_tx.txid(); - let mut tenure_tx_signer = StacksTransactionSigner::new(&tenure_tx); - tenure_tx_signer.sign_origin(&stacker_sk).unwrap(); - let tenure_tx = tenure_tx_signer.get_tx().unwrap(); - - let block = NakamotoBlock { - header: NakamotoBlockHeader { - version: 100, - chain_length: 1, - burn_spent: 5, - parent: FIRST_STACKS_BLOCK_HASH, - burn_view: tip.burn_header_hash.clone(), - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash::from_hex( - "9f283c59142dec747911897fc120f1d2af8c0384830a95e1847803ee31a70ab1", - ) - .unwrap(), - stacker_signature: MessageSignature([0; 65]), - miner_signature: MessageSignature([0; 65]), - consensus_hash: ConsensusHash([0; 20]), - parent_consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH, - }, - txs: vec![coinbase_tx, tenure_tx], - }; - let block_size = 10; - let 
burnchain_commit_burn = 1; - let burnchain_sortition_burn = 5; - let parent_chain_tip = StacksHeaderInfo { - anchored_header: StacksBlockHeader { - version: 100, - total_work: StacksWorkScore::genesis(), - proof: VRFProof::empty(), - parent_block: BlockHeaderHash([0; 32]), - parent_microblock: BlockHeaderHash([0; 32]), - parent_microblock_sequence: 0, - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash([0; 32]), - microblock_pubkey_hash: Hash160([1; 20]), - } - .into(), - microblock_tail: None, - stacks_block_height: 0, - index_root: TrieHash([0; 32]), - consensus_hash: FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - burn_header_hash: tip.burn_header_hash.clone(), - burn_header_height: 2, - burn_header_timestamp: 50, - anchored_block_size: 10, - }; - - NakamotoChainState::append_block( - &mut chainstate_tx, - clarity_instance, - &mut sortdb_tx, - &pox_constants, - &parent_chain_tip, - &chain_tip_burn_header_hash, - chain_tip_burn_header_height, - chain_tip_burn_header_timestamp, - &block, - block_size, - burnchain_commit_burn, - burnchain_sortition_burn, - ) - .unwrap(); -} - -#[test] -pub fn staging_blocks() { - let path = test_path(function_name!()); - let _r = std::fs::remove_dir_all(&path); - - let burnchain_conf = get_burnchain(&path, None); - - let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); - - let miner_sks: Vec<_> = (0..10).map(|i| StacksPrivateKey::from_seed(&[i])).collect(); - - let transacter_sk = StacksPrivateKey::from_seed(&[1]); - let transacter = p2pkh_from(&transacter_sk); - - let recipient_sk = StacksPrivateKey::from_seed(&[2]); - let recipient = p2pkh_from(&recipient_sk); - - let initial_balances = vec![(transacter.clone().into(), 100000)]; - let transacter_fee = 1000; - let transacter_send = 250; - - let pox_constants = PoxConstants::mainnet_default(); - - setup_states_with_epochs( - &[&path], - &vrf_keys, - &committers, - 
None, - Some(initial_balances), - StacksEpochId::Epoch21, - Some(StacksEpoch::all(0, 0, 1000000)), - ); - - let mut sort_db = get_rw_sortdb(&path, None); - - for i in 1..6u8 { - let parent_snapshot = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - let miner_pk = Secp256k1PublicKey::from_private(&miner_sks[usize::from(i)]); - let miner_pk_hash = Hash160::from_node_public_key(&miner_pk); - eprintln!("Advance sortition: {i}. Miner PK = {miner_pk:?}"); - let new_bhh = BurnchainHeaderHash([i; 32]); - let new_ch = ConsensusHash([i; 20]); - let new_sh = SortitionHash([1; 32]); - - let new_snapshot = BlockSnapshot { - block_height: parent_snapshot.block_height + 1, - burn_header_timestamp: 100 * u64::from(i), - burn_header_hash: new_bhh.clone(), - parent_burn_header_hash: parent_snapshot.burn_header_hash.clone(), - consensus_hash: new_ch.clone(), - ops_hash: OpsHash([0; 32]), - total_burn: 10, - sortition: true, - sortition_hash: new_sh, - winning_block_txid: Txid([0; 32]), - winning_stacks_block_hash: BlockHeaderHash([0; 32]), - index_root: TrieHash([0; 32]), - num_sortitions: parent_snapshot.num_sortitions + 1, - stacks_block_accepted: true, - stacks_block_height: 1, - arrival_index: i.into(), - canonical_stacks_tip_height: i.into(), - canonical_stacks_tip_hash: BlockHeaderHash([0; 32]), - canonical_stacks_tip_consensus_hash: new_ch.clone(), - sortition_id: SortitionId::new(&new_bhh.clone(), &PoxId::new(vec![true])), - parent_sortition_id: parent_snapshot.sortition_id.clone(), - pox_valid: true, - accumulated_coinbase_ustx: 0, - miner_pk_hash: Some(miner_pk_hash), - }; - - let mut sortdb_tx = sort_db - .tx_handle_begin(&parent_snapshot.sortition_id) - .unwrap(); - - sortdb_tx - .append_chain_tip_snapshot( - &parent_snapshot, - &new_snapshot, - &vec![], - &vec![], - None, - None, - None, - ) - .unwrap(); - - sortdb_tx.commit().unwrap(); - } - - let mut chainstate = get_chainstate(&path); - - let mut block = NakamotoBlock { - header: 
NakamotoBlockHeader { - version: 100, - chain_length: 1, - burn_spent: 10, - parent: BlockHeaderHash([1; 32]), - burn_view: BurnchainHeaderHash([1; 32]), - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash([0; 32]), - stacker_signature: MessageSignature([0; 65]), - miner_signature: MessageSignature([0; 65]), - consensus_hash: ConsensusHash([2; 20]), - parent_consensus_hash: ConsensusHash([1; 20]), - }, - txs: vec![], - }; - - let miner_signature = miner_sks[4] - .sign(block.header.signature_hash().unwrap().as_bytes()) - .unwrap(); - - block.header.miner_signature = miner_signature; - - let (chainstate_tx, _clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let sortdb_conn = sort_db.index_handle_at_tip(); - - NakamotoChainState::accept_block(block.clone(), &sortdb_conn, &chainstate_tx).unwrap(); - - chainstate_tx.commit().unwrap(); - - let (chainstate_tx, _clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let sortdb_conn = sort_db.index_handle_at_tip(); - - assert!( - NakamotoChainState::next_ready_block(&chainstate_tx) - .unwrap() - .is_none(), - "No block should be ready yet", - ); - - let block_parent_id = - StacksBlockId::new(&block.header.parent_consensus_hash, &block.header.parent); - NakamotoChainState::set_block_processed(&chainstate_tx, &block_parent_id).unwrap(); - - // block should be ready -- the burn view was processed before the block was inserted. - let ready_block = NakamotoChainState::next_ready_block(&chainstate_tx) - .unwrap() - .unwrap(); - - assert_eq!(ready_block.header.block_hash(), block.header.block_hash()); - - chainstate_tx.commit().unwrap(); -} - -// Assemble 5 nakamoto blocks, invoking append_block. Check that miner rewards -// mature as expected. 
-#[test] -pub fn nakamoto_advance_tip_multiple() { - let path = test_path(function_name!()); - let _r = std::fs::remove_dir_all(&path); - - let burnchain_conf = get_burnchain(&path, None); - - let vrf_keys: Vec<_> = (0..50).map(|_| VRFPrivateKey::new()).collect(); - let committers: Vec<_> = (0..50).map(|_| StacksPrivateKey::new()).collect(); - - let miner_sk = StacksPrivateKey::from_seed(&[0]); - let miner = p2pkh_from(&miner_sk); - - let transacter_sk = StacksPrivateKey::from_seed(&[1]); - let transacter = p2pkh_from(&transacter_sk); - - let recipient_sk = StacksPrivateKey::from_seed(&[2]); - let recipient = p2pkh_from(&recipient_sk); - - let initial_balances = vec![ - (miner.clone().into(), 0), - (transacter.clone().into(), 100000), - ]; - let transacter_fee = 1000; - let transacter_send = 250; - - let pox_constants = PoxConstants::mainnet_default(); - - setup_states_with_epochs( - &[&path], - &vrf_keys, - &committers, - None, - Some(initial_balances), - StacksEpochId::Epoch21, - Some(StacksEpoch::all(0, 0, 1000000)), - ); - - let mut sort_db = get_rw_sortdb(&path, None); - - let b = get_burnchain(&path, None); - let burnchain = get_burnchain_db(&path, None); - let mut chainstate = get_chainstate(&path); - let chainstate_chain_id = chainstate.chain_id; - - let mut last_block: Option = None; - let index_roots = [ - "c76d48e971b2ea3c78c476486455090da37df260a41eef355d4e9330faf166c0", - "443403486d617e96e44aa6ff6056e575a7d29fd02a987452502e20c98929fe20", - "1c078414b996a42eabd7fc0b731d8ac49a74141313bdfbe4166349c3d1d27946", - "69cafb50ad1debcd0dee83d58b1a06060a5dd9597ec153e6129edd80c4368226", - "449f086937fda06db5859ce69c2c6bdd7d4d104bf4a6d2745bc81a17391daf36", - ]; - - for i in 1..6 { - eprintln!("Advance tip: {}", i); - let parent_snapshot = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); - - let (mut chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let mut sortdb_tx = sort_db - 
.tx_handle_begin(&parent_snapshot.sortition_id) - .unwrap(); - - let parent = match last_block.as_ref() { - Some(x) => x.header.block_hash(), - None => FIRST_STACKS_BLOCK_HASH, - }; - - let parent_header: StacksBlockHeaderTypes = match last_block.clone() { - Some(x) => x.header.into(), - None => StacksBlockHeader { - version: 100, - total_work: StacksWorkScore::genesis(), - proof: VRFProof::empty(), - parent_block: BlockHeaderHash([0; 32]), - parent_microblock: BlockHeaderHash([0; 32]), - parent_microblock_sequence: 0, - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash([0; 32]), - microblock_pubkey_hash: Hash160([1; 20]), - } - .into(), - }; - - let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([i; 32]), None); - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&miner_sk).unwrap(), - coinbase_tx_payload, - ); - coinbase_tx.chain_id = chainstate_chain_id; - coinbase_tx.set_origin_nonce((i - 1).into()); - let mut coinbase_tx_signer = StacksTransactionSigner::new(&coinbase_tx); - coinbase_tx_signer.sign_origin(&miner_sk).unwrap(); - let coinbase_tx = coinbase_tx_signer.get_tx().unwrap(); - - let transacter_tx_payload = TransactionPayload::TokenTransfer( - recipient.clone().into(), - transacter_send, - TokenTransferMemo([0; 34]), - ); - let mut transacter_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&transacter_sk).unwrap(), - transacter_tx_payload, - ); - transacter_tx.chain_id = chainstate_chain_id; - transacter_tx.set_tx_fee(transacter_fee); - transacter_tx.set_origin_nonce((2 * (i - 1)).into()); - let mut transacter_tx_signer = StacksTransactionSigner::new(&transacter_tx); - transacter_tx_signer.sign_origin(&transacter_sk).unwrap(); - let transacter_tx = transacter_tx_signer.get_tx().unwrap(); - - let new_bhh = BurnchainHeaderHash([i; 32]); - let new_ch = ConsensusHash([i; 20]); - let new_sh = SortitionHash([1; 32]); - 
- let parent_block_id = StacksBlockId::new(&parent_snapshot.consensus_hash, &parent); - let tenure_change_tx_payload = TransactionPayload::TenureChange(TenureChangePayload { - previous_tenure_end: parent_block_id, - previous_tenure_blocks: 1, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0; 20]), - signature: SchnorrThresholdSignature {}, - signers: vec![], - }); - let mut tenure_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&transacter_sk).unwrap(), - tenure_change_tx_payload, - ); - tenure_tx.chain_id = chainstate_chain_id; - tenure_tx.set_origin_nonce((2 * (i - 1) + 1).into()); - let txid = tenure_tx.txid(); - let mut tenure_tx_signer = StacksTransactionSigner::new(&tenure_tx); - tenure_tx_signer.sign_origin(&transacter_sk).unwrap(); - let tenure_tx = tenure_tx_signer.get_tx().unwrap(); - - let block = NakamotoBlock { - header: NakamotoBlockHeader { - version: 100, - chain_length: i.into(), - burn_spent: 10, - parent, - burn_view: parent_snapshot.burn_header_hash.clone(), - tx_merkle_root: Sha512Trunc256Sum([0; 32]), - state_index_root: TrieHash::from_hex(&index_roots[usize::from(i) - 1]).unwrap(), - stacker_signature: MessageSignature([0; 65]), - miner_signature: MessageSignature([0; 65]), - consensus_hash: new_ch, - parent_consensus_hash: parent_snapshot.consensus_hash.clone(), - }, - txs: vec![coinbase_tx, transacter_tx, tenure_tx], - }; - - let new_snapshot = BlockSnapshot { - block_height: parent_snapshot.block_height + 1, - burn_header_timestamp: 100 * u64::from(i), - burn_header_hash: new_bhh.clone(), - parent_burn_header_hash: parent_snapshot.burn_header_hash.clone(), - consensus_hash: new_ch.clone(), - ops_hash: OpsHash([0; 32]), - total_burn: 10, - sortition: true, - sortition_hash: new_sh, - winning_block_txid: Txid([0; 32]), - winning_stacks_block_hash: block.header.block_hash(), - index_root: block.header.state_index_root, - num_sortitions: parent_snapshot.num_sortitions + 1, - 
stacks_block_accepted: true, - stacks_block_height: block.header.chain_length, - arrival_index: i.into(), - canonical_stacks_tip_height: i.into(), - canonical_stacks_tip_hash: block.header.block_hash(), - canonical_stacks_tip_consensus_hash: new_ch.clone(), - sortition_id: SortitionId::new(&new_bhh.clone(), &PoxId::new(vec![true])), - parent_sortition_id: parent_snapshot.sortition_id.clone(), - pox_valid: true, - accumulated_coinbase_ustx: 0, - miner_pk_hash: None, - }; - - sortdb_tx - .append_chain_tip_snapshot( - &parent_snapshot, - &new_snapshot, - &vec![], - &vec![], - None, - None, - None, - ) - .unwrap(); - - sortdb_tx.commit().unwrap(); - let mut sortdb_tx = sort_db.tx_handle_begin(&new_snapshot.sortition_id).unwrap(); - - let chain_tip_burn_header_hash = new_snapshot.burn_header_hash.clone(); - let chain_tip_burn_header_height = new_snapshot.block_height; - let chain_tip_burn_header_timestamp = new_snapshot.burn_header_timestamp; - - let block_size = 10; - let burnchain_commit_burn = 1; - let burnchain_sortition_burn = 10; - let parent_chain_tip = StacksHeaderInfo { - anchored_header: parent_header.clone(), - microblock_tail: None, - stacks_block_height: parent_header.height(), - index_root: parent_snapshot.index_root.clone(), - consensus_hash: parent_snapshot.consensus_hash.clone(), - burn_header_hash: parent_snapshot.burn_header_hash.clone(), - burn_header_height: parent_snapshot.block_height.try_into().unwrap(), - burn_header_timestamp: parent_snapshot.burn_header_timestamp, - anchored_block_size: 10, - }; - - let (_receipt, clarity_tx) = NakamotoChainState::append_block( - &mut chainstate_tx, - clarity_instance, - &mut sortdb_tx, - &pox_constants, - &parent_chain_tip, - &chain_tip_burn_header_hash, - chain_tip_burn_header_height.try_into().unwrap(), - chain_tip_burn_header_timestamp, - &block, - block_size, - burnchain_commit_burn, - burnchain_sortition_burn, - ) - .unwrap(); - - clarity_tx.commit(); - chainstate_tx.commit().unwrap(); - - last_block = 
Some(block); - } - - // we've produced 5 simulated blocks now (1, 2, 3, 4, and 5) - // - // rewards from block 1 should mature 2 tenures later in block 3. - // however, due to the way `find_mature_miner_rewards` works, in - // the current setup block 1's reward is missed: - // `find_mature_miner_rewards` checks the *parent* of the current - // block (i.e., the block that block 1's reward mature's in) for - // `<= MINER_REWARD_MATURITY`. - // this means that for these unit tests, blocks 2 and 3 will have rewards - // processed at blocks 4 and 5 - // - // in nakamoto, tx fees are rewarded by the next tenure, so the - // scheduled rewards come 1 tenure after the coinbase reward matures - for i in 1..6 { - let ch = ConsensusHash([i; 20]); - let bh = SortitionDB::get_block_snapshot_consensus(sort_db.conn(), &ch) - .unwrap() - .unwrap() - .winning_stacks_block_hash; - let block_id = StacksBlockId::new(&ch, &bh); - - let (chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let sort_db_tx = sort_db.tx_begin_at_tip(); - - let stx_balance = clarity_instance - .read_only_connection(&block_id, &chainstate_tx, &sort_db_tx) - .with_clarity_db_readonly(|db| db.get_account_stx_balance(&miner.clone().into())); - - eprintln!("Checking block #{}", i); - let expected_total_tx_fees = u128::from(transacter_fee) * u128::from(i).saturating_sub(3); - let expected_total_coinbase = 1000000000 * u128::from(i).saturating_sub(3); - assert_eq!( - stx_balance.amount_unlocked(), - expected_total_coinbase + expected_total_tx_fees - ); - } -} diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs new file mode 100644 index 0000000000..5ef40e1816 --- /dev/null +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -0,0 +1,823 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2023 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute 
it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::borrow::BorrowMut; +use std::fs; + +use clarity::types::chainstate::{PoxId, SortitionId, StacksBlockId}; +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::types::StacksAddressExtensions; +use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::{ + BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksPrivateKey, StacksWorkScore, + TrieHash, +}; +use stacks_common::types::{PrivateKey, StacksEpoch, StacksEpochId}; +use stacks_common::util::hash::{hex_bytes, Hash160, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; +use stacks_common::util::vrf::{VRFPrivateKey, VRFProof}; +use stdext::prelude::Integer; +use stx_genesis::GenesisData; + +use crate::burnchains::{PoxConstants, Txid}; +use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::burn::{BlockSnapshot, OpsHash, SortitionHash}; +use crate::chainstate::coordinator::tests::{ + get_burnchain, get_burnchain_db, get_chainstate, get_rw_sortdb, get_sortition_db, p2pkh_from, + pox_addr_from, setup_states_with_epochs, +}; +use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; +use crate::chainstate::stacks::db::{ + ChainStateBootData, 
ChainstateAccountBalance, ChainstateAccountLockup, ChainstateBNSName, + ChainstateBNSNamespace, StacksAccount, StacksBlockHeaderTypes, StacksChainState, + StacksHeaderInfo, +}; +use crate::chainstate::stacks::{ + CoinbasePayload, SchnorrThresholdSignature, StacksBlock, StacksBlockHeader, StacksTransaction, + StacksTransactionSigner, TenureChangeCause, TenureChangePayload, TokenTransferMemo, + TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionVersion, +}; +use crate::core; +use crate::core::StacksEpochExtension; +use crate::net::codec::test::check_codec_and_corruption; + +/// Get an address's account +pub fn get_account( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + addr: &StacksAddress, +) -> StacksAccount { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap() + .unwrap(); + debug!( + "Canonical block header is {}/{} ({}): {:?}", + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + &tip.index_block_hash(), + &tip + ); + + chainstate + .with_read_only_clarity_tx( + &sortdb.index_conn(), + &tip.index_block_hash(), + |clarity_conn| { + StacksChainState::get_account(clarity_conn, &addr.to_account_principal()) + }, + ) + .unwrap() +} + +fn test_path(name: &str) -> String { + format!("/tmp/stacks-node-tests/nakamoto-tests/{}", name) +} + +pub mod node; + +#[test] +fn codec_nakamoto_header() { + let header = NakamotoBlockHeader { + version: 1, + chain_length: 2, + burn_spent: 3, + consensus_hash: ConsensusHash([0x04; 20]), + parent_block_id: StacksBlockId([0x05; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), + state_index_root: TrieHash([0x07; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty(), + }; + + let bytes = vec![ + // version + 0x01, // chain length + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, // burn spent + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, // consensus hash + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, // parent block id + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x05, 0x05, // tx merkle root + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, // state index root + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, // miner signature + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, // stacker signature + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, + ]; + + check_codec_and_corruption(&header, &bytes); +} + +#[test] +pub fn test_nakamoto_first_tenure_block_syntactic_validation() { + let private_key = StacksPrivateKey::new(); + let header = NakamotoBlockHeader { + version: 1, + chain_length: 2, + burn_spent: 3, + consensus_hash: ConsensusHash([0x04; 20]), + parent_block_id: StacksBlockId([0x05; 32]), + tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), + 
state_index_root: TrieHash([0x07; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty(), + }; + + let tenure_change_payload = TransactionPayload::TenureChange(TenureChangePayload { + previous_tenure_end: header.parent_block_id.clone(), + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x02; 20]), + signature: SchnorrThresholdSignature {}, + signers: vec![], + }); + + let invalid_tenure_change_payload = TransactionPayload::TenureChange(TenureChangePayload { + // bad parent block ID + previous_tenure_end: StacksBlockId([0x00; 32]), + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x02; 20]), + signature: SchnorrThresholdSignature {}, + signers: vec![], + }); + + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + + let coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof.clone())); + + // invalid coinbase payload -- needs a proof + let invalid_coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None); + + let mut tenure_change_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + tenure_change_payload.clone(), + ); + tenure_change_tx.chain_id = 0x80000000; + tenure_change_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let mut invalid_tenure_change_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + invalid_tenure_change_payload.clone(), + ); + invalid_tenure_change_tx.chain_id = 0x80000000; + invalid_tenure_change_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let mut coinbase_tx = 
StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + coinbase_payload.clone(), + ); + coinbase_tx.chain_id = 0x80000000; + coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let mut invalid_coinbase_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + invalid_coinbase_payload.clone(), + ); + invalid_coinbase_tx.chain_id = 0x80000000; + invalid_coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + // no tenure change if the block doesn't have a tenure change + let block = NakamotoBlock { + header: header.clone(), + txs: vec![], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), None); + assert_eq!(block.tenure_changed(), None); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); // empty blocks not allowed + + // syntactically invalid block if there's a tenure change but no coinbase + let block = NakamotoBlock { + header: header.clone(), + txs: vec![tenure_change_tx.clone()], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); + + // syntactically invalid block if there's a coinbase but not tenure change + let block = NakamotoBlock { + header: header.clone(), + txs: vec![coinbase_tx.clone()], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); 
+ + // syntactically invalid block if there's a coinbase and tenure change, but the coinbase is + // missing a proof + let block = NakamotoBlock { + header: header.clone(), + txs: vec![tenure_change_tx.clone(), invalid_coinbase_tx.clone()], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); + + // syntactically invalid block if there is more than one coinbase transaction + let block = NakamotoBlock { + header: header.clone(), + txs: vec![ + tenure_change_tx.clone(), + coinbase_tx.clone(), + coinbase_tx.clone(), + ], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); + + // syntactically invalid block if the coinbase comes before a tenure change + let block = NakamotoBlock { + header: header.clone(), + txs: vec![coinbase_tx.clone(), tenure_change_tx.clone()], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); + + // syntactically invalid block if there is a tenure change after the coinbase + let block = NakamotoBlock { + header: header.clone(), + txs: vec![ + tenure_change_tx.clone(), + coinbase_tx.clone(), + tenure_change_tx.clone(), + ], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(false)); + assert_eq!(block.tenure_changed(), Some(false)); + 
assert_eq!(block.get_coinbase_tx(), None); + assert_eq!(block.get_vrf_proof(), None); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); + + // syntatically invalid block if there's an invalid tenure change + let block = NakamotoBlock { + header: header.clone(), + txs: vec![ + tenure_change_tx.clone(), + invalid_tenure_change_tx.clone(), + coinbase_tx.clone(), + ], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(true)); + assert_eq!(block.tenure_changed(), Some(false)); + assert_eq!(block.get_coinbase_tx(), Some(&coinbase_tx)); + assert_eq!(block.get_vrf_proof(), Some(&proof)); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); + + // syntactically valid only if we have syntactically valid tenure changes and a syntactically + // valid coinbase + let block = NakamotoBlock { + header: header.clone(), + txs: vec![tenure_change_tx.clone(), coinbase_tx.clone()], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(true)); + assert_eq!(block.tenure_changed(), Some(true)); + assert_eq!(block.get_coinbase_tx(), Some(&coinbase_tx)); + assert_eq!(block.get_vrf_proof(), Some(&proof)); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + true + ); + + // can have multiple valid tenure changes (but note that this block is syntactically invalid + // because duplicate txs are not allowed) + let block = NakamotoBlock { + header: header.clone(), + txs: vec![ + tenure_change_tx.clone(), + tenure_change_tx.clone(), + coinbase_tx.clone(), + ], + }; + assert_eq!(block.is_wellformed_first_tenure_block(), Some(true)); + assert_eq!(block.tenure_changed(), Some(true)); + assert_eq!(block.get_coinbase_tx(), Some(&coinbase_tx)); + assert_eq!(block.get_vrf_proof(), Some(&proof)); + assert_eq!( + block.validate_transactions_static(false, 0x80000000, StacksEpochId::Epoch30), + false + ); // duplicate 
transaction +} + +#[test] +pub fn test_load_store_update_nakamoto_blocks() { + let test_name = function_name!(); + let path = test_path(&test_name); + let pox_constants = PoxConstants::new(5, 3, 3, 25, 5, 0, 0, 0, 0, 0, 0, 0); + let epochs = StacksEpoch::unit_test_3_0_only(1); + let _ = std::fs::remove_dir_all(&path); + let burnchain_conf = get_burnchain(&path, Some(pox_constants.clone())); + + setup_states_with_epochs( + &[&path], + &[], + &[], + Some(pox_constants.clone()), + None, + StacksEpochId::Epoch30, + Some(epochs), + ); + + let private_key = StacksPrivateKey::new(); + let epoch2_proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let epoch2_proof = VRFProof::from_bytes(&epoch2_proof_bytes[..].to_vec()).unwrap(); + + let nakamoto_proof_bytes = hex_bytes("973c815ac3e81a4aff3243f3d8310d24ab9783acd6caa4dcfab20a3744584b2f966acf08140e1a7e1e685695d51b1b511f4f19260a21887244a6c47f7637b8bdeaf5eafe85c1975bab75bc0668fe8a0b").unwrap(); + let nakamoto_proof = VRFProof::from_bytes(&nakamoto_proof_bytes[..].to_vec()).unwrap(); + + let coinbase_payload = TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + None, + Some(nakamoto_proof.clone()), + ); + + let mut coinbase_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + coinbase_payload.clone(), + ); + coinbase_tx.chain_id = 0x80000000; + coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let epoch2_txs = vec![coinbase_tx.clone()]; + let epoch2_tx_merkle_root = { + let txid_vecs = epoch2_txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + + let epoch2_header = StacksBlockHeader { + version: 0, + total_work: StacksWorkScore { + burn: 123, + work: 456, + }, + proof: epoch2_proof.clone(), + parent_block: BlockHeaderHash([0x11; 
32]), + parent_microblock: BlockHeaderHash([0x00; 32]), + parent_microblock_sequence: 0, + tx_merkle_root: epoch2_tx_merkle_root, + state_index_root: TrieHash([0x55; 32]), + microblock_pubkey_hash: Hash160([0x66; 20]), + }; + let epoch2_consensus_hash = ConsensusHash([0x03; 20]); + let epoch2_parent_block_id = + StacksBlockId::new(&epoch2_consensus_hash, &epoch2_header.block_hash()); + + let epoch2_header_info = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Epoch2(epoch2_header.clone()), + microblock_tail: None, + stacks_block_height: epoch2_header.total_work.work, + index_root: TrieHash([0x56; 32]), + consensus_hash: epoch2_consensus_hash.clone(), + burn_header_hash: BurnchainHeaderHash([0x77; 32]), + burn_header_height: 100, + burn_header_timestamp: 1000, + anchored_block_size: 12345, + }; + + let epoch2_execution_cost = ExecutionCost { + write_length: 100, + write_count: 101, + read_length: 102, + read_count: 103, + runtime: 104, + }; + + let tenure_change_payload = TransactionPayload::TenureChange(TenureChangePayload { + previous_tenure_end: epoch2_parent_block_id.clone(), + previous_tenure_blocks: 1, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x02; 20]), + signature: SchnorrThresholdSignature {}, + signers: vec![], + }); + let mut tenure_change_tx = StacksTransaction::new( + TransactionVersion::Testnet, + TransactionAuth::from_p2pkh(&private_key).unwrap(), + tenure_change_payload.clone(), + ); + tenure_change_tx.chain_id = 0x80000000; + tenure_change_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; + + let nakamoto_txs = vec![tenure_change_tx.clone(), coinbase_tx.clone()]; + let nakamoto_tx_merkle_root = { + let txid_vecs = nakamoto_txs + .iter() + .map(|tx| tx.txid().as_bytes().to_vec()) + .collect(); + + MerkleTree::::new(&txid_vecs).root() + }; + + let nakamoto_header = NakamotoBlockHeader { + version: 1, + chain_length: 457, + burn_spent: 126, + consensus_hash: ConsensusHash([0x04; 20]), + parent_block_id: 
epoch2_parent_block_id.clone(), + tx_merkle_root: nakamoto_tx_merkle_root, + state_index_root: TrieHash([0x07; 32]), + miner_signature: MessageSignature::empty(), + stacker_signature: MessageSignature::empty(), + }; + + let nakamoto_header_info = StacksHeaderInfo { + anchored_header: StacksBlockHeaderTypes::Nakamoto(nakamoto_header.clone()), + microblock_tail: None, + stacks_block_height: nakamoto_header.chain_length, + index_root: TrieHash([0x67; 32]), + consensus_hash: nakamoto_header.consensus_hash.clone(), + burn_header_hash: BurnchainHeaderHash([0x88; 32]), + burn_header_height: 200, + burn_header_timestamp: 1001, + anchored_block_size: 123, + }; + + let nakamoto_execution_cost = ExecutionCost { + write_length: 200, + write_count: 201, + read_length: 202, + read_count: 203, + runtime: 204, + }; + + let total_nakamoto_execution_cost = ExecutionCost { + write_length: 400, + write_count: 401, + read_length: 402, + read_count: 403, + runtime: 404, + }; + + let epoch2_block = StacksBlock { + header: epoch2_header.clone(), + txs: epoch2_txs, + }; + + let nakamoto_block = NakamotoBlock { + header: nakamoto_header.clone(), + txs: nakamoto_txs, + }; + + let mut chainstate = get_chainstate(&path); + + // store epoch2 and nakamoto headers + { + let tx = chainstate.db_tx_begin().unwrap(); + StacksChainState::insert_stacks_block_header( + &tx, + &epoch2_parent_block_id, + &epoch2_header_info, + &epoch2_execution_cost, + 1, + ) + .unwrap(); + NakamotoChainState::insert_stacks_block_header( + &tx, + &nakamoto_header_info, + &nakamoto_header, + Some(&nakamoto_proof), + &nakamoto_execution_cost, + &total_nakamoto_execution_cost, + epoch2_header_info.anchored_header.height() + 1, + true, + 300, + ) + .unwrap(); + NakamotoChainState::store_block(&tx, nakamoto_block.clone(), false, false).unwrap(); + tx.commit().unwrap(); + } + + // can load Nakamoto block, but only the Nakamoto block + assert_eq!( + NakamotoChainState::load_nakamoto_block( + chainstate.db(), + 
&nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + nakamoto_block + ); + assert_eq!( + NakamotoChainState::load_nakamoto_block( + chainstate.db(), + &epoch2_header_info.consensus_hash, + &epoch2_header.block_hash() + ) + .unwrap(), + None + ); + + // nakamoto block should not be processed yet + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + chainstate.db(), + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + (false, false) + ); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + chainstate.db(), + &epoch2_header_info.consensus_hash, + &epoch2_header.block_hash() + ) + .unwrap(), + None + ); + + // set nakamoto block processed + { + let tx = chainstate.db_tx_begin().unwrap(); + NakamotoChainState::set_block_processed(&tx, &nakamoto_header.block_id()).unwrap(); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + &tx, + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + (true, false) + ); + } + // set nakamoto block orphaned + { + let tx = chainstate.db_tx_begin().unwrap(); + NakamotoChainState::set_block_orphaned(&tx, &nakamoto_header.block_id()).unwrap(); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + &tx, + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + (true, true) + ); + } + // orphan nakamoto block by parent + { + let tx = chainstate.db_tx_begin().unwrap(); + NakamotoChainState::set_block_orphaned(&tx, &nakamoto_header.parent_block_id).unwrap(); + assert_eq!( + NakamotoChainState::get_nakamoto_block_status( + &tx, + &nakamoto_header.consensus_hash, + &nakamoto_header.block_hash() + ) + .unwrap() + .unwrap(), + (false, true) + ); + } + + // only one nakamoto block in this tenure, so it's both the start and finish + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_start_block_header( + chainstate.db(), + 
&nakamoto_header.consensus_hash + ) + .unwrap() + .unwrap(), + nakamoto_header_info + ); + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_finish_block_header( + chainstate.db(), + &nakamoto_header.consensus_hash + ) + .unwrap() + .unwrap(), + nakamoto_header_info + ); + + // can query the tenure-start and epoch2 headers by consensus hash + assert_eq!( + NakamotoChainState::get_block_header_by_consensus_hash( + chainstate.db(), + &nakamoto_header.consensus_hash + ) + .unwrap() + .unwrap(), + nakamoto_header_info + ); + assert_eq!( + NakamotoChainState::get_block_header_by_consensus_hash( + chainstate.db(), + &epoch2_consensus_hash + ) + .unwrap() + .unwrap(), + epoch2_header_info + ); + + // can query the tenure-start and epoch2 headers by block ID + assert_eq!( + NakamotoChainState::get_block_header(chainstate.db(), &nakamoto_header.block_id()) + .unwrap() + .unwrap(), + nakamoto_header_info + ); + assert_eq!( + NakamotoChainState::get_block_header( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap() + .unwrap(), + epoch2_header_info + ); + + // can get tenure height of nakamoto blocks and epoch2 blocks + assert_eq!( + NakamotoChainState::get_tenure_height(chainstate.db(), &nakamoto_header.block_id()) + .unwrap() + .unwrap(), + epoch2_header_info.anchored_header.height() + 1 + ); + assert_eq!( + NakamotoChainState::get_tenure_height( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap() + .unwrap(), + epoch2_header_info.anchored_header.height() + ); + + // can get total tenure cost for nakamoto blocks, but not epoch2 blocks + assert_eq!( + NakamotoChainState::get_total_tenure_cost_at(chainstate.db(), &nakamoto_header.block_id()) + .unwrap() + .unwrap(), + total_nakamoto_execution_cost + ); + assert_eq!( + NakamotoChainState::get_total_tenure_cost_at( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap(), + None + ); + + // can get total tenure tx fees for nakamoto blocks, but not in epoch2 
blocks + assert_eq!( + NakamotoChainState::get_total_tenure_tx_fees_at( + chainstate.db(), + &nakamoto_header.block_id() + ) + .unwrap() + .unwrap(), + 300 + ); + assert_eq!( + NakamotoChainState::get_total_tenure_tx_fees_at( + chainstate.db(), + &epoch2_header_info.index_block_hash() + ) + .unwrap(), + None + ); + + // can get block VRF proof for both nakamoto and epoch2 blocks + assert_eq!( + NakamotoChainState::get_block_vrf_proof(chainstate.db(), &nakamoto_header.consensus_hash) + .unwrap() + .unwrap(), + nakamoto_proof + ); + assert_eq!( + NakamotoChainState::get_block_vrf_proof(chainstate.db(), &epoch2_consensus_hash) + .unwrap() + .unwrap(), + epoch2_proof + ); + + // can get nakamoto VRF proof only for nakamoto blocks + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_vrf_proof( + chainstate.db(), + &nakamoto_header.consensus_hash + ) + .unwrap() + .unwrap(), + nakamoto_proof + ); + assert_eq!( + NakamotoChainState::get_nakamoto_tenure_vrf_proof(chainstate.db(), &epoch2_consensus_hash) + .unwrap(), + None + ); + + // next ready nakamoto block is None unless both the burn block and stacks parent block have + // been processed + { + let tx = chainstate.db_tx_begin().unwrap(); + assert_eq!( + NakamotoChainState::next_ready_nakamoto_block(&tx).unwrap(), + None + ); + + // set burn processed, but this isn't enough + NakamotoChainState::set_burn_block_processed(&tx, &nakamoto_header.consensus_hash).unwrap(); + assert_eq!( + NakamotoChainState::next_ready_nakamoto_block(&tx).unwrap(), + None + ); + + // set parent block processed + NakamotoChainState::set_block_processed(&tx, &epoch2_header_info.index_block_hash()) + .unwrap(); + + // this works now + assert_eq!( + NakamotoChainState::next_ready_nakamoto_block(&tx) + .unwrap() + .unwrap() + .0, + nakamoto_block + ); + } +} diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs new file mode 100644 index 0000000000..a9362a85d6 --- /dev/null +++ 
b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -0,0 +1,837 @@ +// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation +// Copyright (C) 2020-2022 Stacks Open Internet Foundation +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::cell::RefCell; +use std::collections::HashMap; +use std::collections::HashSet; +use std::collections::VecDeque; +use std::fs; +use std::io; +use std::path::{Path, PathBuf}; + +use clarity::vm::clarity::ClarityConnection; +use clarity::vm::costs::ExecutionCost; +use clarity::vm::costs::LimitedCostTracker; +use clarity::vm::types::*; +use rand::seq::SliceRandom; +use rand::thread_rng; +use rand::Rng; +use stacks_common::address::*; +use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; +use stacks_common::types::chainstate::BlockHeaderHash; +use stacks_common::types::chainstate::SortitionId; +use stacks_common::types::chainstate::StacksBlockId; +use stacks_common::types::chainstate::VRFSeed; +use stacks_common::util::hash::Hash160; +use stacks_common::util::sleep_ms; +use stacks_common::util::vrf::VRFProof; +use stacks_common::util::vrf::VRFPublicKey; + +use crate::burnchains::bitcoin::indexer::BitcoinIndexer; +use crate::burnchains::tests::*; +use crate::burnchains::*; +use crate::chainstate::burn::db::sortdb::*; +use crate::chainstate::burn::operations::{ + BlockstackOperationType, LeaderBlockCommitOp, 
LeaderKeyRegisterOp, UserBurnSupportOp, +}; +use crate::chainstate::burn::*; +use crate::chainstate::coordinator::ChainsCoordinator; +use crate::chainstate::coordinator::Error as CoordinatorError; +use crate::chainstate::coordinator::OnChainRewardSetProvider; +use crate::chainstate::nakamoto::coordinator::get_nakamoto_next_recipients; +use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use crate::chainstate::nakamoto::tests::get_account; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::address::PoxAddress; +use crate::chainstate::stacks::db::blocks::test::store_staging_block; +use crate::chainstate::stacks::db::test::*; +use crate::chainstate::stacks::db::*; +use crate::chainstate::stacks::miner::*; +use crate::chainstate::stacks::Error as ChainstateError; +use crate::chainstate::stacks::StacksBlock; +use crate::chainstate::stacks::C32_ADDRESS_VERSION_TESTNET_SINGLESIG; +use crate::chainstate::stacks::*; +use crate::cost_estimates::metrics::UnitMetric; +use crate::cost_estimates::UnitEstimator; +use crate::net::test::*; +use crate::util_lib::boot::boot_code_addr; +use crate::util_lib::db::Error as db_error; + +use crate::chainstate::stacks::tests::TestStacksNode; + +use crate::net::relay::Relayer; +use crate::net::test::{TestPeer, TestPeerConfig}; + +use crate::core::{BOOT_BLOCK_HASH, STACKS_EPOCH_3_0_MARKER}; + +impl TestBurnchainBlock { + pub fn add_nakamoto_tenure_commit( + &mut self, + ic: &SortitionDBConn, + miner: &mut TestMiner, + last_tenure_id: &StacksBlockId, + burn_fee: u64, + leader_key: &LeaderKeyRegisterOp, + fork_snapshot: Option<&BlockSnapshot>, + parent_block_snapshot: Option<&BlockSnapshot>, + vrf_seed: VRFSeed, + ) -> LeaderBlockCommitOp { + let tenure_id_as_block_hash = BlockHeaderHash(last_tenure_id.0.clone()); + self.inner_add_block_commit( + ic, + miner, + &tenure_id_as_block_hash, + burn_fee, + leader_key, + fork_snapshot, + 
parent_block_snapshot, + Some(vrf_seed), + STACKS_EPOCH_3_0_MARKER, + ) + } +} + +impl TestMiner { + pub fn nakamoto_miner_key(&self) -> StacksPrivateKey { + self.privks[0].clone() + } + + pub fn nakamoto_miner_hash160(&self) -> Hash160 { + let pubk = StacksPublicKey::from_private(&self.nakamoto_miner_key()); + Hash160::from_node_public_key(&pubk) + } + + pub fn make_nakamoto_coinbase( + &mut self, + recipient: Option, + vrf_proof: VRFProof, + ) -> StacksTransaction { + let mut tx_coinbase = StacksTransaction::new( + TransactionVersion::Testnet, + self.as_transaction_auth().unwrap(), + TransactionPayload::Coinbase( + CoinbasePayload([(self.nonce % 256) as u8; 32]), + recipient, + Some(vrf_proof), + ), + ); + tx_coinbase.chain_id = 0x80000000; + tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_coinbase.auth.set_origin_nonce(self.nonce); + + let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); + self.sign_as_origin(&mut tx_signer); + let tx_coinbase_signed = tx_signer.get_tx().unwrap(); + tx_coinbase_signed + } + + pub fn make_nakamoto_tenure_change( + &mut self, + tenure_change: TenureChangePayload, + ) -> StacksTransaction { + let mut tx_tenure_change = StacksTransaction::new( + TransactionVersion::Testnet, + // TODO: this needs to be a schnorr signature + self.as_transaction_auth().unwrap(), + TransactionPayload::TenureChange(tenure_change), + ); + tx_tenure_change.chain_id = 0x80000000; + tx_tenure_change.anchor_mode = TransactionAnchorMode::OnChainOnly; + tx_tenure_change.auth.set_origin_nonce(self.nonce); + + // TODO: This needs to be changed to an aggregate signature from the stackers + let mut tx_signer = StacksTransactionSigner::new(&tx_tenure_change); + self.sign_as_origin(&mut tx_signer); + let tx_tenure_change_signed = tx_signer.get_tx().unwrap(); + tx_tenure_change_signed + } + + pub fn sign_nakamoto_block(&self, block: &mut NakamotoBlock) { + block.header.sign_miner(&self.nakamoto_miner_key()).unwrap(); + } +} + +impl 
TestStacksNode { + pub fn add_nakamoto_tenure_commit( + sortdb: &SortitionDB, + burn_block: &mut TestBurnchainBlock, + miner: &mut TestMiner, + last_tenure_start: &StacksBlockId, + burn_amount: u64, + key_op: &LeaderKeyRegisterOp, + parent_block_snapshot: Option<&BlockSnapshot>, + vrf_seed: VRFSeed, + ) -> LeaderBlockCommitOp { + let block_commit_op = { + let ic = sortdb.index_conn(); + let parent_snapshot = burn_block.parent_snapshot.clone(); + burn_block.add_nakamoto_tenure_commit( + &ic, + miner, + last_tenure_start, + burn_amount, + key_op, + Some(&parent_snapshot), + parent_block_snapshot, + vrf_seed, + ) + }; + block_commit_op + } + + pub fn get_last_nakamoto_tenure(&self, miner: &TestMiner) -> Option> { + match miner.last_block_commit() { + None => None, + Some(block_commit_op) => { + let last_tenure_id = block_commit_op.last_tenure_id(); + match self.nakamoto_commit_ops.get(&last_tenure_id) { + None => None, + Some(idx) => self.nakamoto_blocks.get(*idx).cloned(), + } + } + } + } + + pub fn get_nakamoto_tenure( + &self, + last_tenure_id: &StacksBlockId, + ) -> Option> { + match self.nakamoto_commit_ops.get(last_tenure_id) { + None => None, + Some(idx) => Some(self.nakamoto_blocks[*idx].clone()), + } + } + + /// Begin the next nakamoto tenure by triggering a tenure-change. + /// Follow this call with a call to self.add_nakamoto_tenure_blocks() to add the corresponding + /// blocks, once they've been generated. 
+ pub fn make_nakamoto_tenure_commitment( + &mut self, + sortdb: &SortitionDB, + burn_block: &mut TestBurnchainBlock, + miner: &mut TestMiner, + last_tenure_id: &StacksBlockId, + burn_amount: u64, + miner_key: &LeaderKeyRegisterOp, + parent_block_snapshot_opt: Option<&BlockSnapshot>, + ) -> LeaderBlockCommitOp { + test_debug!( + "Miner {}: Commit to Nakamoto tenure starting at {}", + miner.id, + &last_tenure_id, + ); + + let parent_block = + NakamotoChainState::get_block_header(self.chainstate.db(), last_tenure_id) + .unwrap() + .unwrap(); + let vrf_proof = NakamotoChainState::get_block_vrf_proof( + self.chainstate.db(), + &parent_block.consensus_hash, + ) + .unwrap() + .unwrap(); + + debug!( + "proof from parent in {} is {}", + &parent_block.consensus_hash, + &vrf_proof.to_hex() + ); + let vrf_seed = VRFSeed::from_proof(&vrf_proof); + + // send block commit for this block + let block_commit_op = TestStacksNode::add_nakamoto_tenure_commit( + sortdb, + burn_block, + miner, + &last_tenure_id, + burn_amount, + miner_key, + parent_block_snapshot_opt, + vrf_seed, + ); + + test_debug!( + "Miner {}: Nakamoto tenure commit transaction builds on {},{} (parent snapshot is {:?})", + miner.id, + block_commit_op.parent_block_ptr, + block_commit_op.parent_vtxindex, + &parent_block_snapshot_opt + ); + + // NOTE: self.nakamoto_commit_ops[block_header_hash] now contains an index into + // self.nakamoto_blocks that doesn't exist. The caller needs to follow this call with a + // call to self.add_nakamoto_tenure_blocks() + self.nakamoto_commit_ops + .insert(last_tenure_id.clone(), self.nakamoto_blocks.len()); + block_commit_op + } + + /// Record the nakamoto tenure blocks + pub fn add_nakamoto_tenure_blocks(&mut self, tenure_blocks: Vec) { + self.nakamoto_blocks.push(tenure_blocks); + } + + /// Begin the next Nakamoto tenure. 
+ /// Create a block-commit, as well as a tenure change and VRF proof for use in a follow-on call + /// to make_nakamoto_tenure_blocks() + pub fn begin_nakamoto_tenure( + &mut self, + sortdb: &SortitionDB, + miner: &mut TestMiner, + burn_block: &mut TestBurnchainBlock, + miner_key: &LeaderKeyRegisterOp, + // parent Stacks block, if this is the first Nakamoto tenure + parent_stacks_block: Option<&StacksBlock>, + // parent Nakamoto blocks, if we're building atop a previous Nakamoto tenure + parent_nakamoto_tenure: Option<&[NakamotoBlock]>, + burn_amount: u64, + tenure_change_cause: TenureChangeCause, + ) -> (LeaderBlockCommitOp, TenureChangePayload) { + let ( + last_tenure_id, + previous_tenure_end, + previous_tenure_blocks, + parent_block_snapshot_opt, + ) = if let Some(parent_blocks) = parent_nakamoto_tenure { + // parent is an epoch 3 nakamoto block + let first_parent = parent_blocks.first().unwrap(); + let last_parent = parent_blocks.last().unwrap(); + let parent_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + let parent_sortition = SortitionDB::get_block_snapshot_consensus( + &sortdb.conn(), + &first_parent.header.consensus_hash, + ) + .unwrap() + .unwrap(); + + test_debug!( + "Work in {} {} for Nakamoto parent: {},{}", + burn_block.block_height, + burn_block.parent_snapshot.burn_header_hash, + parent_sortition.total_burn, + last_parent.header.chain_length + 1, + ); + + ( + parent_tenure_id, + last_parent.header.block_id(), + parent_blocks.len(), + Some(parent_sortition), + ) + } else if let Some(parent_stacks_block) = parent_stacks_block { + // building off an existing stacks block + let parent_stacks_block_snapshot = { + let ic = sortdb.index_conn(); + let parent_stacks_block_snapshot = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &burn_block.parent_snapshot.sortition_id, + &parent_stacks_block.block_hash(), + ) + .unwrap() + .unwrap(); + parent_stacks_block_snapshot + 
}; + + let parent_chain_tip = StacksChainState::get_anchored_block_header_info( + self.chainstate.db(), + &parent_stacks_block_snapshot.consensus_hash, + &parent_stacks_block.header.block_hash(), + ) + .unwrap() + .unwrap(); + + let parent_tenure_id = parent_chain_tip.index_block_hash(); + + test_debug!( + "Work in {} {} for Stacks 2.x parent: {},{}", + burn_block.block_height, + burn_block.parent_snapshot.burn_header_hash, + parent_stacks_block_snapshot.total_burn, + parent_chain_tip.anchored_header.height(), + ); + + ( + parent_tenure_id.clone(), + parent_tenure_id, + 1, + Some(parent_stacks_block_snapshot), + ) + } else { + // first epoch is a nakamoto epoch (testing only) + let parent_tenure_id = + StacksBlockId::new(&FIRST_BURNCHAIN_CONSENSUS_HASH, &FIRST_STACKS_BLOCK_HASH); + (parent_tenure_id.clone(), parent_tenure_id, 0, None) + }; + + let previous_tenure_blocks = + u32::try_from(previous_tenure_blocks).expect("FATAL: too many blocks from last miner"); + let tenure_change_payload = TenureChangePayload { + previous_tenure_end, + previous_tenure_blocks, + cause: tenure_change_cause, + pubkey_hash: miner.nakamoto_miner_hash160(), + signature: SchnorrThresholdSignature::empty(), + signers: vec![], + }; + + let block_commit_op = self.make_nakamoto_tenure_commitment( + sortdb, + burn_block, + miner, + &last_tenure_id, + burn_amount, + miner_key, + parent_block_snapshot_opt.as_ref(), + ); + + (block_commit_op, tenure_change_payload) + } + + /// Construct a full Nakamoto tenure with the given block builder. + /// The first block will contain a coinbase and a tenure-change. + /// Process the blocks via the chains coordinator as we produce them. 
+ pub fn make_nakamoto_tenure_blocks<'a, F>( + chainstate: &mut StacksChainState, + sortdb: &SortitionDB, + miner: &mut TestMiner, + proof: VRFProof, + tenure_change_payload: TenureChangePayload, + coord: &mut ChainsCoordinator< + 'a, + TestEventObserver, + (), + OnChainRewardSetProvider, + (), + (), + BitcoinIndexer, + >, + mut block_builder: F, + ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + where + F: FnMut( + &mut TestMiner, + &mut StacksChainState, + &SortitionDB, + usize, + ) -> Vec, + { + let miner_addr = miner.origin_address().unwrap(); + let miner_account = get_account(chainstate, sortdb, &miner_addr); + miner.set_nonce(miner_account.nonce); + + let mut tenure_change = Some(miner.make_nakamoto_tenure_change(tenure_change_payload)); + let mut coinbase = Some(miner.make_nakamoto_coinbase(None, proof.clone())); + + let mut blocks = vec![]; + let mut block_count = 0; + loop { + let mut txs = vec![]; + if let Some(tenure_change) = tenure_change.take() { + txs.push(tenure_change); + } + if let Some(coinbase) = coinbase.take() { + txs.push(coinbase); + } + let mut next_block_txs = block_builder(miner, chainstate, sortdb, block_count); + txs.append(&mut next_block_txs); + + if txs.len() == 0 { + break; + } + + let parent_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb).unwrap(); + let burn_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + debug!( + "Build Nakamoto block in tenure {}", + &burn_tip.consensus_hash + ); + + // make a block + let builder = if let Some(parent_tip) = parent_tip_opt { + NakamotoBlockBuilder::new_from_parent( + &parent_tip.index_block_hash(), + &parent_tip, + &burn_tip.consensus_hash, + burn_tip.total_burn, + if block_count == 0 { + Some(proof.clone()) + } else { + None + }, + ) + .unwrap() + } else { + NakamotoBlockBuilder::new_tenure_from_genesis(&proof) + }; + + let (mut nakamoto_block, size, cost) = builder + .make_nakamoto_block_from_txs(chainstate, &sortdb.index_conn(), 
txs) + .unwrap(); + miner.sign_nakamoto_block(&mut nakamoto_block); + + let block_id = nakamoto_block.block_id(); + debug!( + "Process Nakamoto block {} ({:?}", + &block_id, &nakamoto_block.header + ); + + let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&sort_tip); + let accepted = Relayer::process_new_nakamoto_block( + &sort_handle, + chainstate, + nakamoto_block.clone(), + ) + .unwrap(); + if accepted { + test_debug!("Accepted Nakamoto block {}", &block_id); + coord.handle_new_nakamoto_stacks_block().unwrap(); + + // confirm that the chain tip advanced + let stacks_chain_tip = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let nakamoto_chain_tip = stacks_chain_tip + .anchored_header + .as_stacks_nakamoto() + .expect("FATAL: chain tip is not a Nakamoto block"); + assert_eq!(nakamoto_chain_tip, &nakamoto_block.header); + } else { + test_debug!("Did NOT accept Nakamoto block {}", &block_id); + } + + blocks.push((nakamoto_block, size, cost)); + block_count += 1; + } + blocks + } +} + +impl<'a> TestPeer<'a> { + /// Get the Nakamoto parent linkage data for building atop the last-produced tenure or + /// Stacks 2.x block. 
+ /// Returns (last-tenure-id, epoch2-parent, nakamoto-parent-tenure, parent-sortition) + fn get_nakamoto_parent( + miner: &TestMiner, + stacks_node: &TestStacksNode, + sortdb: &SortitionDB, + ) -> ( + StacksBlockId, + Option, + Option>, + Option, + ) { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + if let Some(parent_blocks) = stacks_node.get_last_nakamoto_tenure(miner) { + // parent is an epoch 3 nakamoto block + let first_parent = parent_blocks.first().unwrap(); + let parent_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + let ic = sortdb.index_conn(); + let parent_sortition_opt = SortitionDB::get_block_snapshot_for_winning_nakamoto_tenure( + &ic, + &tip.sortition_id, + &parent_tenure_id, + ) + .unwrap(); + let last_tenure_id = StacksBlockId::new( + &first_parent.header.consensus_hash, + &first_parent.header.block_hash(), + ); + ( + last_tenure_id, + None, + Some(parent_blocks), + parent_sortition_opt, + ) + } else { + // parent may be an epoch 2.x block + let (parent_opt, parent_sortition_opt) = + if let Some(parent_block) = stacks_node.get_last_anchored_block(miner) { + let ic = sortdb.index_conn(); + let sort_opt = SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &parent_block.block_hash(), + ) + .unwrap(); + (Some(parent_block), sort_opt) + } else { + (None, None) + }; + + let last_tenure_id = if let Some(last_epoch2_block) = parent_opt.as_ref() { + let parent_sort = parent_sortition_opt.as_ref().unwrap(); + StacksBlockId::new( + &parent_sort.consensus_hash, + &last_epoch2_block.header.block_hash(), + ) + } else { + // must be a genesis block (testing only!) + StacksBlockId(BOOT_BLOCK_HASH.0.clone()) + }; + (last_tenure_id, parent_opt, None, parent_sortition_opt) + } + } + + /// Start the next Nakamoto tenure. 
+ /// This generates the VRF key and block-commit txs, as well as the TenureChange and + /// leader key this commit references + pub fn begin_nakamoto_tenure( + &mut self, + tenure_change_cause: TenureChangeCause, + ) -> ( + Vec, + TenureChangePayload, + LeaderKeyRegisterOp, + ) { + let mut sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let mut burn_block = TestBurnchainBlock::new(&tip, 0); + let mut stacks_node = self.stacks_node.take().unwrap(); + + let (last_tenure_id, parent_block_opt, parent_tenure_opt, parent_sortition_opt) = + Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); + + // find the VRF leader key register tx to use. + // it's the one pointed to by the parent tenure + let parent_consensus_hash_opt = if let Some(parent_tenure) = parent_tenure_opt.as_ref() { + let tenure_start_block = parent_tenure.first().unwrap(); + Some(tenure_start_block.header.consensus_hash) + } else if let Some(parent_block) = parent_block_opt.as_ref() { + let parent_header_info = + StacksChainState::get_stacks_block_header_info_by_index_block_hash( + stacks_node.chainstate.db(), + &last_tenure_id, + ) + .unwrap() + .unwrap(); + Some(parent_header_info.consensus_hash) + } else { + None + }; + + let last_key = if let Some(ch) = parent_consensus_hash_opt { + let tenure_sn = SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &ch) + .unwrap() + .unwrap(); + let tenure_block_commit = get_block_commit_by_txid( + sortdb.conn(), + &tenure_sn.sortition_id, + &tenure_sn.winning_block_txid, + ) + .unwrap() + .unwrap(); + let tenure_leader_key = SortitionDB::get_leader_key_at( + &sortdb.index_conn(), + tenure_block_commit.key_block_ptr.into(), + tenure_block_commit.key_vtxindex.into(), + &tenure_sn.sortition_id, + ) + .unwrap() + .unwrap(); + tenure_leader_key + } else { + panic!("No leader key"); + }; + + let network_id = self.config.network_id; + let chainstate_path = 
self.chainstate_path.clone(); + let burn_block_height = burn_block.block_height; + + let (mut block_commit_op, tenure_change_payload) = stacks_node.begin_nakamoto_tenure( + &sortdb, + &mut self.miner, + &mut burn_block, + &last_key, + parent_block_opt.as_ref(), + parent_tenure_opt.as_ref().map(|blocks| blocks.as_slice()), + 1000, + tenure_change_cause, + ); + + // patch up block-commit -- these blocks all mine off of genesis + if last_tenure_id == StacksBlockId(BOOT_BLOCK_HASH.0.clone()) { + block_commit_op.parent_block_ptr = 0; + block_commit_op.parent_vtxindex = 0; + } + + let mut burn_ops = vec![]; + if self.miner.last_VRF_public_key().is_none() { + let leader_key_op = stacks_node.add_key_register(&mut burn_block, &mut self.miner); + burn_ops.push(BlockstackOperationType::LeaderKeyRegister(leader_key_op)); + } + + // patch in reward set info + match get_nakamoto_next_recipients(&tip, &mut sortdb, &self.config.burnchain) { + Ok(recipients) => { + block_commit_op.commit_outs = match recipients { + Some(info) => { + let mut recipients = info + .recipients + .into_iter() + .map(|x| x.0) + .collect::>(); + if recipients.len() == 1 { + recipients.push(PoxAddress::standard_burn_address(false)); + } + recipients + } + None => { + if self + .config + .burnchain + .is_in_prepare_phase(burn_block.block_height) + { + vec![PoxAddress::standard_burn_address(false)] + } else { + vec![ + PoxAddress::standard_burn_address(false), + PoxAddress::standard_burn_address(false), + ] + } + } + }; + test_debug!( + "Block commit at height {} has {} recipients: {:?}", + block_commit_op.block_height, + block_commit_op.commit_outs.len(), + &block_commit_op.commit_outs + ); + } + Err(e) => { + panic!("Failure fetching recipient set: {:?}", e); + } + }; + + burn_ops.push(BlockstackOperationType::LeaderBlockCommit(block_commit_op)); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + (burn_ops, tenure_change_payload, last_key) + } + + /// Make the VRF proof for this 
tenure. + /// Call after processing the block-commit + pub fn make_nakamoto_vrf_proof(&mut self, miner_key: LeaderKeyRegisterOp) -> VRFProof { + let sortdb = self.sortdb.take().unwrap(); + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + let proof = self + .miner + .make_proof(&miner_key.public_key, &tip.sortition_hash) + .expect(&format!( + "FATAL: no private key for {}", + miner_key.public_key.to_hex() + )); + self.sortdb = Some(sortdb); + debug!( + "VRF proof made from {} over {}: {}", + &miner_key.public_key.to_hex(), + &tip.sortition_hash, + &proof.to_hex() + ); + proof + } + + /// Produce and process a Nakamoto tenure, after processing the block-commit from + /// begin_nakamoto_tenure(). You'd process the burnchain ops from begin_nakamoto_tenure(), + /// take the consensus hash, and feed it in here. + /// + /// Returns the blocks, their sizes, and runtime costs + pub fn make_nakamoto_tenure( + &mut self, + consensus_hash: &ConsensusHash, + tenure_change_payload: TenureChangePayload, + vrf_proof: VRFProof, + block_builder: F, + ) -> Vec<(NakamotoBlock, u64, ExecutionCost)> + where + F: FnMut( + &mut TestMiner, + &mut StacksChainState, + &SortitionDB, + usize, + ) -> Vec, + { + let mut stacks_node = self.stacks_node.take().unwrap(); + let sortdb = self.sortdb.take().unwrap(); + + let (last_tenure_id, parent_block_opt, _parent_tenure_opt, parent_sortition_opt) = + Self::get_nakamoto_parent(&self.miner, &stacks_node, &sortdb); + let blocks = TestStacksNode::make_nakamoto_tenure_blocks( + &mut stacks_node.chainstate, + &sortdb, + &mut self.miner, + vrf_proof, + tenure_change_payload, + &mut self.coord, + block_builder, + ); + + let just_blocks = blocks + .clone() + .into_iter() + .map(|(block, _, _)| block) + .collect(); + stacks_node.add_nakamoto_tenure_blocks(just_blocks); + + self.stacks_node = Some(stacks_node); + self.sortdb = Some(sortdb); + + blocks + } + + /// Accept a new Nakamoto tenure via the relayer, and then try to 
process them. + pub fn process_nakamoto_tenure(&mut self, blocks: Vec) { + debug!("Peer will process {} Nakamoto blocks", blocks.len()); + + let sortdb = self.sortdb.take().unwrap(); + let mut node = self.stacks_node.take().unwrap(); + + let tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sort_handle = sortdb.index_handle(&tip); + + node.add_nakamoto_tenure_blocks(blocks.clone()); + for block in blocks.into_iter() { + let block_id = block.block_id(); + debug!("Process Nakamoto block {} ({:?}", &block_id, &block.header); + let accepted = + Relayer::process_new_nakamoto_block(&sort_handle, &mut node.chainstate, block) + .unwrap(); + if accepted { + test_debug!("Accepted Nakamoto block {}", &block_id); + self.coord.handle_new_nakamoto_stacks_block().unwrap(); + } else { + test_debug!("Did NOT accept Nakamoto block {}", &block_id); + } + } + + self.sortdb = Some(sortdb); + self.stacks_node = Some(node); + } +} diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index a3b9324c5a..dcc4a64021 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -573,25 +573,35 @@ impl StacksBlock { txs: &[StacksTransaction], epoch_id: StacksEpochId, ) -> bool { - if epoch_id < StacksEpochId::Epoch21 { - // nothing new since the start of the system is supported. - // Expand this list of things to check for as needed. 
- // * no pay-to-contract coinbases - // * no versioned smart contract payloads - for tx in txs.iter() { - if let TransactionPayload::Coinbase(_, ref recipient_opt) = &tx.payload { - if recipient_opt.is_some() { - // not supported - error!("Coinbase pay-to-alt-recipient not supported before Stacks 2.1"; "txid" => %tx.txid()); - return false; - } + for tx in txs.iter() { + if let TransactionPayload::Coinbase(_, ref recipient_opt, ref proof_opt) = &tx.payload { + if proof_opt.is_some() && epoch_id < StacksEpochId::Epoch30 { + // not supported + error!("Coinbase with VRF proof not supported before Stacks 3.0"; "txid" => %tx.txid()); + return false; } - if let TransactionPayload::SmartContract(_, ref version_opt) = &tx.payload { - if version_opt.is_some() { - // not supported - error!("Versioned smart contracts not supported before Stacks 2.1"); - return false; - } + if proof_opt.is_none() && epoch_id >= StacksEpochId::Epoch30 { + // not supported + error!("Coinbase with VRF proof is required in Stacks 3.0 and later"; "txid" => %tx.txid()); + return false; + } + if recipient_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { + // not supported + error!("Coinbase pay-to-alt-recipient not supported before Stacks 2.1"; "txid" => %tx.txid()); + return false; + } + } + if let TransactionPayload::SmartContract(_, ref version_opt) = &tx.payload { + if version_opt.is_some() && epoch_id < StacksEpochId::Epoch21 { + // not supported + error!("Versioned smart contracts not supported before Stacks 2.1"); + return false; + } + } + if let TransactionPayload::TenureChange(..) 
= &tx.payload { + if epoch_id < StacksEpochId::Epoch30 { + error!("TenureChange transaction not supported before Stacks 3.0"; "txid" => %tx.txid()); + return false; } } } @@ -1435,13 +1445,13 @@ mod test { let tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); let tx_coinbase_2 = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([1u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([1u8; 32]), None, None), ); let mut tx_invalid_coinbase = tx_coinbase.clone(); @@ -1569,7 +1579,7 @@ mod test { let tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); let mut tx_coinbase_offchain = tx_coinbase.clone(); @@ -1700,7 +1710,7 @@ mod test { let tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); let tx_coinbase_contract = StacksTransaction::new( @@ -1711,9 +1721,18 @@ mod test { Some(PrincipalData::Contract( QualifiedContractIdentifier::transient(), )), + None, ), ); + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + let tx_coinbase_proof = StacksTransaction::new( + TransactionVersion::Testnet, + origin_auth.clone(), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, Some(proof)), + ); + let stx_address = StacksAddress { version: 0, 
bytes: Hash160([0u8; 20]), @@ -1770,6 +1789,20 @@ mod test { ), ); + let tenure_change_payload = TenureChangePayload { + previous_tenure_end: StacksBlockId([0x00; 32]), + previous_tenure_blocks: 0, + cause: TenureChangeCause::BlockFound, + pubkey_hash: Hash160([0x00; 20]), + signature: SchnorrThresholdSignature::empty(), + signers: vec![], + }; + let tx_tenure_change = StacksTransaction::new( + TransactionVersion::Testnet, + origin_auth.clone(), + TransactionPayload::TenureChange(tenure_change_payload), + ); + let dup_txs = vec![ tx_coinbase.clone(), tx_transfer.clone(), @@ -1781,6 +1814,9 @@ mod test { let no_coinbase = vec![tx_transfer.clone()]; let coinbase_contract = vec![tx_coinbase_contract.clone()]; let versioned_contract = vec![tx_versioned_smart_contract.clone()]; + let nakamoto_coinbase = vec![tx_coinbase_proof.clone()]; + let tenure_change_tx = vec![tx_tenure_change.clone()]; + let nakamoto_txs = vec![tx_coinbase_proof.clone(), tx_tenure_change.clone()]; assert!(!StacksBlock::validate_transactions_unique(&dup_txs)); assert!(!StacksBlock::validate_transactions_network( @@ -1797,7 +1833,6 @@ mod test { &coinbase_contract, StacksEpochId::Epoch2_05 )); - assert!(StacksBlock::validate_transactions_static_epoch( &coinbase_contract, StacksEpochId::Epoch21 @@ -1811,6 +1846,30 @@ mod test { &versioned_contract, StacksEpochId::Epoch21 )); + assert!(!StacksBlock::validate_transactions_static_epoch( + &nakamoto_coinbase, + StacksEpochId::Epoch21 + )); + assert!(StacksBlock::validate_transactions_static_epoch( + &nakamoto_coinbase, + StacksEpochId::Epoch30 + )); + assert!(!StacksBlock::validate_transactions_static_epoch( + &coinbase_contract, + StacksEpochId::Epoch30 + )); + assert!(!StacksBlock::validate_transactions_static_epoch( + &tenure_change_tx, + StacksEpochId::Epoch21 + )); + assert!(StacksBlock::validate_transactions_static_epoch( + &nakamoto_txs, + StacksEpochId::Epoch30 + )); + assert!(!StacksBlock::validate_transactions_static_epoch( + &nakamoto_txs, + 
StacksEpochId::Epoch21 + )); } // TODO: diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 5396831c1c..b6ecd2a5a5 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -408,10 +408,18 @@ impl BurnStateDB for TestSimBurnStateDB { u32::MAX } + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } + fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } + fn get_pox_prepare_length(&self) -> u32 { self.pox_constants.prepare_length } diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index 969ff22aef..6ade061ddf 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -84,9 +84,11 @@ pub const BOOT_CODE_GENESIS: &'static str = std::include_str!("genesis.clar"); pub const POX_1_NAME: &'static str = "pox"; pub const POX_2_NAME: &'static str = "pox-2"; pub const POX_3_NAME: &'static str = "pox-3"; +pub const POX_4_NAME: &'static str = "pox-4"; const POX_2_BODY: &'static str = std::include_str!("pox-2.clar"); const POX_3_BODY: &'static str = std::include_str!("pox-3.clar"); +const POX_4_BODY: &'static str = std::include_str!("pox-4.clar"); pub const COSTS_1_NAME: &'static str = "costs"; pub const COSTS_2_NAME: &'static str = "costs-2"; @@ -107,6 +109,10 @@ lazy_static! 
{ format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_3_BODY); pub static ref POX_3_TESTNET_CODE: String = format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_3_BODY); + pub static ref POX_4_MAINNET_CODE: String = + format!("{}\n{}", BOOT_CODE_POX_MAINNET_CONSTS, POX_4_BODY); + pub static ref POX_4_TESTNET_CODE: String = + format!("{}\n{}", BOOT_CODE_POX_TESTNET_CONSTS, POX_4_BODY); pub static ref BOOT_CODE_COST_VOTING_TESTNET: String = make_testnet_cost_voting(); pub static ref STACKS_BOOT_CODE_MAINNET: [(&'static str, &'static str); 6] = [ ("pox", &BOOT_CODE_POX_MAINNET), @@ -165,7 +171,7 @@ pub struct PoxStartCycleInfo { pub missed_reward_slots: Vec<(PrincipalData, u128)>, } -#[derive(Debug, PartialEq, Clone)] +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] pub struct RewardSet { pub rewarded_addresses: Vec, pub start_cycle_state: PoxStartCycleInfo, @@ -351,6 +357,18 @@ impl StacksChainState { Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_3_NAME) } + /// Do all the necessary Clarity operations at the start of a PoX reward cycle. + /// Currently, this just means applying any auto-unlocks to Stackers who qualified. + /// + /// This should only be called for PoX v4 cycles. + pub fn handle_pox_cycle_start_pox_4( + clarity: &mut ClarityTransactionConnection, + cycle_number: u64, + cycle_info: Option, + ) -> Result, Error> { + Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_4_NAME) + } + /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. 
/// @@ -670,7 +688,7 @@ impl StacksChainState { ) -> u128 { // set the lower limit on reward scaling at 25% of liquid_ustx // (i.e., liquid_ustx / POX_MAXIMAL_SCALING) - let scale_by = cmp::max(participation, liquid_ustx / POX_MAXIMAL_SCALING as u128); + let scale_by = cmp::max(participation, liquid_ustx / u128::from(POX_MAXIMAL_SCALING)); let threshold_precise = scale_by / reward_slots; // compute the threshold as nearest 10k > threshold_precise let ceil_amount = match threshold_precise % POX_THRESHOLD_STEPS_USTX { @@ -697,9 +715,10 @@ impl StacksChainState { // set the lower limit on reward scaling at 25% of liquid_ustx // (i.e., liquid_ustx / POX_MAXIMAL_SCALING) - let scale_by = cmp::max(participation, liquid_ustx / POX_MAXIMAL_SCALING as u128); + let scale_by = cmp::max(participation, liquid_ustx / u128::from(POX_MAXIMAL_SCALING)); - let reward_slots = pox_settings.reward_slots() as u128; + let reward_slots = u128::try_from(pox_settings.reward_slots()) + .expect("FATAL: unreachable: more than 2^128 reward slots"); let threshold_precise = scale_by / reward_slots; // compute the threshold as nearest 10k > threshold_precise let ceil_amount = match threshold_precise % POX_THRESHOLD_STEPS_USTX { @@ -720,7 +739,7 @@ impl StacksChainState { block_id: &StacksBlockId, reward_cycle: u64, ) -> Result, Error> { - if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_1_NAME)? { + if !self.is_pox_active(sortdb, block_id, u128::from(reward_cycle), POX_1_NAME)? { debug!( "PoX was voted disabled in block {} (reward cycle {})", block_id, reward_cycle @@ -798,7 +817,7 @@ impl StacksChainState { block_id: &StacksBlockId, reward_cycle: u64, ) -> Result, Error> { - if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_2_NAME)? { + if !self.is_pox_active(sortdb, block_id, u128::from(reward_cycle), POX_2_NAME)? 
{ debug!( "PoX was voted disabled in block {} (reward cycle {})", block_id, reward_cycle @@ -887,7 +906,7 @@ impl StacksChainState { block_id: &StacksBlockId, reward_cycle: u64, ) -> Result, Error> { - if !self.is_pox_active(sortdb, block_id, reward_cycle as u128, POX_3_NAME)? { + if !self.is_pox_active(sortdb, block_id, u128::from(reward_cycle), POX_3_NAME)? { debug!( "PoX was voted disabled in block {} (reward cycle {})", block_id, reward_cycle @@ -970,6 +989,97 @@ impl StacksChainState { Ok(ret) } + /// Get all PoX reward addresses from .pox-4 + /// TODO: also return their stacker signer keys (as part of `RawRewardSetEntry` + fn get_reward_addresses_pox_4( + &mut self, + sortdb: &SortitionDB, + block_id: &StacksBlockId, + reward_cycle: u64, + ) -> Result, Error> { + if !self.is_pox_active(sortdb, block_id, u128::from(reward_cycle), POX_4_NAME)? { + debug!( + "PoX was voted disabled in block {} (reward cycle {})", + block_id, reward_cycle + ); + return Ok(vec![]); + } + + // how many in this cycle? + let num_addrs = self + .eval_boot_code_read_only( + sortdb, + block_id, + POX_4_NAME, + &format!("(get-reward-set-size u{})", reward_cycle), + )? + .expect_u128(); + + debug!( + "At block {:?} (reward cycle {}): {} PoX reward addresses", + block_id, reward_cycle, num_addrs + ); + + let mut ret = vec![]; + for i in 0..num_addrs { + // value should be (optional (tuple (pox-addr (tuple (...))) (total-ustx uint))). + let tuple = self + .eval_boot_code_read_only( + sortdb, + block_id, + POX_4_NAME, + &format!("(get-reward-set-pox-address u{} u{})", reward_cycle, i), + )? 
+ .expect_optional() + .expect(&format!( + "FATAL: missing PoX address in slot {} out of {} in reward cycle {}", + i, num_addrs, reward_cycle + )) + .expect_tuple(); + + let pox_addr_tuple = tuple + .get("pox-addr") + .expect(&format!("FATAL: no `pox-addr` in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i)) + .to_owned(); + + let reward_address = PoxAddress::try_from_pox_tuple(self.mainnet, &pox_addr_tuple) + .expect(&format!( + "FATAL: not a valid PoX address: {:?}", + &pox_addr_tuple + )); + + let total_ustx = tuple + .get("total-ustx") + .expect(&format!("FATAL: no 'total-ustx' in return value from (get-reward-set-pox-address u{} u{})", reward_cycle, i)) + .to_owned() + .expect_u128(); + + let stacker = tuple + .get("stacker") + .expect(&format!( + "FATAL: no 'stacker' in return value from (get-reward-set-pox-address u{} u{})", + reward_cycle, i + )) + .to_owned() + .expect_optional() + .map(|value| value.expect_principal()); + + debug!( + "Parsed PoX reward address"; + "stacked_ustx" => total_ustx, + "reward_address" => %reward_address, + "stacker" => ?stacker, + ); + ret.push(RawRewardSetEntry { + reward_address, + amount_stacked: total_ustx, + stacker, + }) + } + + Ok(ret) + } + /// Get the sequence of reward addresses, as well as the PoX-specified hash mode (which gets /// lost in the conversion to StacksAddress) /// Each address will have at least (get-stacking-minimum) tokens. 
@@ -990,10 +1100,15 @@ impl StacksChainState { .pox_constants .active_pox_contract(reward_cycle_start_height); + debug!( + "Active PoX contract at {} (burn height {}): {}", + block_id, current_burn_height, &pox_contract_name + ); let result = match pox_contract_name { x if x == POX_1_NAME => self.get_reward_addresses_pox_1(sortdb, block_id, reward_cycle), x if x == POX_2_NAME => self.get_reward_addresses_pox_2(sortdb, block_id, reward_cycle), x if x == POX_3_NAME => self.get_reward_addresses_pox_3(sortdb, block_id, reward_cycle), + x if x == POX_4_NAME => self.get_reward_addresses_pox_4(sortdb, block_id, reward_cycle), unknown_contract => { panic!("Blockchain implementation failure: PoX contract name '{}' is unknown. Chainstate is corrupted.", unknown_contract); @@ -1111,8 +1226,20 @@ pub mod test { #[test] fn get_reward_threshold_units() { - let test_pox_constants = - PoxConstants::new(501, 1, 1, 1, 5, 5000, 10000, u32::MAX, u32::MAX, u32::MAX); + let test_pox_constants = PoxConstants::new( + 501, + 1, + 1, + 1, + 5, + 5000, + 10000, + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + u32::MAX, + ); // when the liquid amount = the threshold step, // the threshold should always be the step size. 
let liquid = POX_THRESHOLD_STEPS_USTX; @@ -1493,6 +1620,37 @@ pub mod test { make_pox_2_or_3_lockup(key, nonce, amount, addr, lock_period, burn_ht, POX_3_NAME) } + /// TODO: add signer key + pub fn make_pox_4_lockup( + key: &StacksPrivateKey, + nonce: u64, + amount: u128, + addr: PoxAddress, + lock_period: u128, + burn_ht: u64, + ) -> StacksTransaction { + // ;; TODO: add signer key + // (define-public (stack-stx (amount-ustx uint) + // (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + // (burn-height uint) + // (lock-period uint)) + let addr_tuple = Value::Tuple(addr.as_clarity_tuple().unwrap()); + let payload = TransactionPayload::new_contract_call( + boot_code_test_addr(), + "pox-4", + "stack-stx", + vec![ + Value::UInt(amount), + addr_tuple, + Value::UInt(burn_ht as u128), + Value::UInt(lock_period), + ], + ) + .unwrap(); + + make_tx(key, nonce, 0, payload) + } + pub fn make_pox_2_or_3_lockup( key: &StacksPrivateKey, nonce: u64, diff --git a/stackslib/src/chainstate/stacks/boot/pox-4.clar b/stackslib/src/chainstate/stacks/boot/pox-4.clar new file mode 100644 index 0000000000..1f9ad6dad7 --- /dev/null +++ b/stackslib/src/chainstate/stacks/boot/pox-4.clar @@ -0,0 +1,1320 @@ +;; The .pox-4 contract +;; Error codes +(define-constant ERR_STACKING_UNREACHABLE 255) +(define-constant ERR_STACKING_CORRUPTED_STATE 254) +(define-constant ERR_STACKING_INSUFFICIENT_FUNDS 1) +(define-constant ERR_STACKING_INVALID_LOCK_PERIOD 2) +(define-constant ERR_STACKING_ALREADY_STACKED 3) +(define-constant ERR_STACKING_NO_SUCH_PRINCIPAL 4) +(define-constant ERR_STACKING_EXPIRED 5) +(define-constant ERR_STACKING_STX_LOCKED 6) +(define-constant ERR_STACKING_PERMISSION_DENIED 9) +(define-constant ERR_STACKING_THRESHOLD_NOT_MET 11) +(define-constant ERR_STACKING_POX_ADDRESS_IN_USE 12) +(define-constant ERR_STACKING_INVALID_POX_ADDRESS 13) +(define-constant ERR_STACKING_ALREADY_REJECTED 17) +(define-constant ERR_STACKING_INVALID_AMOUNT 18) +(define-constant ERR_NOT_ALLOWED 19) 
+(define-constant ERR_STACKING_ALREADY_DELEGATED 20) +(define-constant ERR_DELEGATION_EXPIRES_DURING_LOCK 21) +(define-constant ERR_DELEGATION_TOO_MUCH_LOCKED 22) +(define-constant ERR_DELEGATION_POX_ADDR_REQUIRED 23) +(define-constant ERR_INVALID_START_BURN_HEIGHT 24) +(define-constant ERR_NOT_CURRENT_STACKER 25) +(define-constant ERR_STACK_EXTEND_NOT_LOCKED 26) +(define-constant ERR_STACK_INCREASE_NOT_LOCKED 27) +(define-constant ERR_DELEGATION_NO_REWARD_SLOT 28) +(define-constant ERR_DELEGATION_WRONG_REWARD_SLOT 29) +(define-constant ERR_STACKING_IS_DELEGATED 30) +(define-constant ERR_STACKING_NOT_DELEGATED 31) + +;; PoX disabling threshold (a percent) +(define-constant POX_REJECTION_FRACTION u25) + +;; Valid values for burnchain address versions. +;; These first four correspond to address hash modes in Stacks 2.1, +;; and are defined in pox-mainnet.clar and pox-testnet.clar (so they +;; cannot be defined here again). +;; (define-constant ADDRESS_VERSION_P2PKH 0x00) +;; (define-constant ADDRESS_VERSION_P2SH 0x01) +;; (define-constant ADDRESS_VERSION_P2WPKH 0x02) +;; (define-constant ADDRESS_VERSION_P2WSH 0x03) +(define-constant ADDRESS_VERSION_NATIVE_P2WPKH 0x04) +(define-constant ADDRESS_VERSION_NATIVE_P2WSH 0x05) +(define-constant ADDRESS_VERSION_NATIVE_P2TR 0x06) +;; Keep these constants in lock-step with the address version buffs above +;; Maximum value of an address version as a uint +(define-constant MAX_ADDRESS_VERSION u6) +;; Maximum value of an address version that has a 20-byte hashbytes +;; (0x00, 0x01, 0x02, 0x03, and 0x04 have 20-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_20 u4) +;; Maximum value of an address version that has a 32-byte hashbytes +;; (0x05 and 0x06 have 32-byte hashbytes) +(define-constant MAX_ADDRESS_VERSION_BUFF_32 u6) + +;; Data vars that store a copy of the burnchain configuration. +;; Implemented as data-vars, so that different configurations can be +;; used in e.g. test harnesses. 
+(define-data-var pox-prepare-cycle-length uint PREPARE_CYCLE_LENGTH) +(define-data-var pox-reward-cycle-length uint REWARD_CYCLE_LENGTH) +(define-data-var pox-rejection-fraction uint POX_REJECTION_FRACTION) +(define-data-var first-burnchain-block-height uint u0) +(define-data-var configured bool false) +(define-data-var first-2-1-reward-cycle uint u0) + +;; This function can only be called once, when it boots up +(define-public (set-burnchain-parameters (first-burn-height uint) + (prepare-cycle-length uint) + (reward-cycle-length uint) + (rejection-fraction uint) + (begin-2-1-reward-cycle uint)) + (begin + (asserts! (not (var-get configured)) (err ERR_NOT_ALLOWED)) + (var-set first-burnchain-block-height first-burn-height) + (var-set pox-prepare-cycle-length prepare-cycle-length) + (var-set pox-reward-cycle-length reward-cycle-length) + (var-set pox-rejection-fraction rejection-fraction) + (var-set first-2-1-reward-cycle begin-2-1-reward-cycle) + (var-set configured true) + (ok true)) +) + +;; The Stacking lock-up state and associated metadata. +;; Records are inserted into this map via `stack-stx`, `delegate-stack-stx`, `stack-extend` +;; `delegate-stack-extend` and burnchain transactions for invoking `stack-stx`, etc. +;; Records will be deleted from this map when auto-unlocks are processed +;; +;; This map de-normalizes some state from the `reward-cycle-pox-address-list` map +;; and the `pox-4` contract tries to keep this state in sync with the reward-cycle +;; state. 
The major invariants of this `stacking-state` map are: +;; (1) any entry in `reward-cycle-pox-address-list` with `some stacker` points to a real `stacking-state` +;; (2) `stacking-state.reward-set-indexes` matches the index of that `reward-cycle-pox-address-list` +;; (3) all `stacking-state.reward-set-indexes` match the index of their reward cycle entries +;; (4) `stacking-state.pox-addr` matches `reward-cycle-pox-address-list.pox-addr` +;; (5) if set, (len reward-set-indexes) == lock-period +;; (6) (reward-cycle-to-burn-height (+ lock-period first-reward-cycle)) == (get unlock-height (stx-account stacker)) +;; These invariants only hold while `cur-reward-cycle < (+ lock-period first-reward-cycle)` +;; +(define-map stacking-state + { stacker: principal } + { + ;; Description of the underlying burnchain address that will + ;; receive PoX'ed tokens. Translating this into an address + ;; depends on the burnchain being used. When Bitcoin is + ;; the burnchain, this gets translated into a p2pkh, p2sh, + ;; p2wpkh-p2sh, p2wsh-p2sh, p2wpkh, p2wsh, or p2tr UTXO, + ;; depending on the version. The `hashbytes` field *must* be + ;; either 20 bytes or 32 bytes, depending on the output. + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + ;; how long the uSTX are locked, in reward cycles. + lock-period: uint, + ;; reward cycle when rewards begin + first-reward-cycle: uint, + ;; indexes in each reward-set associated with this user. + ;; these indexes are only valid looking forward from + ;; `first-reward-cycle` (i.e., they do not correspond + ;; to entries in the reward set that may have been from + ;; previous stack-stx calls, or prior to an extend) + reward-set-indexes: (list 12 uint), + ;; principal of the delegate, if stacker has delegated + delegated-to: (optional principal) + } +) + +;; Delegation relationships +(define-map delegation-state + { stacker: principal } + { + amount-ustx: uint, ;; how many uSTX delegated? 
+ delegated-to: principal, ;; who are we delegating? + until-burn-ht: (optional uint), ;; how long does the delegation last? + ;; does the delegate _need_ to use a specific + ;; pox recipient address? + pox-addr: (optional { version: (buff 1), hashbytes: (buff 32) }) + } +) + +;; allowed contract-callers +(define-map allowance-contract-callers + { sender: principal, contract-caller: principal } + { until-burn-ht: (optional uint) }) + +;; How many uSTX are stacked in a given reward cycle. +;; Updated when a new PoX address is registered, or when more STX are granted +;; to it. +(define-map reward-cycle-total-stacked + { reward-cycle: uint } + { total-ustx: uint } +) + +;; Internal map read by the Stacks node to iterate through the list of +;; PoX reward addresses on a per-reward-cycle basis. +(define-map reward-cycle-pox-address-list + { reward-cycle: uint, index: uint } + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + total-ustx: uint, + stacker: (optional principal) + } +) + +(define-map reward-cycle-pox-address-list-len + { reward-cycle: uint } + { len: uint } +) + +;; how much has been locked up for this address before +;; committing? +;; this map allows stackers to stack amounts < minimum +;; by paying the cost of aggregation during the commit +(define-map partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; This is identical to partial-stacked-by-cycle, but its data is never deleted. +;; It is used to preserve data for downstream clients to observe aggregate +;; commits. Each key/value pair in this map is simply the last value of +;; partial-stacked-by-cycle right after it was deleted (so, subsequent calls +;; to the `stack-aggregation-*` functions will overwrite this). 
+(define-map logged-partial-stacked-by-cycle + { + pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + sender: principal + } + { stacked-amount: uint } +) + +;; Amount of uSTX that reject PoX, by reward cycle +(define-map stacking-rejection + { reward-cycle: uint } + { amount: uint } +) + +;; Who rejected in which reward cycle +(define-map stacking-rejectors + { stacker: principal, reward-cycle: uint } + { amount: uint } +) + +;; Getter for stacking-rejectors +(define-read-only (get-pox-rejection (stacker principal) (reward-cycle uint)) + (map-get? stacking-rejectors { stacker: stacker, reward-cycle: reward-cycle })) + +;; Has PoX been rejected in the given reward cycle? +(define-read-only (is-pox-active (reward-cycle uint)) + (let ( + (reject-votes + (default-to + u0 + (get amount (map-get? stacking-rejection { reward-cycle: reward-cycle })))) + ) + ;; (100 * reject-votes) / stx-liquid-supply < pox-rejection-fraction + (< (* u100 reject-votes) + (* (var-get pox-rejection-fraction) stx-liquid-supply))) +) + +;; What's the reward cycle number of the burnchain block height? +;; Will runtime-abort if height is less than the first burnchain block (this is intentional) +(define-read-only (burn-height-to-reward-cycle (height uint)) + (/ (- height (var-get first-burnchain-block-height)) (var-get pox-reward-cycle-length))) + +;; What's the block height at the start of a given reward cycle? +(define-read-only (reward-cycle-to-burn-height (cycle uint)) + (+ (var-get first-burnchain-block-height) (* cycle (var-get pox-reward-cycle-length)))) + +;; What's the current PoX reward cycle? +(define-read-only (current-pox-reward-cycle) + (burn-height-to-reward-cycle burn-block-height)) + +;; Get the _current_ PoX stacking principal information. If the information +;; is expired, or if there's never been such a stacker, then returns none. +(define-read-only (get-stacker-info (stacker principal)) + (match (map-get? 
stacking-state { stacker: stacker }) + stacking-info + (if (<= (+ (get first-reward-cycle stacking-info) (get lock-period stacking-info)) (current-pox-reward-cycle)) + ;; present, but lock has expired + none + ;; present, and lock has not expired + (some stacking-info) + ) + ;; no state at all + none + )) + +(define-read-only (check-caller-allowed) + (or (is-eq tx-sender contract-caller) + (let ((caller-allowed + ;; if not in the caller map, return false + (unwrap! (map-get? allowance-contract-callers + { sender: tx-sender, contract-caller: contract-caller }) + false)) + (expires-at + ;; if until-burn-ht not set, then return true (because no expiry) + (unwrap! (get until-burn-ht caller-allowed) true))) + ;; is the caller allowance expired? + (if (>= burn-block-height expires-at) + false + true)))) + +(define-read-only (get-check-delegation (stacker principal)) + (let ((delegation-info (try! (map-get? delegation-state { stacker: stacker })))) + ;; did the existing delegation expire? + (if (match (get until-burn-ht delegation-info) + until-burn-ht (> burn-block-height until-burn-ht) + false) + ;; it expired, return none + none + ;; delegation is active + (some delegation-info)))) + +;; Get the size of the reward set for a reward cycle. +;; Note that this does _not_ return duplicate PoX addresses. +;; Note that this also _will_ return PoX addresses that are beneath +;; the minimum threshold -- i.e. the threshold can increase after insertion. +;; Used internally by the Stacks node, which filters out the entries +;; in this map to select PoX addresses with enough STX. +(define-read-only (get-reward-set-size (reward-cycle uint)) + (default-to + u0 + (get len (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle })))) + +;; How many rejection votes have we been accumulating for the next block +(define-read-only (next-cycle-rejection-votes) + (default-to + u0 + (get amount (map-get? 
stacking-rejection { reward-cycle: (+ u1 (current-pox-reward-cycle)) })))) + +;; Add a single PoX address to a single reward cycle. +;; Used to build up a set of per-reward-cycle PoX addresses. +;; No checking will be done -- don't call if this PoX address is already registered in this reward cycle! +;; Returns the index into the reward cycle that the PoX address is stored to +(define-private (append-reward-cycle-pox-addr (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-cycle uint) + (amount-ustx uint) + (stacker (optional principal))) + (let ((sz (get-reward-set-size reward-cycle))) + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: sz } + { pox-addr: pox-addr, total-ustx: amount-ustx, stacker: stacker }) + (map-set reward-cycle-pox-address-list-len + { reward-cycle: reward-cycle } + { len: (+ u1 sz) }) + sz)) + +;; How many uSTX are stacked? +(define-read-only (get-total-ustx-stacked (reward-cycle uint)) + (default-to + u0 + (get total-ustx (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) +) + +;; Called internally by the node to iterate through the list of PoX addresses in this reward cycle. +;; Returns (optional (tuple (pox-addr ) (total-ustx ))) +(define-read-only (get-reward-set-pox-address (reward-cycle uint) (index uint)) + (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: index })) + +(define-private (fold-unlock-reward-cycle (set-index uint) + (data-res (response { cycle: uint, + first-unlocked-cycle: uint, + stacker: principal + } int))) + (let ((data (try! data-res)) + (cycle (get cycle data)) + (first-unlocked-cycle (get first-unlocked-cycle data))) + ;; if current-cycle hasn't reached first-unlocked-cycle, just continue to next iter + (asserts! (>= cycle first-unlocked-cycle) (ok (merge data { cycle: (+ u1 cycle) }))) + (let ((cycle-entry (unwrap-panic (map-get? 
reward-cycle-pox-address-list { reward-cycle: cycle, index: set-index }))) + (cycle-entry-u (get stacker cycle-entry)) + (cycle-entry-total-ustx (get total-ustx cycle-entry)) + (cycle-last-entry-ix (- (get len (unwrap-panic (map-get? reward-cycle-pox-address-list-len { reward-cycle: cycle }))) u1))) + (asserts! (is-eq cycle-entry-u (some (get stacker data))) (err ERR_STACKING_CORRUPTED_STATE)) + (if (not (is-eq cycle-last-entry-ix set-index)) + ;; do a "move" if the entry to remove isn't last + (let ((move-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix })))) + (map-set reward-cycle-pox-address-list + { reward-cycle: cycle, index: set-index } + move-entry) + (match (get stacker move-entry) moved-stacker + ;; if the moved entry had an associated stacker, update its state + (let ((moved-state (unwrap-panic (map-get? stacking-state { stacker: moved-stacker }))) + ;; calculate the index into the reward-set-indexes that `cycle` is at + (moved-cycle-index (- cycle (get first-reward-cycle moved-state))) + (moved-reward-list (get reward-set-indexes moved-state)) + ;; reward-set-indexes[moved-cycle-index] = set-index via slice?, append, concat. + (update-list (unwrap-panic (replace-at? moved-reward-list moved-cycle-index set-index)))) + (map-set stacking-state { stacker: moved-stacker } + (merge moved-state { reward-set-indexes: update-list }))) + ;; otherwise, we don't need to update stacking-state after move + true)) + ;; if not moving, just noop + true) + ;; in all cases, we now need to delete the last list entry + (map-delete reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix }) + (map-set reward-cycle-pox-address-list-len { reward-cycle: cycle } { len: cycle-last-entry-ix }) + ;; finally, update `reward-cycle-total-stacked` + (map-set reward-cycle-total-stacked { reward-cycle: cycle } + { total-ustx: (- (get total-ustx (unwrap-panic (map-get? 
reward-cycle-total-stacked { reward-cycle: cycle }))) + cycle-entry-total-ustx) }) + (ok (merge data { cycle: (+ u1 cycle)} ))))) + +;; This method is called by the Stacks block processor directly in order to handle the contract state mutations +;; associated with an early unlock. This can only be invoked by the block processor: it is private, and no methods +;; from this contract invoke it. +(define-private (handle-unlock (user principal) (amount-locked uint) (cycle-to-unlock uint)) + (let ((user-stacking-state (unwrap-panic (map-get? stacking-state { stacker: user }))) + (first-cycle-locked (get first-reward-cycle user-stacking-state)) + (reward-set-indexes (get reward-set-indexes user-stacking-state))) + ;; iterate over each reward set the user is a member of, and remove them from the sets. only apply to reward sets after cycle-to-unlock. + (try! (fold fold-unlock-reward-cycle reward-set-indexes (ok { cycle: first-cycle-locked, first-unlocked-cycle: cycle-to-unlock, stacker: user }))) + ;; Now that we've cleaned up all the reward set entries for the user, delete the user's stacking-state + (map-delete stacking-state { stacker: user }) + (ok true))) + +;; Add a PoX address to the `cycle-index`-th reward cycle, if `cycle-index` is between 0 and the given num-cycles (exclusive). +;; Arguments are given as a tuple, so this function can be (folded ..)'ed onto a list of its arguments. +;; Used by add-pox-addr-to-reward-cycles. +;; No checking is done. +;; The returned tuple is the same as inputted `params`, but the `i` field is incremented if +;; the pox-addr was added to the given cycle. Also, `reward-set-indexes` grows to include all +;; of the `reward-cycle-index` key parts of the `reward-cycle-pox-address-list` which get added by this function. +;; This way, the caller knows which items in a given reward cycle's PoX address list got updated. 
+;; NOTE(review): the `cycle-index` parameter itself is unused below -- when this function is +;; folded over the fixed cycle-index list, the actual position is tracked via the `i` field of `params`. +(define-private (add-pox-addr-to-ith-reward-cycle (cycle-index uint) (params (tuple + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (reward-set-indexes (list 12 uint)) + (first-reward-cycle uint) + (num-cycles uint) + (stacker (optional principal)) + (amount-ustx uint) + (i uint)))) + (let ((reward-cycle (+ (get first-reward-cycle params) (get i params))) + (num-cycles (get num-cycles params)) + (i (get i params)) + ;; only touch cycle state while `i` is still within the requested number of cycles; + ;; past that point this fold step is a no-op and `reward-set-index` stays `none` + (reward-set-index (if (< i num-cycles) + (let ((total-ustx (get-total-ustx-stacked reward-cycle)) + (reward-index + ;; record how many uSTX this pox-addr will stack for in the given reward cycle + (append-reward-cycle-pox-addr + (get pox-addr params) + reward-cycle + (get amount-ustx params) + (get stacker params) + ))) + ;; update running total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: (+ (get amount-ustx params) total-ustx) }) + (some reward-index)) + none)) + (next-i (if (< i num-cycles) (+ i u1) i))) + { + pox-addr: (get pox-addr params), + first-reward-cycle: (get first-reward-cycle params), + num-cycles: num-cycles, + amount-ustx: (get amount-ustx params), + stacker: (get stacker params), + reward-set-indexes: (match + reward-set-index new (unwrap-panic (as-max-len? (append (get reward-set-indexes params) new) u12)) + (get reward-set-indexes params)), + i: next-i + })) + +;; Add a PoX address to a given sequence of reward cycle lists. +;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. 
+(define-private (add-pox-addr-to-reward-cycles (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint) + (stacker principal)) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11)) + (results (fold add-pox-addr-to-ith-reward-cycle cycle-indexes + { pox-addr: pox-addr, first-reward-cycle: first-reward-cycle, num-cycles: num-cycles, + reward-set-indexes: (list), amount-ustx: amount-ustx, i: u0, stacker: (some stacker) })) + (reward-set-indexes (get reward-set-indexes results))) + ;; For safety, add up the number of times (add-pox-addr-to-ith-reward-cycle) returns 1. + ;; It _should_ be equal to num-cycles. + (asserts! (is-eq num-cycles (get i results)) (err ERR_STACKING_UNREACHABLE)) + (asserts! (is-eq num-cycles (len reward-set-indexes)) (err ERR_STACKING_UNREACHABLE)) + (ok reward-set-indexes))) + +;; Fold step for add-pox-partial-stacked: add `amount-ustx` to the partial-stacked total of +;; one reward cycle, then advance to the next cycle. +(define-private (add-pox-partial-stacked-to-ith-cycle + (cycle-index uint) + (params { pox-addr: { version: (buff 1), hashbytes: (buff 32) }, + reward-cycle: uint, + num-cycles: uint, + amount-ustx: uint })) + (let ((pox-addr (get pox-addr params)) + (num-cycles (get num-cycles params)) + (reward-cycle (get reward-cycle params)) + (amount-ustx (get amount-ustx params))) + (let ((current-amount + (default-to u0 + (get stacked-amount + (map-get? partial-stacked-by-cycle { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle }))))) + (if (>= cycle-index num-cycles) + ;; do not add to cycles >= cycle-index + false + ;; otherwise, add to the partial-stacked-by-cycle + (map-set partial-stacked-by-cycle + { sender: tx-sender, pox-addr: pox-addr, reward-cycle: reward-cycle } + { stacked-amount: (+ amount-ustx current-amount) })) + ;; produce the next params tuple + ;; NOTE(review): `reward-cycle` is incremented even on the no-op branch above; harmless, + ;; since all cycles past `num-cycles` are skipped anyway. + { pox-addr: pox-addr, + reward-cycle: (+ u1 reward-cycle), + num-cycles: num-cycles, + amount-ustx: amount-ustx }))) + +;; Add a PoX address to a given sequence of partial reward cycle lists. 
+;; A PoX address can be added to at most 12 consecutive cycles. +;; No checking is done. +(define-private (add-pox-partial-stacked (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (first-reward-cycle uint) + (num-cycles uint) + (amount-ustx uint)) + (let ((cycle-indexes (list u0 u1 u2 u3 u4 u5 u6 u7 u8 u9 u10 u11))) + (fold add-pox-partial-stacked-to-ith-cycle cycle-indexes + { pox-addr: pox-addr, reward-cycle: first-reward-cycle, num-cycles: num-cycles, amount-ustx: amount-ustx }) + true)) + +;; What is the minimum number of uSTX to be stacked in the given reward cycle? +;; Used internally by the Stacks node, and visible publicly. +(define-read-only (get-stacking-minimum) + (/ stx-liquid-supply STACKING_THRESHOLD_25)) + +;; Is the address mode valid for a PoX address? +(define-read-only (check-pox-addr-version (version (buff 1))) + (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION)) + +;; Is this buffer the right length for the given PoX address? +;; Versions up to MAX_ADDRESS_VERSION_BUFF_20 require 20-byte hashes; versions up to +;; MAX_ADDRESS_VERSION_BUFF_32 require 32-byte hashes; anything above is invalid. +(define-read-only (check-pox-addr-hashbytes (version (buff 1)) (hashbytes (buff 32))) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_20) + (is-eq (len hashbytes) u20) + (if (<= (buff-to-uint-be version) MAX_ADDRESS_VERSION_BUFF_32) + (is-eq (len hashbytes) u32) + false))) + +;; Is the given lock period valid? +(define-read-only (check-pox-lock-period (lock-period uint)) + (and (>= lock-period MIN_POX_REWARD_CYCLES) + (<= lock-period MAX_POX_REWARD_CYCLES))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (can-stack-stx (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; minimum uSTX must be met + (asserts! 
(<= (get-stacking-minimum) amount-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle num-cycles))) + +;; Evaluate if a participant can stack an amount of STX for a given period. +;; This method is designed as a read-only method so that it can be used as +;; a set of guard conditions and also as a read-only RPC call that can be +;; performed beforehand. +(define-read-only (minimal-can-stack-stx + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (amount-ustx uint) + (first-reward-cycle uint) + (num-cycles uint)) + (begin + ;; amount must be valid + (asserts! (> amount-ustx u0) + (err ERR_STACKING_INVALID_AMOUNT)) + + ;; sender principal must not have rejected in this upcoming reward cycle + (asserts! (is-none (get-pox-rejection tx-sender first-reward-cycle)) + (err ERR_STACKING_ALREADY_REJECTED)) + + ;; lock period must be in acceptable range. + (asserts! (check-pox-lock-period num-cycles) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; address version must be valid + (asserts! (check-pox-addr-version (get version pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + ;; address hashbytes must be valid for the version + (asserts! (check-pox-addr-hashbytes (get version pox-addr) (get hashbytes pox-addr)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + + (ok true))) + +;; Revoke contract-caller authorization to call stacking methods +;; May only be invoked directly by the revoking tx-sender (see the +;; tx-sender/contract-caller equality check below). +(define-public (disallow-contract-caller (caller principal)) + (begin + (asserts! 
(is-eq tx-sender contract-caller) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-delete allowance-contract-callers { sender: tx-sender, contract-caller: caller })))) + +;; Give a contract-caller authorization to call stacking methods +;; normally, stacking methods may only be invoked by _direct_ transactions +;; (i.e., the tx-sender issues a direct contract-call to the stacking methods) +;; by issuing an allowance, the tx-sender may call through the allowed contract +(define-public (allow-contract-caller (caller principal) (until-burn-ht (optional uint))) + (begin + (asserts! (is-eq tx-sender contract-caller) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-set allowance-contract-callers + { sender: tx-sender, contract-caller: caller } + { until-burn-ht: until-burn-ht })))) + +;; Lock up some uSTX for stacking! Note that the given amount here is in micro-STX (uSTX). +;; The STX will be locked for the given number of reward cycles (lock-period). +;; This is the self-service interface. tx-sender will be the Stacker. +;; +;; * The given stacker cannot currently be stacking. +;; * You will need the minimum uSTX threshold. This will be determined by (get-stacking-minimum) +;; at the time this method is called. +;; * You may need to increase the amount of uSTX locked up later, since the minimum uSTX threshold +;; may increase between reward cycles. +;; * The Stacker will receive rewards in the reward cycle following `start-burn-ht`. +;; Importantly, `start-burn-ht` may not be further into the future than the next reward cycle, +;; and in most cases should be set to the current burn block height. +;; +;; The tokens will unlock and be returned to the Stacker (tx-sender) automatically. 
+(define-public (stack-stx (amount-ustx uint) + (pox-addr (tuple (version (buff 1)) (hashbytes (buff 32)))) + (start-burn-ht uint) + (lock-period uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to "post-date" their `stack-stx` transaction + (asserts! (is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender principal must not be stacking + (asserts! (is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance tx-sender) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; ensure that stacking can be performed + (try! (can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked + (let ((reward-set-indexes (try! (add-pox-addr-to-reward-cycles pox-addr first-reward-cycle lock-period amount-ustx tx-sender)))) + ;; add stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return the lock-up information, so the node can actually carry out the lock. 
+ (ok { stacker: tx-sender, lock-amount: amount-ustx, unlock-burn-height: (reward-cycle-to-burn-height (+ first-reward-cycle lock-period)) })))) + +;; Revoke any outstanding delegation record for tx-sender. +(define-public (revoke-delegate-stx) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (ok (map-delete delegation-state { stacker: tx-sender })))) + +;; Delegate to `delegate-to` the ability to stack from a given address. +;; This method _does not_ lock the funds, rather, it allows the delegate +;; to issue the stacking lock. +;; The caller specifies: +;; * amount-ustx: the total amount of ustx the delegate may be allowed to lock +;; * until-burn-ht: an optional burn height at which this delegation expires +;; * pox-addr: an optional address to which any rewards *must* be sent +(define-public (delegate-stx (amount-ustx uint) + (delegate-to principal) + (until-burn-ht (optional uint)) + (pox-addr (optional { version: (buff 1), + hashbytes: (buff 32) }))) + (begin + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; delegate-stx no longer requires the delegator to not currently + ;; be stacking. + ;; delegate-stack-* functions assert that + ;; 1. users can't swim in two pools at the same time. + ;; 2. users can't switch pools without cool down cycle. + ;; Other pool admins can't increase or extend. + ;; 3. users can't join a pool while already directly stacking. + + ;; pox-addr, if given, must be valid + (match pox-addr + address + (asserts! (check-pox-addr-version (get version address)) + (err ERR_STACKING_INVALID_POX_ADDRESS)) + true) + + ;; tx-sender must not be delegating + (asserts! 
(is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; add delegation record + (map-set delegation-state + { stacker: tx-sender } + { amount-ustx: amount-ustx, + delegated-to: delegate-to, + until-burn-ht: until-burn-ht, + pox-addr: pox-addr }) + + (ok true))) + +;; Commit partially stacked STX and allocate a new PoX reward address slot. +;; This allows a stacker/delegate to lock fewer STX than the minimal threshold in multiple transactions, +;; so long as: 1. The pox-addr is the same. +;; 2. This "commit" transaction is called _before_ the PoX anchor block. +;; This ensures that each entry in the reward set returned to the stacks-node is greater than the threshold, +;; but does not require it be all locked up within a single transaction +;; +;; Returns (ok uint) on success, where the given uint is the reward address's index in the list of reward +;; addresses allocated in this reward cycle. This index can then be passed to `stack-aggregation-increase` +;; to later increment the STX this PoX address represents, in amounts less than the stacking minimum. +;; +;; *New in Stacks 2.1.* +(define-private (inner-stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + (let ((amount-ustx (get stacked-amount partial-stacked))) + (try! (can-stack-stx pox-addr amount-ustx reward-cycle u1)) + ;; Add the pox addr to the reward cycle, and extract the index of the PoX address + ;; so the delegator can later use it to call stack-aggregation-increase. 
+ (let ((add-pox-addr-info + (add-pox-addr-to-ith-reward-cycle + u0 + { pox-addr: pox-addr, + first-reward-cycle: reward-cycle, + num-cycles: u1, + reward-set-indexes: (list), + stacker: none, + amount-ustx: amount-ustx, + i: u0 })) + ;; num-cycles is u1, so exactly one index was appended; take it from position u0 + (pox-addr-index (unwrap-panic + (element-at (get reward-set-indexes add-pox-addr-info) u0)))) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok pox-addr-index))))) + +;; Legacy interface for stack-aggregation-commit. +;; Wraps inner-stack-aggregation-commit. See its docstring for details. +;; Returns (ok true) on success +;; Returns (err ...) on failure. +(define-public (stack-aggregation-commit (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (match (inner-stack-aggregation-commit pox-addr reward-cycle) + pox-addr-index (ok true) + commit-err (err commit-err))) + +;; Public interface to `inner-stack-aggregation-commit`. See its documentation for details. +;; *New in Stacks 2.1.* +(define-public (stack-aggregation-commit-indexed (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint)) + (inner-stack-aggregation-commit pox-addr reward-cycle)) + +;; Commit partially stacked STX to a PoX address which has already received some STX (more than the Stacking min). +;; This allows a delegator to lock up marginally more STX from new delegates, even if they collectively do not +;; exceed the Stacking minimum, so long as the target PoX address already represents at least as many STX as the +;; Stacking minimum. 
+;; +;; The `reward-cycle-index` is emitted as a contract event from `stack-aggregation-commit` when the initial STX are +;; locked up by this delegator. It must be passed here to add more STX behind this PoX address. If the delegator +;; called `stack-aggregation-commit` multiple times for the same PoX address, then any such `reward-cycle-index` will +;; work here. +;; +;; *New in Stacks 2.1* +;; +(define-public (stack-aggregation-increase (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (reward-cycle uint) + (reward-cycle-index uint)) + (let ((partial-stacked + ;; fetch the partial commitments + (unwrap! (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (err ERR_STACKING_NO_SUCH_PRINCIPAL)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; reward-cycle must be in the future + (asserts! (> reward-cycle (current-pox-reward-cycle)) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((amount-ustx (get stacked-amount partial-stacked)) + ;; reward-cycle must point to an existing record in reward-cycle-total-stacked + ;; infallible; getting something from partial-stacked-by-cycle succeeded so this must succeed + (existing-total (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + ;; reward-cycle and reward-cycle-index must point to an existing record in reward-cycle-pox-address-list + (existing-entry (unwrap! (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }) + (err ERR_DELEGATION_NO_REWARD_SLOT))) + (increased-ustx (+ (get total-ustx existing-entry) amount-ustx)) + (total-ustx (+ (get total-ustx existing-total) amount-ustx))) + + ;; must be stackable + (try! (minimal-can-stack-stx pox-addr total-ustx reward-cycle u1)) + + ;; new total must exceed the stacking minimum + (asserts! 
(<= (get-stacking-minimum) total-ustx) + (err ERR_STACKING_THRESHOLD_NOT_MET)) + + ;; there must *not* be a stacker entry (since this is a delegator) + (asserts! (is-none (get stacker existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; the given PoX address must match the one on record + (asserts! (is-eq pox-addr (get pox-addr existing-entry)) + (err ERR_DELEGATION_WRONG_REWARD_SLOT)) + + ;; update the pox-address list -- bump the total-ustx + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: pox-addr, + total-ustx: increased-ustx, + stacker: none }) + + ;; update the total ustx in this cycle + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: total-ustx }) + + ;; don't update the stacking-state map, + ;; because it _already has_ this stacker's state + ;; don't lock the STX, because the STX is already locked + ;; + ;; clear the partial-stacked state, and log it + (map-delete partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle }) + (map-set logged-partial-stacked-by-cycle { pox-addr: pox-addr, sender: tx-sender, reward-cycle: reward-cycle } partial-stacked) + (ok true)))) + +;; As a delegate, stack the given principal's STX using partial-stacked-by-cycle +;; Once the delegate has stacked > minimum, the delegate should call stack-aggregation-commit +(define-public (delegate-stack-stx (stacker principal) + (amount-ustx uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (start-burn-ht uint) + (lock-period uint)) + ;; this stacker's first reward cycle is the _next_ reward cycle + (let ((first-reward-cycle (+ u1 (current-pox-reward-cycle))) + (specified-reward-cycle (+ u1 (burn-height-to-reward-cycle start-burn-ht))) + (unlock-burn-height (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 lock-period)))) + ;; the start-burn-ht must result in the next reward cycle, do not allow stackers + ;; to 
"post-date" their `stack-stx` transaction + (asserts! (is-eq first-reward-cycle specified-reward-cycle) + (err ERR_INVALID_START_BURN_HEIGHT)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + unlock-burn-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; stacker principal must not be stacking + (asserts! (is-none (get-stacker-info stacker)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; the Stacker must have sufficient unlocked funds + (asserts! (>= (stx-get-balance stacker) amount-ustx) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; ensure that stacking can be performed + (try! (minimal-can-stack-stx pox-addr amount-ustx first-reward-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-reward-cycle lock-period amount-ustx) + + ;; add stacker record + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + first-reward-cycle: first-reward-cycle, + reward-set-indexes: (list), + lock-period: lock-period, + delegated-to: (some tx-sender) }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + lock-amount: amount-ustx, + unlock-burn-height: unlock-burn-height }))) + +;; Reject Stacking for this reward cycle. +;; tx-sender votes all its uSTX for rejection. +;; Note that unlike PoX, rejecting PoX does not lock the tx-sender's +;; tokens. PoX rejection acts like a coin vote. +(define-public (reject-pox) + (let ( + (balance (stx-get-balance tx-sender)) + (vote-reward-cycle (+ u1 (current-pox-reward-cycle))) + ) + + ;; tx-sender principal must not have rejected in this upcoming reward cycle + (asserts! (is-none (get-pox-rejection tx-sender vote-reward-cycle)) + (err ERR_STACKING_ALREADY_REJECTED)) + + ;; tx-sender can't be a stacker + (asserts! 
(is-none (get-stacker-info tx-sender)) + (err ERR_STACKING_ALREADY_STACKED)) + + ;; vote for rejection + (map-set stacking-rejection + { reward-cycle: vote-reward-cycle } + { amount: (+ (next-cycle-rejection-votes) balance) } + ) + + ;; mark voted + (map-set stacking-rejectors + { stacker: tx-sender, reward-cycle: vote-reward-cycle } + { amount: balance } + ) + + (ok true)) +) + +;; Used for PoX parameters discovery +(define-read-only (get-pox-info) + (ok { + min-amount-ustx: (get-stacking-minimum), + reward-cycle-id: (current-pox-reward-cycle), + prepare-cycle-length: (var-get pox-prepare-cycle-length), + first-burnchain-block-height: (var-get first-burnchain-block-height), + reward-cycle-length: (var-get pox-reward-cycle-length), + rejection-fraction: (var-get pox-rejection-fraction), + current-rejection-votes: (next-cycle-rejection-votes), + total-liquid-supply-ustx: stx-liquid-supply, + }) +) + +;; Update the number of stacked STX in a given reward cycle entry. +;; `reward-cycle-index` is the index into the `reward-cycle-pox-address-list` map for a given reward cycle number. +;; `updates`, if `(some ..)`, encodes which PoX reward cycle entry (if any) gets updated. In particular, it must have +;; `(some stacker)` as the listed stacker, and must be an upcoming reward cycle. +(define-private (increase-reward-cycle-entry + (reward-cycle-index uint) + (updates (optional { first-cycle: uint, reward-cycle: uint, stacker: principal, add-amount: uint }))) + (let ((data (try! updates)) + (first-cycle (get first-cycle data)) + (reward-cycle (get reward-cycle data))) + (if (> first-cycle reward-cycle) + ;; not at first cycle to process yet + (some { first-cycle: first-cycle, reward-cycle: (+ u1 reward-cycle), stacker: (get stacker data), add-amount: (get add-amount data) }) + (let ((existing-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: reward-cycle-index }))) + (existing-total (unwrap-panic (map-get? 
reward-cycle-total-stacked { reward-cycle: reward-cycle }))) + (add-amount (get add-amount data)) + (total-ustx (+ (get total-ustx existing-total) add-amount))) + ;; stacker must match; on mismatch, `none` is produced here, which short-circuits + ;; the enclosing fold in `stack-increase` (the accumulator is an optional) + (asserts! (is-eq (get stacker existing-entry) (some (get stacker data))) none) + ;; update the pox-address list + (map-set reward-cycle-pox-address-list + { reward-cycle: reward-cycle, index: reward-cycle-index } + { pox-addr: (get pox-addr existing-entry), + ;; This addresses the bug in pox-2 (see SIP-022) + total-ustx: (+ (get total-ustx existing-entry) add-amount), + stacker: (some (get stacker data)) }) + ;; update the total + (map-set reward-cycle-total-stacked + { reward-cycle: reward-cycle } + { total-ustx: total-ustx }) + (some { first-cycle: first-cycle, + reward-cycle: (+ u1 reward-cycle), + stacker: (get stacker data), + add-amount: (get add-amount data) }))))) + +;; Increase the number of STX locked. +;; *New in Stacks 2.1* +;; This method locks up an additional amount of STX from `tx-sender`'s, indicated +;; by `increase-by`. The `tx-sender` must already be Stacking. +(define-public (stack-increase (increase-by uint)) + (let ((stacker-info (stx-account tx-sender)) + (amount-stacked (get locked stacker-info)) + (amount-unlocked (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (cur-cycle (current-pox-reward-cycle)) + (first-increased-cycle (+ cur-cycle u1)) + (stacker-state (unwrap! (map-get? stacking-state + { stacker: tx-sender }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + ;; tx-sender must be currently locked + (asserts! (> amount-stacked u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + ;; stacker must have enough stx to lock + (asserts! (>= amount-unlocked increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! 
(check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; stacker must be directly stacking + (asserts! (> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_IS_DELEGATED)) + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) + ;; update reward cycle amounts + (asserts! (is-some (fold increase-reward-cycle-entry + (get reward-set-indexes stacker-state) + (some { first-cycle: first-increased-cycle, + reward-cycle: (get first-reward-cycle stacker-state), + stacker: tx-sender, + add-amount: increase-by }))) + (err ERR_STACKING_UNREACHABLE)) + ;; NOTE: stacking-state map is unchanged: it does not track amount-stacked in PoX-4 + (ok { stacker: tx-sender, total-locked: (+ amount-stacked increase-by)}))) + +;; Extend an active Stacking lock. +;; *New in Stacks 2.1* +;; This method extends the `tx-sender`'s current lockup for an additional `extend-count` +;; and associates `pox-addr` with the rewards +(define-public (stack-extend (extend-count uint) + (pox-addr { version: (buff 1), hashbytes: (buff 32) })) + (let ((stacker-info (stx-account tx-sender)) + ;; to extend, there must already be an entry in the stacking-state + (stacker-state (unwrap! (get-stacker-info tx-sender) (err ERR_STACK_EXTEND_NOT_LOCKED))) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + (cur-cycle (current-pox-reward-cycle)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) + + ;; must be called with positive extend-count + (asserts! 
(>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be directly stacking + (asserts! (> (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_IS_DELEGATED)) + + ;; stacker must not be delegating + (asserts! (is-none (get delegated-to stacker-state)) + (err ERR_STACKING_IS_DELEGATED)) + + ;; TODO: add more assertions to sanity check the `stacker-info` values with + ;; the `stacker-state` values + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! (> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; tx-sender must be locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; tx-sender must not be delegating + (asserts! (is-none (get-check-delegation tx-sender)) + (err ERR_STACKING_ALREADY_DELEGATED)) + + ;; standard can-stack-stx checks + (try! (can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked + ;; for the new cycles + (let ((extended-reward-set-indexes (try! (add-pox-addr-to-reward-cycles pox-addr first-extend-cycle extend-count amount-ustx tx-sender))) + (reward-set-indexes + ;; use the active stacker state and extend the existing reward-set-indexes + (let ((cur-cycle-index (- first-reward-cycle (get first-reward-cycle stacker-state))) + (old-indexes (get reward-set-indexes stacker-state)) + ;; build index list by taking the old-indexes starting from cur cycle + ;; and adding the new indexes to it. 
this way, the index is valid starting from the current cycle + (new-list (concat (default-to (list) (slice? old-indexes cur-cycle-index (len old-indexes))) + extended-reward-set-indexes))) + (unwrap-panic (as-max-len? new-list u12))))) + ;; update stacker record + (map-set stacking-state + { stacker: tx-sender } + { pox-addr: pox-addr, + reward-set-indexes: reward-set-indexes, + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: none }) + + ;; return lock-up information + (ok { stacker: tx-sender, unlock-burn-height: new-unlock-ht }))))) + +;; As a delegator, increase an active Stacking lock, issuing a "partial commitment" for the +;; increased cycles. +;; *New in Stacks 2.1* +;; This method increases `stacker`'s current lockup and partially commits the additional +;; STX to `pox-addr` +(define-public (delegate-stack-increase + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (increase-by uint)) + (let ((stacker-info (stx-account stacker)) + (existing-lock (get locked stacker-info)) + (available-stx (get unlocked stacker-info)) + (unlock-height (get unlock-height stacker-info))) + + ;; must be called with positive `increase-by` + (asserts! (>= increase-by u1) + (err ERR_STACKING_INVALID_AMOUNT)) + + (let ((unlock-in-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + (first-increase-cycle (+ cur-cycle u1)) + (last-increase-cycle (- unlock-in-cycle u1)) + ;; errors out if the lock is already in its final cycle (nothing left to increase) + (cycle-count (try! (if (<= first-increase-cycle last-increase-cycle) + (ok (+ u1 (- last-increase-cycle first-increase-cycle))) + (err ERR_STACKING_INVALID_LOCK_PERIOD)))) + (new-total-locked (+ increase-by existing-lock)) + (stacker-state + (unwrap! (map-get? stacking-state { stacker: stacker }) + (err ERR_STACK_INCREASE_NOT_LOCKED)))) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! 
(check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must be currently locked + (asserts! (> existing-lock u0) + (err ERR_STACK_INCREASE_NOT_LOCKED)) + + ;; stacker must have enough stx to lock + (asserts! (>= available-stx increase-by) + (err ERR_STACKING_INSUFFICIENT_FUNDS)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED))) + (delegated-to (get delegated-to delegation-info)) + (delegated-amount (get amount-ustx delegation-info)) + (delegated-pox-addr (get pox-addr delegation-info)) + (delegated-until (get until-burn-ht delegation-info))) + ;; must have delegated to tx-sender + (asserts! (is-eq delegated-to tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= delegated-amount new-total-locked) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match delegated-pox-addr + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match delegated-until + until-burn-ht + (>= until-burn-ht unlock-height) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr new-total-locked first-increase-cycle (+ u1 (- last-increase-cycle first-increase-cycle)))) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-increase-cycle cycle-count increase-by) + + ;; stacking-state is unchanged, so no need to update + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, total-locked: new-total-locked})))) + +;; As a delegator, extend an active stacking lock, issuing a "partial commitment" for the +;; extended-to cycles. +;; *New in Stacks 2.1* +;; This method extends `stacker`'s current lockup for an additional `extend-count` +;; and partially commits those new cycles to `pox-addr` +(define-public (delegate-stack-extend + (stacker principal) + (pox-addr { version: (buff 1), hashbytes: (buff 32) }) + (extend-count uint)) + (let ((stacker-info (stx-account stacker)) + ;; to extend, there must already be an entry in the stacking-state + (stacker-state (unwrap! (get-stacker-info stacker) (err ERR_STACK_EXTEND_NOT_LOCKED))) + (amount-ustx (get locked stacker-info)) + (unlock-height (get unlock-height stacker-info)) + ;; first-extend-cycle will be the cycle in which tx-sender *would have* unlocked + (first-extend-cycle (burn-height-to-reward-cycle unlock-height)) + (cur-cycle (current-pox-reward-cycle)) + ;; new first cycle should be max(cur-cycle, stacker-state.first-reward-cycle) + (cur-first-reward-cycle (get first-reward-cycle stacker-state)) + (first-reward-cycle (if (> cur-cycle cur-first-reward-cycle) cur-cycle cur-first-reward-cycle))) + + ;; must be called with positive extend-count + (asserts! (>= extend-count u1) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + (let ((last-extend-cycle (- (+ first-extend-cycle extend-count) u1)) + (lock-period (+ u1 (- last-extend-cycle first-reward-cycle))) + (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle)))) + + ;; first cycle must be after the current cycle + (asserts! (> first-extend-cycle cur-cycle) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + ;; lock period must be positive + (asserts! 
(> lock-period u0) (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; must be called directly by the tx-sender or by an allowed contract-caller + (asserts! (check-caller-allowed) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; stacker must not be directly stacking + (asserts! (is-eq (len (get reward-set-indexes stacker-state)) u0) + (err ERR_STACKING_NOT_DELEGATED)) + + ;; stacker must be delegated to tx-sender + (asserts! (is-eq (unwrap! (get delegated-to stacker-state) + (err ERR_STACKING_NOT_DELEGATED)) + tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + + ;; check valid lock period + (asserts! (check-pox-lock-period lock-period) + (err ERR_STACKING_INVALID_LOCK_PERIOD)) + + ;; stacker must be currently locked + (asserts! (> amount-ustx u0) + (err ERR_STACK_EXTEND_NOT_LOCKED)) + + ;; stacker must have delegated to the caller + (let ((delegation-info (unwrap! (get-check-delegation stacker) (err ERR_STACKING_PERMISSION_DENIED)))) + ;; must have delegated to tx-sender + (asserts! (is-eq (get delegated-to delegation-info) tx-sender) + (err ERR_STACKING_PERMISSION_DENIED)) + ;; must have delegated enough stx + (asserts! (>= (get amount-ustx delegation-info) amount-ustx) + (err ERR_DELEGATION_TOO_MUCH_LOCKED)) + ;; if pox-addr is set, must be equal to pox-addr + (asserts! (match (get pox-addr delegation-info) + specified-pox-addr (is-eq pox-addr specified-pox-addr) + true) + (err ERR_DELEGATION_POX_ADDR_REQUIRED)) + ;; delegation must not expire before lock period + (asserts! (match (get until-burn-ht delegation-info) + until-burn-ht (>= until-burn-ht + new-unlock-ht) + true) + (err ERR_DELEGATION_EXPIRES_DURING_LOCK))) + + ;; delegate stacking does minimal-can-stack-stx + (try! (minimal-can-stack-stx pox-addr amount-ustx first-extend-cycle lock-period)) + + ;; register the PoX address with the amount stacked via partial stacking + ;; before it can be included in the reward set, this must be committed! 
+ (add-pox-partial-stacked pox-addr first-extend-cycle extend-count amount-ustx) + + (map-set stacking-state + { stacker: stacker } + { pox-addr: pox-addr, + reward-set-indexes: (list), + first-reward-cycle: first-reward-cycle, + lock-period: lock-period, + delegated-to: (some tx-sender) }) + + ;; return the lock-up information, so the node can actually carry out the lock. + (ok { stacker: stacker, + unlock-burn-height: new-unlock-ht })))) + +;; Get the _current_ PoX stacking delegation information for a stacker. If the information +;; is expired, or if there's never been such a stacker, then returns none. +;; *New in Stacks 2.1* +(define-read-only (get-delegation-info (stacker principal)) + (get-check-delegation stacker) +) + +;; Get the burn height at which a particular contract is allowed to stack for a particular principal. +;; *New in Stacks 2.1* +;; Returns (some (some X)) if X is the burn height at which the allowance terminates +;; Returns (some none) if the caller is allowed indefinitely +;; Returns none if there is no allowance record +(define-read-only (get-allowance-contract-callers (sender principal) (calling-contract principal)) + (map-get? allowance-contract-callers { sender: sender, contract-caller: calling-contract }) +) + +;; How many PoX addresses in this reward cycle? +;; *New in Stacks 2.1* +(define-read-only (get-num-reward-set-pox-addresses (reward-cycle uint)) + (match (map-get? reward-cycle-pox-address-list-len { reward-cycle: reward-cycle }) + num-addrs + (get len num-addrs) + u0 + ) +) + +;; How many uSTX have been locked up for this address so far, before the delegator commits them? +;; *New in Stacks 2.1* +(define-read-only (get-partial-stacked-by-cycle (pox-addr { version: (buff 1), hashbytes: (buff 32) }) (reward-cycle uint) (sender principal)) + (map-get? partial-stacked-by-cycle { pox-addr: pox-addr, reward-cycle: reward-cycle, sender: sender }) +) + +;; How many uSTX have voted to reject PoX in a given reward cycle? 
+;; *New in Stacks 2.1* +(define-read-only (get-total-pox-rejection (reward-cycle uint)) + (match (map-get? stacking-rejection { reward-cycle: reward-cycle }) + rejected + (get amount rejected) + u0 + ) +) diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 1944edfc63..51e203a014 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -480,6 +480,7 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c "cycle" => cycle_number, "cycle_start" => cycle_start, "pox_3_activation" => peer.config.burnchain.pox_constants.pox_3_activation_height, + "pox_4_activation" => peer.config.burnchain.pox_constants.pox_4_activation_height, "epoch_2_4_start" => cycle_start_epoch.start_height, ); return; @@ -528,6 +529,15 @@ pub fn check_stacker_link_invariants(peer: &mut TestPeer, tip: &StacksBlockId, c continue; } + if tip_epoch.epoch_id >= StacksEpochId::Epoch25 + && current_burn_height + <= peer.config.burnchain.pox_constants.pox_4_activation_height + { + // if the tip is epoch-2.5, and pox-5 isn't the active pox contract yet, + // the invariant checks will not make sense for the same reasons as above + continue; + } + let StackingStateCheckData { pox_addr, cycle_indexes, @@ -1238,6 +1248,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { height_target + 1, burnchain.pox_constants.v1_unlock_height, burnchain.pox_constants.v2_unlock_height, + burnchain.pox_constants.v3_unlock_height, ); assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); @@ -1267,6 +1278,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { height_target + 1, burnchain.pox_constants.v1_unlock_height, burnchain.pox_constants.v2_unlock_height, + burnchain.pox_constants.v3_unlock_height, ); assert_eq!(bob_bal.amount_locked(), 0); @@ -1280,6 +1292,7 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { height_target + 1, 
burnchain.pox_constants.v1_unlock_height, burnchain.pox_constants.v2_unlock_height, + burnchain.pox_constants.v3_unlock_height, ); assert_eq!(bob_bal.amount_locked(), 0); diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 692e2f754f..a8ef5f1bad 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -147,7 +147,9 @@ fn make_test_epochs_pox() -> (Vec, PoxConstants) { pox_constants.anchor_threshold = 1; pox_constants.v1_unlock_height = (EMPTY_SORTITIONS + EPOCH_2_1_HEIGHT + 1) as u32; pox_constants.v2_unlock_height = (EMPTY_SORTITIONS + EPOCH_2_2_HEIGHT + 1) as u32; + pox_constants.v3_unlock_height = u32::MAX; pox_constants.pox_3_activation_height = (EMPTY_SORTITIONS + EPOCH_2_4_HEIGHT + 1) as u32; + pox_constants.pox_4_activation_height = u32::MAX; (epochs, pox_constants) } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index abd565ebeb..c430e8f644 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -60,11 +60,13 @@ use crate::chainstate::burn::db::sortdb::*; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::coordinator::BlockEventDispatcher; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::address::PoxAddress; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::db::accounts::MinerReward; use crate::chainstate::stacks::db::transactions::TransactionNonceMismatch; use crate::chainstate::stacks::db::*; +use crate::chainstate::stacks::events::StacksBlockEventData; use crate::chainstate::stacks::index::MarfTrieId; use crate::chainstate::stacks::Error; use crate::chainstate::stacks::StacksBlockHeader; @@ -194,7 +196,7 @@ pub struct DummyEventDispatcher; impl 
BlockEventDispatcher for DummyEventDispatcher { fn announce_block( &self, - _block: &StacksBlock, + _block: &StacksBlockEventData, _metadata: &StacksHeaderInfo, _receipts: &[StacksTransactionReceipt], _parent: &StacksBlockId, @@ -589,7 +591,7 @@ impl StacksChainState { } })?; - let mut bound_reader = BoundReader::from_reader(&mut fd, MAX_MESSAGE_LEN as u64); + let mut bound_reader = BoundReader::from_reader(&mut fd, u64::from(MAX_MESSAGE_LEN)); let inst = T::consensus_deserialize(&mut bound_reader).map_err(Error::CodecError)?; Ok(inst) } @@ -827,7 +829,7 @@ impl StacksChainState { debug!("Zero-sized block {}", block_hash); return Ok(None); } - if sz > MAX_MESSAGE_LEN as u64 { + if sz > u64::from(MAX_MESSAGE_LEN) { debug!("Invalid block {}: too big", block_hash); return Ok(None); } @@ -1582,8 +1584,8 @@ impl StacksChainState { block.block_hash(), parent_consensus_hash ); - assert!(commit_burn < i64::MAX as u64); - assert!(sortition_burn < i64::MAX as u64); + assert!(commit_burn < u64::try_from(i64::MAX).expect("unreachable")); + assert!(sortition_burn < u64::try_from(i64::MAX).expect("unreachable")); let block_hash = block.block_hash(); let index_block_hash = @@ -1753,7 +1755,7 @@ impl StacksChainState { burn_supports: &[UserBurnSupportOp], ) -> Result<(), Error> { for burn_support in burn_supports.iter() { - assert!(burn_support.burn_fee < i64::MAX as u64); + assert!(burn_support.burn_fee < u64::try_from(i64::MAX).expect("unreachable")); } for burn_support in burn_supports.iter() { @@ -2059,7 +2061,8 @@ impl StacksChainState { ); Ok(BlocksInvData { - bitlen: block_bits.len() as u16, + bitlen: u16::try_from(block_bits.len()) + .expect("FATAL: unreachable: more than 2^16 block bits"), block_bitvec: block_bitvec, microblocks_bitvec: microblocks_bitvec, }) @@ -2091,7 +2094,10 @@ impl StacksChainState { .query_row(sql, args, |row| { let start_height_i64: i64 = row.get_unwrap(0); let end_height_i64: i64 = row.get_unwrap(1); - return Ok((start_height_i64 as u64, 
end_height_i64 as u64)); + return Ok(( + u64::try_from(start_height_i64).expect("FATAL: height exceeds i64::MAX"), + u64::try_from(end_height_i64).expect("FATAL: height exceeds i64::MAX"), + )); }) .optional()? .ok_or_else(|| Error::DBError(db_error::NotFoundError)) @@ -2181,7 +2187,8 @@ impl StacksChainState { ); Ok(BlocksInvData { - bitlen: block_bits.len() as u16, + bitlen: u16::try_from(block_bits.len()) + .expect("FATAL: block bits has more than 2^16 members"), block_bitvec: block_bitvec, microblocks_bitvec: microblocks_bitvec, }) @@ -3722,8 +3729,8 @@ impl StacksChainState { // key of the winning leader let leader_key = db_handle .get_leader_key_at( - block_commit.key_block_ptr as u64, - block_commit.key_vtxindex as u32, + u64::from(block_commit.key_block_ptr), + u32::from(block_commit.key_vtxindex), )? .expect("FATAL: have block commit but no leader key"); @@ -4147,7 +4154,7 @@ impl StacksChainState { 125 }; - stx_reward * (MICROSTACKS_PER_STACKS as u128) + stx_reward * (u128::from(MICROSTACKS_PER_STACKS)) } /// Create the block reward. 
@@ -4175,7 +4182,7 @@ impl StacksChainState { let recipient = if epoch_id >= StacksEpochId::Epoch21 { // pay to tx-designated recipient, or if there is none, pay to the origin match coinbase_tx.try_as_coinbase() { - Some((_, recipient_opt)) => recipient_opt + Some((_, recipient_opt, _)) => recipient_opt .cloned() .unwrap_or(miner_addr.to_account_principal()), None => miner_addr.to_account_principal(), @@ -4301,7 +4308,7 @@ impl StacksChainState { &[&u64_to_sql(min_arrival_time)?, &u64_to_sql(limit)?], ) .map_err(Error::DBError)?; - Ok(cnt as u64) + Ok(u64::try_from(cnt).expect("more than i64::MAX rows")) } /// How many processed staging blocks do we have, up to a limit, at or after the given @@ -4318,7 +4325,7 @@ impl StacksChainState { &[&u64_to_sql(min_arrival_time)?, &u64_to_sql(limit)?], ) .map_err(Error::DBError)?; - Ok(cnt as u64) + Ok(u64::try_from(cnt).expect("more than i64::MAX rows")) } /// Measure how long a block waited in-between when it arrived and when it got processed. @@ -4411,7 +4418,7 @@ impl StacksChainState { &candidate.anchored_block_hash, &candidate.parent_consensus_hash, &candidate.parent_anchored_block_hash, - if candidate.parent_microblock_hash != BlockHeaderHash([0u8; 32]) { (candidate.parent_microblock_seq as u32) + 1 } else { 0 }, + if candidate.parent_microblock_hash != BlockHeaderHash([0u8; 32]) { u32::from(candidate.parent_microblock_seq) + 1 } else { 0 }, &candidate.parent_microblock_hash ); @@ -4543,10 +4550,10 @@ impl StacksChainState { .map_err(|e| (e, microblock.block_hash()))?; tx_receipt.microblock_header = Some(microblock.header.clone()); - tx_receipt.tx_index = tx_index as u32; - fees = fees.checked_add(tx_fee as u128).expect("Fee overflow"); + tx_receipt.tx_index = u32::try_from(tx_index).expect("more than 2^32 items"); + fees = fees.checked_add(u128::from(tx_fee)).expect("Fee overflow"); burns = burns - .checked_add(tx_receipt.stx_burned as u128) + .checked_add(u128::from(tx_receipt.stx_burned)) .expect("Burns overflow"); 
receipts.push(tx_receipt); } @@ -4607,6 +4614,10 @@ impl StacksChainState { current_epoch = StacksEpochId::Epoch24; } StacksEpochId::Epoch24 => { + receipts.append(&mut clarity_tx.block.initialize_epoch_2_5()?); + current_epoch = StacksEpochId::Epoch25; + } + StacksEpochId::Epoch25 => { receipts.append(&mut clarity_tx.block.initialize_epoch_3_0()?); current_epoch = StacksEpochId::Epoch30; } @@ -4895,10 +4906,10 @@ impl StacksChainState { for tx in block_txs.iter() { let (tx_fee, mut tx_receipt) = StacksChainState::process_transaction(clarity_tx, tx, false, ast_rules)?; - fees = fees.checked_add(tx_fee as u128).expect("Fee overflow"); + fees = fees.checked_add(u128::from(tx_fee)).expect("Fee overflow"); tx_receipt.tx_index = tx_index; burns = burns - .checked_add(tx_receipt.stx_burned as u128) + .checked_add(u128::from(tx_receipt.stx_burned)) .expect("Burns overflow"); receipts.push(tx_receipt); tx_index += 1; @@ -5020,7 +5031,11 @@ impl StacksChainState { .to_owned() .expect_principal(); total_minted += amount; - StacksChainState::account_credit(tx_connection, &recipient, amount as u64); + StacksChainState::account_credit( + tx_connection, + &recipient, + u64::try_from(amount).expect("FATAL: transferred more STX than exist"), + ); let event = STXEventType::STXMintEvent(STXMintEventData { recipient, amount }); events.push(StacksTransactionEvent::STXEvent(event)); } @@ -5086,7 +5101,7 @@ impl StacksChainState { ) -> Result<(Vec, Vec, Vec), Error> { // only consider transactions in Stacks 2.1 let search_window: u8 = - if epoch_start_height + (BURNCHAIN_TX_SEARCH_WINDOW as u64) > burn_tip_height { + if epoch_start_height + u64::from(BURNCHAIN_TX_SEARCH_WINDOW) > burn_tip_height { burn_tip_height .saturating_sub(epoch_start_height) .try_into() @@ -5205,7 +5220,9 @@ impl StacksChainState { | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + // TODO: sbtc ops in epoch 3.0 
StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, parent_index_hash, @@ -5290,8 +5307,13 @@ impl StacksChainState { pox_start_cycle_info, ) } - StacksEpochId::Epoch24 | StacksEpochId::Epoch30 => { - Self::handle_pox_cycle_start_pox_3( + StacksEpochId::Epoch24 => Self::handle_pox_cycle_start_pox_3( + clarity_tx, + pox_reward_cycle, + pox_start_cycle_info, + ), + StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + Self::handle_pox_cycle_start_pox_4( clarity_tx, pox_reward_cycle, pox_start_cycle_info, @@ -5508,7 +5530,7 @@ impl StacksChainState { vec![] }; - let active_pox_contract = pox_constants.active_pox_contract(burn_tip_height as u64); + let active_pox_contract = pox_constants.active_pox_contract(u64::from(burn_tip_height)); // process stacking & transfer operations from burnchain ops tx_receipts.extend(StacksChainState::process_stacking_ops( @@ -5803,7 +5825,7 @@ impl StacksChainState { )? { Some(sn) => ( sn.burn_header_hash, - sn.block_height as u32, + u32::try_from(sn.block_height).expect("FATAL: block height overflow"), sn.burn_header_timestamp, ), None => { @@ -5865,7 +5887,8 @@ impl StacksChainState { match StacksChainState::process_block_transactions( &mut clarity_tx, &block.txs, - microblock_txs_receipts.len() as u32, + u32::try_from(microblock_txs_receipts.len()) + .expect("more than 2^32 tx receipts"), ast_rules, ) { Err(e) => { @@ -5910,7 +5933,7 @@ impl StacksChainState { let mut lockup_events = match StacksChainState::finish_block( &mut clarity_tx, miner_payouts_opt.as_ref(), - block.header.total_work.work as u32, + u32::try_from(block.header.total_work.work).expect("FATAL: more than 2^32 blocks"), block.header.microblock_pubkey_hash, ) { Err(Error::InvalidStacksBlock(e)) => { @@ -5978,7 +6001,7 @@ impl StacksChainState { .accumulated_coinbase_ustx; let coinbase_at_block = StacksChainState::get_coinbase_reward( - chain_tip_burn_header_height as u64, + u64::from(chain_tip_burn_header_height), 
burn_dbconn.context.first_block_height, ); @@ -6058,7 +6081,9 @@ impl StacksChainState { chainstate_tx.log_transactions_processed(&new_tip.index_block_hash(), &tx_receipts); - set_last_block_transaction_count(block.txs.len() as u64); + set_last_block_transaction_count( + u64::try_from(block.txs.len()).expect("more than 2^64 txs"), + ); set_last_execution_cost_observed(&block_execution_cost, &block_limit); let epoch_receipt = StacksEpochReceipt { @@ -6255,7 +6280,7 @@ impl StacksChainState { )? { Some(sn) => ( sn.burn_header_hash, - sn.block_height as u32, + u32::try_from(sn.block_height).expect("FATAL: more than 2^32 blocks"), sn.burn_header_timestamp, sn.winning_block_txid, ), @@ -6285,7 +6310,8 @@ impl StacksChainState { }; let block = StacksChainState::extract_stacks_block(&next_staging_block)?; - let block_size = next_staging_block.block_data.len() as u64; + let block_size = u64::try_from(next_staging_block.block_data.len()) + .expect("FATAL: more than 2^64 transactions"); // sanity check -- don't process this block again if we already did so if StacksChainState::has_stacks_block( @@ -6529,7 +6555,7 @@ impl StacksChainState { &next_staging_block.parent_anchored_block_hash, ); dispatcher.announce_block( - &block, + &block.into(), &epoch_receipt.header.clone(), &epoch_receipt.tx_receipts, &parent_id, @@ -6677,21 +6703,6 @@ impl StacksChainState { } } - /// Get the highest processed block on the canonical burn chain. - /// Break ties on lexigraphical ordering of the block hash - /// (i.e. arbitrarily). The staging block will be returned, but no block data will be filled - /// in. 
- pub fn get_stacks_chain_tip( - &self, - sortdb: &SortitionDB, - ) -> Result, Error> { - let (consensus_hash, block_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; - let sql = "SELECT * FROM staging_blocks WHERE processed = 1 AND orphaned = 0 AND consensus_hash = ?1 AND anchored_block_hash = ?2"; - let args: &[&dyn ToSql] = &[&consensus_hash, &block_bhh]; - query_row(&self.db(), sql, args).map_err(Error::DBError) - } - /// Get the parent block of `staging_block`. pub fn get_stacks_block_parent( &self, @@ -6912,21 +6923,23 @@ impl StacksChainState { return Err(MemPoolRejection::BadAddressVersionByte); } - let (block_height, v1_unlock_height, v2_unlock_height) = clarity_connection - .with_clarity_db_readonly(|ref mut db| { + let (block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height) = + clarity_connection.with_clarity_db_readonly(|ref mut db| { ( - db.get_current_burnchain_block_height() as u64, + u64::from(db.get_current_burnchain_block_height()), db.get_v1_unlock_height(), db.get_v2_unlock_height(), + db.get_v3_unlock_height(), ) }); // 5: the paying account must have enough funds if !payer.stx_balance.can_transfer_at_burn_block( - fee as u128, + u128::from(fee), block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { match &tx.payload { TransactionPayload::TokenTransfer(..) => { @@ -6934,11 +6947,12 @@ impl StacksChainState { } _ => { return Err(MemPoolRejection::NotEnoughFunds( - fee as u128, + u128::from(fee), payer.stx_balance.get_available_balance_at_burn_block( block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ), )); } @@ -6957,12 +6971,14 @@ impl StacksChainState { } // does the owner have the funds for the token transfer? 
- let total_spent = (*amount as u128) + if origin == payer { fee as u128 } else { 0 }; + let total_spent = + u128::from(*amount) + if origin == payer { u128::from(fee) } else { 0 }; if !origin.stx_balance.can_transfer_at_burn_block( total_spent, block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { return Err(MemPoolRejection::NotEnoughFunds( total_spent, @@ -6970,6 +6986,7 @@ impl StacksChainState { block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ), )); } @@ -6977,17 +6994,19 @@ impl StacksChainState { // if the payer for the tx is different from owner, check if they can afford fee if origin != payer { if !payer.stx_balance.can_transfer_at_burn_block( - fee as u128, + u128::from(fee), block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ) { return Err(MemPoolRejection::NotEnoughFunds( - fee as u128, + u128::from(fee), payer.stx_balance.get_available_balance_at_burn_block( block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ), )); } @@ -7134,7 +7153,7 @@ pub mod test { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); @@ -7199,7 +7218,7 @@ pub mod test { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; let mut tx_signer = StacksTransactionSigner::new(&tx_coinbase); @@ -11415,12 +11434,11 @@ pub mod test { let sortdb = peer.sortdb.take().unwrap(); // definitely missing some blocks -- there are empty sortitions - let stacks_tip = peer - .chainstate() - 
.get_stacks_chain_tip(&sortdb) - .unwrap() - .unwrap(); - assert_eq!(stacks_tip.height, 8); + let stacks_tip = + NakamotoChainState::get_canonical_block_header(peer.chainstate().db(), &sortdb) + .unwrap() + .unwrap(); + assert_eq!(stacks_tip.anchored_header.height(), 8); // but we did process all burnchain operations let (consensus_hash, block_bhh) = @@ -12086,12 +12104,11 @@ pub mod test { let sortdb = peer.sortdb.take().unwrap(); // definitely missing some blocks -- there are empty sortitions - let stacks_tip = peer - .chainstate() - .get_stacks_chain_tip(&sortdb) - .unwrap() - .unwrap(); - assert_eq!(stacks_tip.height, 13); + let stacks_tip = + NakamotoChainState::get_canonical_block_header(peer.chainstate().db(), &sortdb) + .unwrap() + .unwrap(); + assert_eq!(stacks_tip.anchored_header.height(), 13); // but we did process all burnchain operations let (consensus_hash, block_bhh) = diff --git a/stackslib/src/chainstate/stacks/db/headers.rs b/stackslib/src/chainstate/stacks/db/headers.rs index d8cc18e120..ebde32d91a 100644 --- a/stackslib/src/chainstate/stacks/db/headers.rs +++ b/stackslib/src/chainstate/stacks/db/headers.rs @@ -249,6 +249,21 @@ impl StacksChainState { .map_err(Error::DBError) } + /// Get a stacks header info by its sortition's consensus hash. + /// Because the consensus hash mixes in the burnchain header hash and the PoX bit vector, + /// it's guaranteed to be unique across all burnchain forks and all PoX forks, and thus all + /// Stacks forks. 
+ pub fn get_stacks_block_header_info_by_consensus_hash( + conn: &Connection, + consensus_hash: &ConsensusHash, + ) -> Result, Error> { + let sql = "SELECT * FROM block_headers WHERE consensus_hash = ?1"; + query_row_panic(conn, sql, &[&consensus_hash], || { + "FATAL: multiple rows for the same consensus hash".to_string() + }) + .map_err(Error::DBError) + } + /// Get an ancestor block header pub fn get_tip_ancestor( tx: &mut StacksDBTx, diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 3a90a269e9..ccd7e86e85 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -57,7 +57,8 @@ use crate::chainstate::burn::operations::{DelegateStxOp, StackStxOp, TransferStx use crate::chainstate::burn::ConsensusHash; use crate::chainstate::burn::ConsensusHashExtensions; use crate::chainstate::nakamoto::{ - HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NAKAMOTO_CHAINSTATE_SCHEMA_1, + HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, + NAKAMOTO_CHAINSTATE_SCHEMA_1, }; use crate::chainstate::stacks::address::StacksAddressExtensions; use crate::chainstate::stacks::boot::*; @@ -188,14 +189,23 @@ impl From for StacksBlockHeaderTypes { #[derive(Debug, Clone, PartialEq)] pub struct StacksHeaderInfo { + /// Stacks block header pub anchored_header: StacksBlockHeaderTypes, + /// Last microblock header (Stacks 2.x only; this is None in Stacks 3.x) pub microblock_tail: Option, + /// Height of this Stacks block pub stacks_block_height: u64, + /// MARF root hash of the headers DB (not consensus critical) pub index_root: TrieHash, + /// consensus hash of the burnchain block in which this miner was selected to produce this block pub consensus_hash: ConsensusHash, + /// Hash of the burnchain block in which this miner was selected to produce this block pub burn_header_hash: BurnchainHeaderHash, + /// Height of the burnchain block pub burn_header_height: u32, + /// 
Timestamp of the burnchain block pub burn_header_timestamp: u64, + /// Size of the block corresponding to `anchored_header` in bytes pub anchored_block_size: u64, } @@ -250,6 +260,7 @@ impl DBConfig { StacksEpochId::Epoch22 => self.version == "3" || self.version == "4", StacksEpochId::Epoch23 => self.version == "3" || self.version == "4", StacksEpochId::Epoch24 => self.version == "3" || self.version == "4", + StacksEpochId::Epoch25 => self.version == "3" || self.version == "4", StacksEpochId::Epoch30 => self.version == "3" || self.version == "4", } } @@ -264,13 +275,9 @@ impl StacksBlockHeaderTypes { } pub fn is_first_mined(&self) -> bool { - StacksBlockHeader::is_first_block_hash(self.parent()) - } - - pub fn parent(&self) -> &BlockHeaderHash { match self { - StacksBlockHeaderTypes::Epoch2(x) => &x.parent_block, - StacksBlockHeaderTypes::Nakamoto(x) => &x.parent, + StacksBlockHeaderTypes::Epoch2(x) => x.is_first_mined(), + StacksBlockHeaderTypes::Nakamoto(x) => x.is_first_mined(), } } @@ -287,6 +294,13 @@ impl StacksBlockHeaderTypes { _ => None, } } + + pub fn as_stacks_nakamoto(&self) -> Option<&NakamotoBlockHeader> { + match &self { + StacksBlockHeaderTypes::Nakamoto(ref x) => Some(x), + _ => None, + } + } } impl StacksHeaderInfo { @@ -830,6 +844,7 @@ const CHAINSTATE_INDEXES: &'static [&'static str] = &[ "CREATE INDEX IF NOT EXISTS index_block_hash_tx_index ON transactions(index_block_hash);", "CREATE INDEX IF NOT EXISTS index_block_header_by_affirmation_weight ON block_headers(affirmation_weight);", "CREATE INDEX IF NOT EXISTS index_block_header_by_height_and_affirmation_weight ON block_headers(block_height,affirmation_weight);", + "CREATE INDEX IF NOT EXISTS index_headers_by_consensus_hash ON block_headers(consensus_hash);", ]; pub use stacks_common::consts::MINER_REWARD_MATURITY; @@ -1963,9 +1978,9 @@ impl StacksChainState { where F: FnOnce(&mut ClarityReadOnlyConnection) -> R, { - match StacksChainState::has_stacks_block(self.db(), parent_tip) { - Ok(true) 
=> {} - Ok(false) => { + match NakamotoChainState::get_block_header(self.db(), parent_tip) { + Ok(Some(_)) => {} + Ok(None) => { return None; } Err(e) => { diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index d3c0764754..8dcc94705b 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -468,24 +468,26 @@ impl StacksChainState { fee: u64, payer_account: StacksAccount, ) -> Result { - let (cur_burn_block_height, v1_unlock_ht, v2_unlock_ht) = clarity_tx + let (cur_burn_block_height, v1_unlock_ht, v2_unlock_ht, v3_unlock_ht) = clarity_tx .with_clarity_db_readonly(|ref mut db| { ( db.get_current_burnchain_block_height(), db.get_v1_unlock_height(), db.get_v2_unlock_height(), + db.get_v3_unlock_height(), ) }); let consolidated_balance = payer_account .stx_balance .get_available_balance_at_burn_block( - cur_burn_block_height as u64, + u64::from(cur_burn_block_height), v1_unlock_ht, v2_unlock_ht, + v3_unlock_ht, ); - if consolidated_balance < fee as u128 { + if consolidated_balance < u128::from(fee) { return Err(Error::InvalidFee); } @@ -569,7 +571,7 @@ impl StacksChainState { .checked_add(amount_burned) .expect("FATAL: sent waaaaay too much STX"); - if !condition_code.check(*amount_sent_condition as u128, amount_sent) { + if !condition_code.check(u128::from(*amount_sent_condition), amount_sent) { info!( "Post-condition check failure on STX owned by {}: {:?} {:?} {}", account_principal, amount_sent_condition, condition_code, amount_sent @@ -615,7 +617,7 @@ impl StacksChainState { let amount_sent = asset_map .get_fungible_tokens(&account_principal, &asset_id) .unwrap_or(0); - if !condition_code.check(*amount_sent_condition as u128, amount_sent) { + if !condition_code.check(u128::from(*amount_sent_condition), amount_sent) { info!("Post-condition check failure on fungible asset {} owned by {}: {} {:?} {}", &asset_id, account_principal, 
amount_sent_condition, condition_code, amount_sent); return false; } @@ -828,7 +830,9 @@ impl StacksChainState { } Some(height) => { if height - .checked_add(MINER_REWARD_MATURITY as u32) + .checked_add( + u32::try_from(MINER_REWARD_MATURITY).expect("FATAL: maturity > 2^32"), + ) .expect("BUG: too many blocks") < current_height { @@ -850,7 +854,7 @@ impl StacksChainState { .get_microblock_poison_report(mblock_pubk_height) { // account for report loaded - env.add_memory(TypeSignature::PrincipalType.size() as u64) + env.add_memory(u64::from(TypeSignature::PrincipalType.size())) .map_err(|e| Error::from_cost_error(e, cost_before.clone(), &env.global_context))?; // u128 sequence @@ -900,7 +904,7 @@ impl StacksChainState { let tuple_data = TupleData::from_data(vec![ ( ClarityName::try_from("block_height").expect("BUG: valid string representation"), - Value::UInt(mblock_pubk_height as u128), + Value::UInt(u128::from(mblock_pubk_height)), ), ( ClarityName::try_from("microblock_pubkey_hash") @@ -913,7 +917,7 @@ impl StacksChainState { ), ( ClarityName::try_from("sequence").expect("BUG: valid string representation"), - Value::UInt(reported_seq as u128), + Value::UInt(u128::from(reported_seq)), ), ]) .expect("BUG: valid tuple representation"); @@ -956,7 +960,7 @@ impl StacksChainState { .run_stx_transfer( &origin_account.principal, addr, - *amount as u128, + u128::from(*amount), &BuffData { data: Vec::from(memo.0.clone()), }, @@ -8103,7 +8107,7 @@ pub mod test { assert_eq!( StacksChainState::get_account(&mut conn, &addr.into()) .stx_balance - .get_available_balance_at_burn_block(0, 0, 0), + .get_available_balance_at_burn_block(0, 0, 0, 0), (1000000000 - fee) as u128 ); @@ -8544,9 +8548,15 @@ pub mod test { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } fn get_burn_block_height(&self, 
sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) } @@ -8611,7 +8621,8 @@ pub mod test { StacksEpochId::Epoch22 => self.get_stacks_epoch(3), StacksEpochId::Epoch23 => self.get_stacks_epoch(4), StacksEpochId::Epoch24 => self.get_stacks_epoch(5), - StacksEpochId::Epoch30 => self.get_stacks_epoch(6), + StacksEpochId::Epoch25 => self.get_stacks_epoch(6), + StacksEpochId::Epoch30 => self.get_stacks_epoch(7), } } fn get_pox_payout_addrs( @@ -8759,9 +8770,15 @@ pub mod test { fn get_v2_unlock_height(&self) -> u32 { u32::MAX } + fn get_v3_unlock_height(&self) -> u32 { + u32::MAX + } fn get_pox_3_activation_height(&self) -> u32 { u32::MAX } + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } fn get_burn_block_height(&self, sortition_id: &SortitionId) -> Option { Some(sortition_id.0[0] as u32) } diff --git a/stackslib/src/chainstate/stacks/events.rs b/stackslib/src/chainstate/stacks/events.rs index 8b138c4b37..77358da4b2 100644 --- a/stackslib/src/chainstate/stacks/events.rs +++ b/stackslib/src/chainstate/stacks/events.rs @@ -1,5 +1,7 @@ use crate::burnchains::Txid; use crate::chainstate::burn::operations::BlockstackOperationType; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::stacks::StacksBlock; use crate::chainstate::stacks::StacksMicroblockHeader; use crate::chainstate::stacks::StacksTransaction; use clarity::vm::analysis::ContractAnalysis; @@ -9,6 +11,7 @@ use clarity::vm::types::{ AssetIdentifier, PrincipalData, QualifiedContractIdentifier, StandardPrincipalData, Value, }; use stacks_common::codec::StacksMessageCodec; +use stacks_common::types::chainstate::BlockHeaderHash; use stacks_common::types::chainstate::StacksAddress; use stacks_common::util::hash::to_hex; @@ -55,3 +58,33 @@ pub struct StacksTransactionReceipt { /// This is really a string-formatted CheckError (which can't be clone()'ed) pub vm_error: Option, } + +#[derive(Clone)] +pub struct StacksBlockEventData { + pub block_hash: BlockHeaderHash, + 
pub parent_block_hash: BlockHeaderHash, + pub parent_microblock_hash: BlockHeaderHash, + pub parent_microblock_sequence: u16, +} + +impl From for StacksBlockEventData { + fn from(block: StacksBlock) -> StacksBlockEventData { + StacksBlockEventData { + block_hash: block.block_hash(), + parent_block_hash: block.header.parent_block, + parent_microblock_hash: block.header.parent_microblock, + parent_microblock_sequence: block.header.parent_microblock_sequence, + } + } +} + +impl From<(NakamotoBlock, BlockHeaderHash)> for StacksBlockEventData { + fn from(block: (NakamotoBlock, BlockHeaderHash)) -> StacksBlockEventData { + StacksBlockEventData { + block_hash: block.0.header.block_hash(), + parent_block_hash: block.1, + parent_microblock_hash: BlockHeaderHash([0u8; 32]), + parent_microblock_sequence: 0, + } + } +} diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 78330b78f8..01da9b9804 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -873,7 +873,7 @@ impl TrieRAM { for j in 0..node_data.len() { let next_node = &mut self.data[node_data[j] as usize].0; if !next_node.is_leaf() { - let mut ptrs = next_node.ptrs_mut(); + let ptrs = next_node.ptrs_mut(); let num_children = ptrs.len(); for k in 0..num_children { if ptrs[k].id != TrieNodeID::Empty as u8 && !is_backptr(ptrs[k].id) { diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index d6c3ab8bfa..a3151e171f 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -173,6 +173,7 @@ pub fn set_mining_spend_amount(miner_status: Arc>, amt: u64) .set_spend_amount(amt); } +/// Policy settings for how mining will proceed #[derive(Debug, Clone)] pub struct BlockBuilderSettings { pub max_miner_time_ms: u64, @@ -214,7 +215,7 @@ struct MicroblockMinerRuntime { /// The value of `BlockLimitFunction` holds the 
state of the size of the block being built. /// As the value increases, the less we can add to blocks. #[derive(PartialEq)] -enum BlockLimitFunction { +pub enum BlockLimitFunction { /// The block size limit has not been hit, and there are no restrictions on what can be added to /// a block. NO_LIMIT_HIT, @@ -608,6 +609,43 @@ impl TransactionResult { } } +/// Trait that defines what it means to be a block builder +pub trait BlockBuilder { + fn try_mine_tx_with_len( + &mut self, + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + tx_len: u64, + limit_behavior: &BlockLimitFunction, + ast_rules: ASTRules, + ) -> TransactionResult; + + /// Append a transaction if doing so won't exceed the epoch data size. + /// Errors out if we fail to mine the tx (exceed budget, or the transaction is invalid). + fn try_mine_tx( + &mut self, + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + ast_rules: ASTRules, + ) -> Result { + let tx_len = tx.tx_len(); + match self.try_mine_tx_with_len( + clarity_tx, + tx, + tx_len, + &BlockLimitFunction::NO_LIMIT_HIT, + ast_rules, + ) { + TransactionResult::Success(s) => Ok(TransactionResult::Success(s)), + TransactionResult::Skipped(TransactionSkipped { error, .. }) + | TransactionResult::ProcessingError(TransactionError { error, .. }) => Err(error), + TransactionResult::Problematic(TransactionProblematic { tx, .. 
}) => { + Err(Error::ProblematicTransaction(tx.txid())) + } + } + } +} + /// /// Independent structure for building microblocks: /// StacksBlockBuilder cannot be used, since microblocks should only be broadcasted @@ -1142,7 +1180,7 @@ impl<'a> StacksMicroblockBuilder<'a> { let mut num_txs = self.runtime.num_mined; let mut num_selected = 0; let mut tx_events = Vec::new(); - let deadline = get_epoch_time_ms() + (self.settings.max_miner_time_ms as u128); + let deadline = get_epoch_time_ms() + u128::from(self.settings.max_miner_time_ms); let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; mem_pool.reset_nonce_cache()?; @@ -1392,7 +1430,7 @@ impl StacksBlockBuilder { header .consensus_serialize(&mut header_bytes) .expect("FATAL: failed to serialize to vec"); - let bytes_so_far = header_bytes.len() as u64; + let bytes_so_far = u64::try_from(header_bytes.len()).expect("header bytes exceeds 2^64"); StacksBlockBuilder { chain_tip: parent_chain_tip.clone(), @@ -1539,291 +1577,51 @@ impl StacksBlockBuilder { } /// Append a transaction if doing so won't exceed the epoch data size. - /// Errors out if we fail to mine the tx (exceed budget, or the transaction is invalid). - pub fn try_mine_tx( + /// Does not check for errors + #[cfg(test)] + pub fn force_mine_tx( &mut self, clarity_tx: &mut ClarityTx, tx: &StacksTransaction, - ast_rules: ASTRules, - ) -> Result { - let tx_len = tx.tx_len(); - match self.try_mine_tx_with_len( - clarity_tx, - tx, - tx_len, - &BlockLimitFunction::NO_LIMIT_HIT, - ast_rules, - ) { - TransactionResult::Success(s) => Ok(TransactionResult::Success(s)), - TransactionResult::Skipped(TransactionSkipped { error, .. }) - | TransactionResult::ProcessingError(TransactionError { error, .. }) => Err(error), - TransactionResult::Problematic(TransactionProblematic { tx, .. 
}) => { - Err(Error::ProblematicTransaction(tx.txid())) - } - } - } + ) -> Result<(), Error> { + let mut tx_bytes = vec![]; + tx.consensus_serialize(&mut tx_bytes) + .map_err(Error::CodecError)?; + let tx_len = u64::try_from(tx_bytes.len()).expect("tx len exceeds 2^64 bytes"); - /// Append a transaction if doing so won't exceed the epoch data size. - /// Errors out if we exceed budget, or the transaction is invalid. - fn try_mine_tx_with_len( - &mut self, - clarity_tx: &mut ClarityTx, - tx: &StacksTransaction, - tx_len: u64, - limit_behavior: &BlockLimitFunction, - ast_rules: ASTRules, - ) -> TransactionResult { if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { - return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); + warn!( + "Epoch size is {} >= {}", + self.bytes_so_far + tx_len, + MAX_EPOCH_SIZE + ); } - match limit_behavior { - BlockLimitFunction::CONTRACT_LIMIT_HIT => { - match &tx.payload { - TransactionPayload::ContractCall(cc) => { - // once we've hit the runtime limit once, allow boot code contract calls, but do not try to eval - // other contract calls - if !cc.address.is_boot_code_addr() { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), - ); - } - } - TransactionPayload::SmartContract(..) 
=> { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), - ); - } - _ => {} - } - } - BlockLimitFunction::LIMIT_REACHED => { - return TransactionResult::skipped( - &tx, - "BlockLimitFunction::LIMIT_REACHED".to_string(), - ) - } - BlockLimitFunction::NO_LIMIT_HIT => {} - }; - let quiet = !cfg!(test); - let result = if !self.anchored_done { - // building up the anchored blocks - if tx.anchor_mode != TransactionAnchorMode::OnChainOnly - && tx.anchor_mode != TransactionAnchorMode::Any - { - return TransactionResult::skipped_due_to_error( - tx, - Error::InvalidStacksTransaction( - "Invalid transaction anchor mode for anchored data".to_string(), - false, - ), - ); - } - - // preemptively skip problematic transactions - if let Err(e) = Relayer::static_check_problematic_relayed_tx( - clarity_tx.config.mainnet, - clarity_tx.get_epoch(), - &tx, - ast_rules, - ) { - info!( - "Detected problematic tx {} while mining; dropping from mempool", - tx.txid() - ); - return TransactionResult::problematic(&tx, Error::NetError(e)); - } - let (fee, receipt) = match StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, - ) { - Ok((fee, receipt)) => (fee, receipt), + if !self.anchored_done { + // save + match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { + Ok((fee, receipt)) => { + self.total_anchored_fees += fee; + } Err(e) => { - let (is_problematic, e) = - TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); - if is_problematic { - return TransactionResult::problematic(&tx, e); - } else { - match e { - Error::CostOverflowError(cost_before, cost_after, total_budget) => { - clarity_tx.reset_cost(cost_before.clone()); - if total_budget.proportion_largest_dimension(&cost_before) - < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC - { - warn!( - "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", - tx.txid(), - 100 - 
TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, - &total_budget - ); - return TransactionResult::error( - &tx, - Error::TransactionTooBigError, - ); - } else { - warn!( - "Transaction {} reached block cost {}; budget was {}", - tx.txid(), - &cost_after, - &total_budget - ); - return TransactionResult::skipped_due_to_error( - &tx, - Error::BlockTooBigError, - ); - } - } - _ => return TransactionResult::error(&tx, e), - } - } + warn!("Invalid transaction {} in anchored block, but forcing inclusion (error: {:?})", &tx.txid(), &e); } - }; - info!("Include tx"; - "tx" => %tx.txid(), - "payload" => tx.payload.name(), - "origin" => %tx.origin_address()); + } - // save self.txs.push(tx.clone()); - self.total_anchored_fees += fee; - - TransactionResult::success(&tx, fee, receipt) } else { - // building up the microblocks - if tx.anchor_mode != TransactionAnchorMode::OffChainOnly - && tx.anchor_mode != TransactionAnchorMode::Any - { - return TransactionResult::skipped_due_to_error( - tx, - Error::InvalidStacksTransaction( - "Invalid transaction anchor mode for streamed data".to_string(), - false, - ), - ); - } - - // preemptively skip problematic transactions - if let Err(e) = Relayer::static_check_problematic_relayed_tx( - clarity_tx.config.mainnet, - clarity_tx.get_epoch(), - &tx, - ast_rules, - ) { - info!( - "Detected problematic tx {} while mining; dropping from mempool", - tx.txid() - ); - return TransactionResult::problematic(&tx, Error::NetError(e)); - } - let (fee, receipt) = match StacksChainState::process_transaction( - clarity_tx, tx, quiet, ast_rules, - ) { - Ok((fee, receipt)) => (fee, receipt), - Err(e) => { - let (is_problematic, e) = - TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); - if is_problematic { - return TransactionResult::problematic(&tx, e); - } else { - match e { - Error::CostOverflowError(cost_before, cost_after, total_budget) => { - clarity_tx.reset_cost(cost_before.clone()); - if total_budget.proportion_largest_dimension(&cost_before) 
- < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC - { - warn!( - "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", - tx.txid(), - 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, - &total_budget - ); - return TransactionResult::error( - &tx, - Error::TransactionTooBigError, - ); - } else { - warn!( - "Transaction {} reached block cost {}; budget was {}", - tx.txid(), - &cost_after, - &total_budget - ); - return TransactionResult::skipped_due_to_error( - &tx, - Error::BlockTooBigError, - ); - } - } - _ => return TransactionResult::error(&tx, e), - } - } - } - }; - debug!( - "Include tx {} ({}) in microblock", - tx.txid(), - tx.payload.name() - ); - - // save - self.micro_txs.push(tx.clone()); - self.total_streamed_fees += fee; - - TransactionResult::success(&tx, fee, receipt) - }; - - self.bytes_so_far += tx_len; - result - } - - /// Append a transaction if doing so won't exceed the epoch data size. - /// Does not check for errors - #[cfg(test)] - pub fn force_mine_tx( - &mut self, - clarity_tx: &mut ClarityTx, - tx: &StacksTransaction, - ) -> Result<(), Error> { - let mut tx_bytes = vec![]; - tx.consensus_serialize(&mut tx_bytes) - .map_err(Error::CodecError)?; - let tx_len = tx_bytes.len() as u64; - - if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { - warn!( - "Epoch size is {} >= {}", - self.bytes_so_far + tx_len, - MAX_EPOCH_SIZE - ); - } - - let quiet = !cfg!(test); - if !self.anchored_done { - // save - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, receipt)) => { - self.total_anchored_fees += fee; - } - Err(e) => { - warn!("Invalid transaction {} in anchored block, but forcing inclusion (error: {:?})", &tx.txid(), &e); - } - } - - self.txs.push(tx.clone()); - } else { - match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { - Ok((fee, receipt)) => { - self.total_streamed_fees += fee; - } - Err(e) => { - warn!( - "Invalid transaction {} in 
microblock, but forcing inclusion (error: {:?})", - &tx.txid(), - &e - ); - } + match StacksChainState::process_transaction(clarity_tx, tx, quiet, ASTRules::Typical) { + Ok((fee, receipt)) => { + self.total_streamed_fees += fee; + } + Err(e) => { + warn!( + "Invalid transaction {} in microblock, but forcing inclusion (error: {:?})", + &tx.txid(), + &e + ); + } } self.micro_txs.push(tx.clone()); @@ -1892,7 +1690,7 @@ impl StacksBlockBuilder { StacksChainState::finish_block( clarity_tx, self.miner_payouts.as_ref(), - self.header.total_work.work as u32, + u32::try_from(self.header.total_work.work).expect("FATAL: more than 2^32 blocks"), self.header.microblock_pubkey_hash, ) .expect("FATAL: call to `finish_block` failed"); @@ -2037,8 +1835,10 @@ impl StacksBlockBuilder { ); let burn_tip = SortitionDB::get_canonical_chain_tip_bhh(burn_dbconn.conn())?; - let burn_tip_height = - SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height as u32; + let burn_tip_height = u32::try_from( + SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height, + ) + .expect("FATAL: more than 2^32 sortitions"); let parent_microblocks = if StacksChainState::block_crosses_epoch_boundary( chainstate.db(), @@ -2140,7 +1940,8 @@ impl StacksBlockBuilder { Some(self.miner_id), )?; self.miner_payouts = matured_miner_rewards_opt; - self.total_confirmed_streamed_fees += microblock_fees as u64; + self.total_confirmed_streamed_fees += + u64::try_from(microblock_fees).expect("more than 2^64 microstx microblock fees"); Ok((clarity_tx, microblock_execution_cost)) } @@ -2275,8 +2076,8 @@ impl StacksBlockBuilder { 0, &FIRST_BURNCHAIN_CONSENSUS_HASH, &first_block_hash, - first_block_height as u32, - first_block_ts as u64, + u32::try_from(first_block_height).expect("FATAL: first block is over 2^32"), + u64::try_from(first_block_ts).expect("FATAL: first block timestamp is over 2^64"), &proof, pubkey_hash, ) @@ -2316,8 +2117,10 @@ impl StacksBlockBuilder { 0, 
&FIRST_BURNCHAIN_CONSENSUS_HASH, &first_block_hash, - BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32, - BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64, + u32::try_from(BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT) + .expect("first regtest bitcoin block is over 2^32"), + u64::try_from(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP) + .expect("first regtest bitcoin block timestamp is over 2^64"), &proof, pubkey_hash, ) @@ -2342,80 +2145,43 @@ impl StacksBlockBuilder { Ok(builder) } - /// Given access to the mempool, mine an anchored block with no more than the given execution cost. - /// returns the assembled block, and the consumed execution budget. - pub fn build_anchored_block( - chainstate_handle: &StacksChainState, // not directly used; used as a handle to open other chainstates - burn_dbconn: &SortitionDBConn, + /// Select transactions for block inclusion from the mempool. + /// Applies them to the ongoing ClarityTx. + /// If invalid transactions are encountered, they are dropped from the mempool. + /// Returns whether or not the miner got blocked, as well as the gathered tx events + pub fn select_and_apply_transactions( + epoch_tx: &mut ClarityTx, + builder: &mut B, mempool: &mut MemPoolDB, - parent_stacks_header: &StacksHeaderInfo, // Stacks header we're building off of - total_burn: u64, // the burn so far on the burnchain (i.e. from the last burnchain block) - proof: VRFProof, // proof over the burnchain's last seed - pubkey_hash: Hash160, - coinbase_tx: &StacksTransaction, + parent_stacks_header: &StacksHeaderInfo, + coinbase_tx: Option<&StacksTransaction>, settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, - ) -> Result<(StacksBlock, ExecutionCost, u64), Error> { - let mempool_settings = settings.mempool_settings; + ast_rules: ASTRules, + ) -> Result<(bool, Vec), Error> { let max_miner_time_ms = settings.max_miner_time_ms; - - if let TransactionPayload::Coinbase(..) 
= coinbase_tx.payload { - } else { - return Err(Error::MemPoolError( - "Not a coinbase transaction".to_string(), - )); - } - - let (tip_consensus_hash, tip_block_hash, tip_height) = ( - parent_stacks_header.consensus_hash.clone(), - parent_stacks_header.anchored_header.block_hash(), - parent_stacks_header.stacks_block_height, - ); - - debug!( - "Build anchored block off of {}/{} height {}", - &tip_consensus_hash, &tip_block_hash, tip_height - ); - - let (mut chainstate, _) = chainstate_handle.reopen()?; - - let mut builder = StacksBlockBuilder::make_block_builder( - chainstate.mainnet, - parent_stacks_header, - proof, - total_burn, - pubkey_hash, - )?; - + let mempool_settings = settings.mempool_settings.clone(); + let tip_height = parent_stacks_header.stacks_block_height; let ts_start = get_epoch_time_ms(); - - let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; - let ast_rules = miner_epoch_info.ast_rules; - if ast_rules != ASTRules::Typical { - builder.header.version = cmp::max( - STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, - builder.header.version, - ); - } - - let (mut epoch_tx, confirmed_mblock_cost) = - builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; let stacks_epoch_id = epoch_tx.get_epoch(); let block_limit = epoch_tx .block_limit() .expect("Failed to obtain block limit from miner's block connection"); let mut tx_events = Vec::new(); - tx_events.push( - builder - .try_mine_tx(&mut epoch_tx, coinbase_tx, ast_rules.clone())? - .convert_to_event(), - ); - mempool.reset_nonce_cache()?; + if let Some(coinbase_tx) = coinbase_tx { + tx_events.push( + builder + .try_mine_tx(epoch_tx, coinbase_tx, ast_rules.clone())? 
+ .convert_to_event(), + ); + } + mempool.reset_nonce_cache()?; mempool.estimate_tx_rates(100, &block_limit, &stacks_epoch_id)?; + let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; let mut considered = HashSet::new(); // txids of all transactions we looked at let mut mined_origin_nonces: HashMap = HashMap::new(); // map addrs of mined transaction origins to the nonces we used let mut mined_sponsor_nonces: HashMap = HashMap::new(); // map addrs of mined transaction sponsors to the nonces we used @@ -2423,21 +2189,20 @@ impl StacksBlockBuilder { let mut invalidated_txs = vec![]; let mut to_drop_and_blacklist = vec![]; - let mut block_limit_hit = BlockLimitFunction::NO_LIMIT_HIT; - let deadline = ts_start + (max_miner_time_ms as u128); + let deadline = ts_start + u128::from(max_miner_time_ms); let mut num_txs = 0; let mut blocked = false; debug!( - "Anchored block transaction selection begins (child of {})", + "Block transaction selection begins (child of {})", &parent_stacks_header.anchored_header.block_hash() ); let result = { - let mut intermediate_result = Ok(0); + let mut intermediate_result: Result<_, Error> = Ok(0); while block_limit_hit != BlockLimitFunction::LIMIT_REACHED { let mut num_considered = 0; intermediate_result = mempool.iterate_candidates( - &mut epoch_tx, + epoch_tx, &mut tx_events, tip_height, mempool_settings.clone(), @@ -2604,7 +2369,7 @@ impl StacksBlockBuilder { break; } } - debug!("Anchored block transaction selection finished (child of {}): {} transactions selected ({} considered)", &parent_stacks_header.anchored_header.block_hash(), num_txs, considered.len()); + debug!("Block transaction selection finished (child of {}): {} transactions selected ({} considered)", &parent_stacks_header.anchored_header.block_hash(), num_txs, considered.len()); intermediate_result }; @@ -2615,14 +2380,91 @@ impl StacksBlockBuilder { observer.mempool_txs_dropped(to_drop_and_blacklist, MemPoolDropReason::PROBLEMATIC); } - match result { - Ok(_) => {} 
+ if let Err(e) = result { + warn!("Failure building block: {}", e); + return Err(e); + } + + Ok((blocked, tx_events)) + } + + /// Given access to the mempool, mine an anchored block with no more than the given execution cost. + /// returns the assembled block, and the consumed execution budget. + pub fn build_anchored_block( + chainstate_handle: &StacksChainState, // not directly used; used as a handle to open other chainstates + burn_dbconn: &SortitionDBConn, + mempool: &mut MemPoolDB, + parent_stacks_header: &StacksHeaderInfo, // Stacks header we're building off of + total_burn: u64, // the burn so far on the burnchain (i.e. from the last burnchain block) + proof: VRFProof, // proof over the burnchain's last seed + pubkey_hash: Hash160, + coinbase_tx: &StacksTransaction, + settings: BlockBuilderSettings, + event_observer: Option<&dyn MemPoolEventDispatcher>, + ) -> Result<(StacksBlock, ExecutionCost, u64), Error> { + if let TransactionPayload::Coinbase(..) = coinbase_tx.payload { + } else { + return Err(Error::MemPoolError( + "Not a coinbase transaction".to_string(), + )); + } + + let (tip_consensus_hash, tip_block_hash, tip_height) = ( + parent_stacks_header.consensus_hash.clone(), + parent_stacks_header.anchored_header.block_hash(), + parent_stacks_header.stacks_block_height, + ); + + debug!( + "Build anchored block off of {}/{} height {}", + &tip_consensus_hash, &tip_block_hash, tip_height + ); + + let (mut chainstate, _) = chainstate_handle.reopen()?; + + let mut builder = StacksBlockBuilder::make_block_builder( + chainstate.mainnet, + parent_stacks_header, + proof, + total_burn, + pubkey_hash, + )?; + + let ts_start = get_epoch_time_ms(); + + let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; + let ast_rules = miner_epoch_info.ast_rules; + if ast_rules != ASTRules::Typical { + builder.header.version = cmp::max( + STACKS_BLOCK_VERSION_AST_PRECHECK_SIZE, + builder.header.version, + ); + } + + let (mut epoch_tx, 
confirmed_mblock_cost) = + builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; + + let block_limit = epoch_tx + .block_limit() + .expect("Failed to obtain block limit from miner's block connection"); + + let (blocked, tx_events) = match Self::select_and_apply_transactions( + &mut epoch_tx, + &mut builder, + mempool, + parent_stacks_header, + Some(coinbase_tx), + settings, + event_observer, + ast_rules, + ) { + Ok(x) => x, Err(e) => { warn!("Failure building block: {}", e); epoch_tx.rollback_block(); return Err(e); } - } + }; if blocked { debug!( @@ -2632,9 +2474,6 @@ impl StacksBlockBuilder { return Err(Error::MinerAborted); } - // the prior do_rebuild logic wasn't necessary - // a transaction that caused a budget exception is rolled back in process_transaction - // save the block so we can build microblocks off of it let block = builder.mine_anchored_block(&mut epoch_tx); let size = builder.bytes_so_far; @@ -2653,7 +2492,9 @@ impl StacksBlockBuilder { ); } - set_last_mined_block_transaction_count(block.txs.len() as u64); + set_last_mined_block_transaction_count( + u64::try_from(block.txs.len()).expect("more than 2^64 txs"), + ); set_last_mined_execution_cost_observed(&consumed, &block_limit); info!( @@ -2676,3 +2517,220 @@ impl StacksBlockBuilder { Ok((block, consumed, size)) } } + +impl BlockBuilder for StacksBlockBuilder { + /// Append a transaction if doing so won't exceed the epoch data size. + /// Errors out if we exceed budget, or the transaction is invalid. 
+ fn try_mine_tx_with_len( + &mut self, + clarity_tx: &mut ClarityTx, + tx: &StacksTransaction, + tx_len: u64, + limit_behavior: &BlockLimitFunction, + ast_rules: ASTRules, + ) -> TransactionResult { + if self.bytes_so_far + tx_len >= MAX_EPOCH_SIZE.into() { + return TransactionResult::skipped_due_to_error(&tx, Error::BlockTooBigError); + } + + match limit_behavior { + BlockLimitFunction::CONTRACT_LIMIT_HIT => { + match &tx.payload { + TransactionPayload::ContractCall(cc) => { + // once we've hit the runtime limit once, allow boot code contract calls, but do not try to eval + // other contract calls + if !cc.address.is_boot_code_addr() { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); + } + } + TransactionPayload::SmartContract(..) => { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::CONTRACT_LIMIT_HIT".to_string(), + ); + } + _ => {} + } + } + BlockLimitFunction::LIMIT_REACHED => { + return TransactionResult::skipped( + &tx, + "BlockLimitFunction::LIMIT_REACHED".to_string(), + ) + } + BlockLimitFunction::NO_LIMIT_HIT => {} + }; + + let quiet = !cfg!(test); + let result = if !self.anchored_done { + // building up the anchored blocks + if tx.anchor_mode != TransactionAnchorMode::OnChainOnly + && tx.anchor_mode != TransactionAnchorMode::Any + { + return TransactionResult::skipped_due_to_error( + tx, + Error::InvalidStacksTransaction( + "Invalid transaction anchor mode for anchored data".to_string(), + false, + ), + ); + } + + // preemptively skip problematic transactions + if let Err(e) = Relayer::static_check_problematic_relayed_tx( + clarity_tx.config.mainnet, + clarity_tx.get_epoch(), + &tx, + ast_rules, + ) { + info!( + "Detected problematic tx {} while mining; dropping from mempool", + tx.txid() + ); + return TransactionResult::problematic(&tx, Error::NetError(e)); + } + let (fee, receipt) = match StacksChainState::process_transaction( + clarity_tx, tx, quiet, ast_rules, + ) { + Ok((fee, 
receipt)) => (fee, receipt), + Err(e) => { + let (is_problematic, e) = + TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + if is_problematic { + return TransactionResult::problematic(&tx, e); + } else { + match e { + Error::CostOverflowError(cost_before, cost_after, total_budget) => { + clarity_tx.reset_cost(cost_before.clone()); + if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { + warn!( + "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", + tx.txid(), + 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, + &total_budget + ); + return TransactionResult::error( + &tx, + Error::TransactionTooBigError, + ); + } else { + warn!( + "Transaction {} reached block cost {}; budget was {}", + tx.txid(), + &cost_after, + &total_budget + ); + return TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + ); + } + } + _ => return TransactionResult::error(&tx, e), + } + } + } + }; + info!("Include tx"; + "tx" => %tx.txid(), + "payload" => tx.payload.name(), + "origin" => %tx.origin_address()); + + // save + self.txs.push(tx.clone()); + self.total_anchored_fees += fee; + + TransactionResult::success(&tx, fee, receipt) + } else { + // building up the microblocks + if tx.anchor_mode != TransactionAnchorMode::OffChainOnly + && tx.anchor_mode != TransactionAnchorMode::Any + { + return TransactionResult::skipped_due_to_error( + tx, + Error::InvalidStacksTransaction( + "Invalid transaction anchor mode for streamed data".to_string(), + false, + ), + ); + } + + // preemptively skip problematic transactions + if let Err(e) = Relayer::static_check_problematic_relayed_tx( + clarity_tx.config.mainnet, + clarity_tx.get_epoch(), + &tx, + ast_rules, + ) { + info!( + "Detected problematic tx {} while mining; dropping from mempool", + tx.txid() + ); + return TransactionResult::problematic(&tx, Error::NetError(e)); + } + let (fee, receipt) = match 
StacksChainState::process_transaction( + clarity_tx, tx, quiet, ast_rules, + ) { + Ok((fee, receipt)) => (fee, receipt), + Err(e) => { + let (is_problematic, e) = + TransactionResult::is_problematic(&tx, e, clarity_tx.get_epoch()); + if is_problematic { + return TransactionResult::problematic(&tx, e); + } else { + match e { + Error::CostOverflowError(cost_before, cost_after, total_budget) => { + clarity_tx.reset_cost(cost_before.clone()); + if total_budget.proportion_largest_dimension(&cost_before) + < TX_BLOCK_LIMIT_PROPORTION_HEURISTIC + { + warn!( + "Transaction {} consumed over {}% of block budget, marking as invalid; budget was {}", + tx.txid(), + 100 - TX_BLOCK_LIMIT_PROPORTION_HEURISTIC, + &total_budget + ); + return TransactionResult::error( + &tx, + Error::TransactionTooBigError, + ); + } else { + warn!( + "Transaction {} reached block cost {}; budget was {}", + tx.txid(), + &cost_after, + &total_budget + ); + return TransactionResult::skipped_due_to_error( + &tx, + Error::BlockTooBigError, + ); + } + } + _ => return TransactionResult::error(&tx, e), + } + } + } + }; + debug!( + "Include tx {} ({}) in microblock", + tx.txid(), + tx.payload.name() + ); + + // save + self.micro_txs.push(tx.clone()); + self.total_streamed_fees += fee; + + TransactionResult::success(&tx, fee, receipt) + }; + + self.bytes_so_far += tx_len; + result + } +} diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 5e518d1e71..cb7bd8e620 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -635,7 +635,7 @@ pub enum TenureChangeCause { BlockFound = 0, /// No winning block-commits, extend current tenure NoBlockFound = 1, - /// A “null miner” won the block-commit (see the MEV solution below) + /// A null miner won the block-commit NullMiner = 2, } @@ -658,7 +658,13 @@ pub struct SchnorrThresholdSignature { //pub scalar: wsts::Scalar, } -/// Reasons why a `TenureChange` transaction can be de +impl 
SchnorrThresholdSignature { + pub fn empty() -> SchnorrThresholdSignature { + SchnorrThresholdSignature {} + } +} + +/// Reasons why a `TenureChange` transaction can be bad pub enum TenureChangeError { SignatureInvalid, /// Not signed by required threshold (>70%) @@ -675,7 +681,7 @@ pub struct TenureChangePayload { /// The StacksBlockId of the last block from the previous tenure pub previous_tenure_end: StacksBlockId, /// The number of blocks produced in the previous tenure - pub previous_tenure_blocks: u16, + pub previous_tenure_blocks: u32, /// A flag to indicate which of the following triggered the tenure change pub cause: TenureChangeCause, /// The ECDSA public key hash of the current tenure @@ -691,8 +697,9 @@ pub enum TransactionPayload { TokenTransfer(PrincipalData, u64, TokenTransferMemo), ContractCall(TransactionContractCall), SmartContract(TransactionSmartContract, Option), - PoisonMicroblock(StacksMicroblockHeader, StacksMicroblockHeader), // the previous epoch leader sent two microblocks with the same sequence, and this is proof - Coinbase(CoinbasePayload, Option), + // the previous epoch leader sent two microblocks with the same sequence, and this is proof + PoisonMicroblock(StacksMicroblockHeader, StacksMicroblockHeader), + Coinbase(CoinbasePayload, Option, Option), TenureChange(TenureChangePayload), } @@ -715,9 +722,12 @@ define_u8_enum!(TransactionPayloadID { ContractCall = 2, PoisonMicroblock = 3, Coinbase = 4, + // has an alt principal, but no VRF proof CoinbaseToAltRecipient = 5, VersionedSmartContract = 6, - TenureChange = 7 + TenureChange = 7, + // has a VRF proof, and may have an alt principal + NakamotoCoinbase = 8 }); /// Encoding of an asset type identifier @@ -1240,6 +1250,8 @@ pub mod test { version: 1, bytes: Hash160([0xff; 20]), }; + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + 
let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); let tx_payloads = vec![ TransactionPayload::TokenTransfer( stx_address.into(), @@ -1284,18 +1296,35 @@ pub mod test { }, Some(ClarityVersion::Clarity2), ), - TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Contract( + QualifiedContractIdentifier::transient(), + )), + None, + ), + TransactionPayload::Coinbase( + CoinbasePayload([0x12; 32]), + Some(PrincipalData::Standard(StandardPrincipalData( + 0x01, [0x02; 20], + ))), + None, + ), + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof.clone())), TransactionPayload::Coinbase( CoinbasePayload([0x12; 32]), Some(PrincipalData::Contract( QualifiedContractIdentifier::transient(), )), + Some(proof.clone()), ), TransactionPayload::Coinbase( CoinbasePayload([0x12; 32]), Some(PrincipalData::Standard(StandardPrincipalData( 0x01, [0x02; 20], ))), + Some(proof.clone()), ), TransactionPayload::PoisonMicroblock(mblock_header_1, mblock_header_2), ]; @@ -1355,7 +1384,7 @@ pub mod test { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Mainnet, origin_auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index ecdc9294a9..af7adf1818 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -203,7 +203,7 @@ fn test_bad_microblock_fees_pre_v210() { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), - 
TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -524,7 +524,7 @@ fn test_bad_microblock_fees_fix_transition() { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -878,7 +878,7 @@ fn test_get_block_info_v210() { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -1248,7 +1248,7 @@ fn test_get_block_info_v210_no_microblocks() { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&pk).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -1630,6 +1630,7 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { } else { alt_recipient_id }, + None, ), ); tx_coinbase.chain_id = 0x80000000; diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 09db0e33aa..b6c0cebf76 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -4366,9 +4366,10 @@ fn 
mempool_incorporate_pox_unlocks() { let burn_block_height = db.get_current_burnchain_block_height() as u64; let v1_unlock_height = db.get_v1_unlock_height(); let v2_unlock_height = db.get_v2_unlock_height(); + let v3_unlock_height = db.get_v3_unlock_height(); let balance = db.get_account_stx_balance(&principal); info!("Checking balance"; "v1_unlock_height" => v1_unlock_height, "burn_block_height" => burn_block_height); - balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height, v2_unlock_height) + balance.get_available_balance_at_burn_block(burn_block_height, v1_unlock_height, v2_unlock_height, v3_unlock_height) }) }).unwrap(); diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index af56623f43..5c0ab78446 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -42,6 +42,7 @@ use crate::chainstate::burn::operations::{ }; use crate::chainstate::burn::*; use crate::chainstate::coordinator::Error as CoordinatorError; +use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::db::blocks::test::store_staging_block; use crate::chainstate::stacks::db::test::*; use crate::chainstate::stacks::db::*; @@ -270,7 +271,9 @@ pub struct TestStacksNode { pub key_ops: HashMap, // map VRF public keys to their locations in the prev_keys array pub anchored_blocks: Vec, pub microblocks: Vec>, + pub nakamoto_blocks: Vec>, pub commit_ops: HashMap, + pub nakamoto_commit_ops: HashMap, pub test_name: String, forkable: bool, } @@ -295,7 +298,9 @@ impl TestStacksNode { key_ops: HashMap::new(), anchored_blocks: vec![], microblocks: vec![], + nakamoto_blocks: vec![], commit_ops: HashMap::new(), + nakamoto_commit_ops: HashMap::new(), test_name: test_name.to_string(), forkable: true, } @@ -309,7 +314,9 @@ impl TestStacksNode { key_ops: HashMap::new(), anchored_blocks: vec![], microblocks: vec![], + nakamoto_blocks: vec![], commit_ops: 
HashMap::new(), + nakamoto_commit_ops: HashMap::new(), test_name: test_name.to_string(), forkable: true, } @@ -322,7 +329,9 @@ impl TestStacksNode { key_ops: HashMap::new(), anchored_blocks: vec![], microblocks: vec![], + nakamoto_blocks: vec![], commit_ops: HashMap::new(), + nakamoto_commit_ops: HashMap::new(), test_name: "".to_string(), forkable: false, } @@ -357,7 +366,9 @@ impl TestStacksNode { key_ops: self.key_ops.clone(), anchored_blocks: self.anchored_blocks.clone(), microblocks: self.microblocks.clone(), + nakamoto_blocks: self.nakamoto_blocks.clone(), commit_ops: self.commit_ops.clone(), + nakamoto_commit_ops: self.nakamoto_commit_ops.clone(), test_name: new_test_name.to_string(), forkable: true, } @@ -574,6 +585,8 @@ impl TestStacksNode { block_commit_op } + /// Mine a single Stacks block and a microblock stream. + /// Produce its block-commit. pub fn mine_stacks_block( &mut self, sortdb: &SortitionDB, @@ -1022,6 +1035,7 @@ pub fn make_coinbase_with_nonce( TransactionPayload::Coinbase( CoinbasePayload([(burnchain_height % 256) as u8; 32]), recipient, + None, ), ); tx_coinbase.chain_id = 0x80000000; @@ -1246,7 +1260,7 @@ pub fn make_user_stacks_transfer( } pub fn make_user_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64) -> StacksTransaction { - let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None); + let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); sign_standard_singlesig_tx(payload.into(), sender, nonce, tx_fee) } diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index bf1e42150b..637d1366f4 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -31,6 +31,7 @@ use stacks_common::util::retry::BoundReader; use stacks_common::util::secp256k1::MessageSignature; use crate::burnchains::Txid; +use crate::chainstate::stacks::TransactionPayloadID; use 
crate::chainstate::stacks::*; use crate::core::*; use crate::net::Error as net_error; @@ -51,7 +52,7 @@ impl StacksMessageCodec for TransactionContractCall { let contract_name: ContractName = read_next(fd)?; let function_name: ClarityName = read_next(fd)?; let function_args: Vec = { - let mut bound_read = BoundReader::from_reader(fd, MAX_TRANSACTION_LEN as u64); + let mut bound_read = BoundReader::from_reader(fd, u64::from(MAX_TRANSACTION_LEN)); read_next(&mut bound_read) }?; @@ -219,18 +220,37 @@ impl StacksMessageCodec for TransactionPayload { h1.consensus_serialize(fd)?; h2.consensus_serialize(fd)?; } - TransactionPayload::Coinbase(buf, recipient_opt) => { - match recipient_opt { - None => { + TransactionPayload::Coinbase(buf, recipient_opt, vrf_opt) => { + match (recipient_opt, vrf_opt) { + (None, None) => { // stacks 2.05 and earlier only use this path write_next(fd, &(TransactionPayloadID::Coinbase as u8))?; write_next(fd, buf)?; } - Some(recipient) => { + (Some(recipient), None) => { write_next(fd, &(TransactionPayloadID::CoinbaseToAltRecipient as u8))?; write_next(fd, buf)?; write_next(fd, &Value::Principal(recipient.clone()))?; } + (None, Some(vrf_proof)) => { + // nakamoto coinbase + // encode principal as (optional principal) + write_next(fd, &(TransactionPayloadID::NakamotoCoinbase as u8))?; + write_next(fd, buf)?; + write_next(fd, &Value::none())?; + write_next(fd, &vrf_proof.to_bytes().to_vec())?; + } + (Some(recipient), Some(vrf_proof)) => { + write_next(fd, &(TransactionPayloadID::NakamotoCoinbase as u8))?; + write_next(fd, buf)?; + write_next( + fd, + &Value::some(Value::Principal(recipient.clone())).expect( + "FATAL: failed to encode recipient principal as `optional`", + ), + )?; + write_next(fd, &vrf_proof.to_bytes().to_vec())?; + } } } TransactionPayload::TenureChange(tc) => { @@ -291,7 +311,7 @@ impl StacksMessageCodec for TransactionPayload { } TransactionPayloadID::Coinbase => { let payload: CoinbasePayload = read_next(fd)?; - 
TransactionPayload::Coinbase(payload, None) + TransactionPayload::Coinbase(payload, None, None) } TransactionPayloadID::CoinbaseToAltRecipient => { let payload: CoinbasePayload = read_next(fd)?; @@ -303,9 +323,37 @@ impl StacksMessageCodec for TransactionPayload { } }; - TransactionPayload::Coinbase(payload, Some(recipient)) + TransactionPayload::Coinbase(payload, Some(recipient), None) + } + TransactionPayloadID::TenureChange => { + let payload: TenureChangePayload = read_next(fd)?; + TransactionPayload::TenureChange(payload) + } + // TODO: gate this! + TransactionPayloadID::NakamotoCoinbase => { + let payload: CoinbasePayload = read_next(fd)?; + let principal_value_opt: Value = read_next(fd)?; + let recipient_opt = if let Value::Optional(optional_data) = principal_value_opt { + if let Some(principal_value) = optional_data.data { + if let Value::Principal(recipient_principal) = *principal_value { + Some(recipient_principal) + } else { + None + } + } else { + None + } + } else { + return Err(codec_error::DeserializeError("Failed to parse nakamoto coinbase transaction -- did not receive an optional recipient principal value".to_string())); + }; + let vrf_proof_bytes: Vec = read_next(fd)?; + let Some(vrf_proof) = VRFProof::from_bytes(&vrf_proof_bytes) else { + return Err(codec_error::DeserializeError( + "Failed to decode coinbase VRF proof".to_string(), + )); + }; + TransactionPayload::Coinbase(payload, recipient_opt, Some(vrf_proof)) } - TransactionPayloadID::TenureChange => TransactionPayload::TenureChange(read_next(fd)?), }; Ok(payload) @@ -537,7 +585,7 @@ impl StacksTransaction { let mut tx_bytes = vec![]; self.consensus_serialize(&mut tx_bytes) .expect("BUG: Failed to serialize a transaction object"); - tx_bytes.len() as u64 + u64::try_from(tx_bytes.len()).expect("tx len exceeds 2^64 bytes") } pub fn consensus_deserialize_with_len( @@ -633,10 +681,12 @@ impl StacksTransaction { } /// Try to convert to a coinbase payload - pub fn try_as_coinbase(&self) -> 
Option<(&CoinbasePayload, Option<&PrincipalData>)> { + pub fn try_as_coinbase( + &self, + ) -> Option<(&CoinbasePayload, Option<&PrincipalData>, Option<&VRFProof>)> { match &self.payload { - TransactionPayload::Coinbase(ref payload, ref recipient_opt) => { - Some((payload, recipient_opt.as_ref())) + TransactionPayload::Coinbase(ref payload, ref recipient_opt, ref vrf_proof_opt) => { + Some((payload, recipient_opt.as_ref(), vrf_proof_opt.as_ref())) } _ => None, } @@ -1603,12 +1653,16 @@ mod test { corrupt_h2.sequence += 1; TransactionPayload::PoisonMicroblock(corrupt_h1, corrupt_h2) } - TransactionPayload::Coinbase(ref buf, ref recipient_opt) => { + TransactionPayload::Coinbase(ref buf, ref recipient_opt, ref vrf_proof_opt) => { let mut corrupt_buf_bytes = buf.as_bytes().clone(); corrupt_buf_bytes[0] = (((corrupt_buf_bytes[0] as u16) + 1) % 256) as u8; let corrupt_buf = CoinbasePayload(corrupt_buf_bytes); - TransactionPayload::Coinbase(corrupt_buf, recipient_opt.clone()) + TransactionPayload::Coinbase( + corrupt_buf, + recipient_opt.clone(), + vrf_proof_opt.clone(), + ) } TransactionPayload::TenureChange(_) => todo!(), }; @@ -1846,7 +1900,8 @@ mod test { #[test] fn tx_stacks_transaction_payload_coinbase() { - let coinbase_payload = TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None); + let coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, None); let coinbase_payload_bytes = vec![ // payload type ID TransactionPayloadID::Coinbase as u8, @@ -1891,6 +1946,326 @@ mod test { ); } + #[test] + fn tx_stacks_transaction_payload_nakamoto_coinbase() { + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + + let coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof)); + 
let coinbase_bytes = vec![ + // payload type ID + TransactionPayloadID::NakamotoCoinbase as u8, + // buffer + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + // no alt recipient, so Value::none + 0x09, + // proof bytes length + 0x00, + 0x00, + 0x00, + 0x50, + // proof bytes + 0x92, + 0x75, + 0xdf, + 0x67, + 0xa6, + 0x8c, + 0x87, + 0x45, + 0xc0, + 0xff, + 0x97, + 0xb4, + 0x82, + 0x01, + 0xee, + 0x6d, + 0xb4, + 0x47, + 0xf7, + 0xc9, + 0x3b, + 0x23, + 0xae, + 0x24, + 0xcd, + 0xc2, + 0x40, + 0x0f, + 0x52, + 0xfd, + 0xb0, + 0x8a, + 0x1a, + 0x6a, + 0xc7, + 0xec, + 0x71, + 0xbf, + 0x9c, + 0x9c, + 0x76, + 0xe9, + 0x6e, + 0xe4, + 0x67, + 0x5e, + 0xbf, + 0xf6, + 0x06, + 0x25, + 0xaf, + 0x28, + 0x71, + 0x85, + 0x01, + 0x04, + 0x7b, + 0xfd, + 0x87, + 0xb8, + 0x10, + 0xc2, + 0xd2, + 0x13, + 0x9b, + 0x73, + 0xc2, + 0x3b, + 0xd6, + 0x9d, + 0xe6, + 0x63, + 0x60, + 0x95, + 0x3a, + 0x64, + 0x2c, + 0x2a, + 0x33, + 0x0a, + ]; + + check_codec_and_corruption(&coinbase_payload, &coinbase_bytes); + } + + #[test] + fn tx_stacks_transaction_payload_nakamoto_coinbase_alt_recipient() { + let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); + let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); + + let recipient = PrincipalData::from(QualifiedContractIdentifier { + issuer: StacksAddress { + version: 1, + bytes: Hash160([0xff; 20]), + } + .into(), + name: "foo-contract".into(), + }); + + let coinbase_payload = + TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), Some(recipient), Some(proof)); + let coinbase_bytes = vec![ + // payload type ID + TransactionPayloadID::NakamotoCoinbase as u8, + // buffer + 0x12, + 0x12, + 
0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + 0x12, + // have contract recipient, so Some(..) + 0x0a, + // contract address type + 0x06, + // address + 0x01, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + 0xff, + // name length + 0x0c, + // name ('foo-contract') + 0x66, + 0x6f, + 0x6f, + 0x2d, + 0x63, + 0x6f, + 0x6e, + 0x74, + 0x72, + 0x61, + 0x63, + 0x74, + // proof bytes length + 0x00, + 0x00, + 0x00, + 0x50, + // proof bytes + 0x92, + 0x75, + 0xdf, + 0x67, + 0xa6, + 0x8c, + 0x87, + 0x45, + 0xc0, + 0xff, + 0x97, + 0xb4, + 0x82, + 0x01, + 0xee, + 0x6d, + 0xb4, + 0x47, + 0xf7, + 0xc9, + 0x3b, + 0x23, + 0xae, + 0x24, + 0xcd, + 0xc2, + 0x40, + 0x0f, + 0x52, + 0xfd, + 0xb0, + 0x8a, + 0x1a, + 0x6a, + 0xc7, + 0xec, + 0x71, + 0xbf, + 0x9c, + 0x9c, + 0x76, + 0xe9, + 0x6e, + 0xe4, + 0x67, + 0x5e, + 0xbf, + 0xf6, + 0x06, + 0x25, + 0xaf, + 0x28, + 0x71, + 0x85, + 0x01, + 0x04, + 0x7b, + 0xfd, + 0x87, + 0xb8, + 0x10, + 0xc2, + 0xd2, + 0x13, + 0x9b, + 0x73, + 0xc2, + 0x3b, + 0xd6, + 0x9d, + 0xe6, + 0x63, + 0x60, + 0x95, + 0x3a, + 0x64, + 0x2c, + 0x2a, + 0x33, + 0x0a, + ]; + + check_codec_and_corruption(&coinbase_payload, &coinbase_bytes); + } + #[test] fn tx_stacks_transaction_payload_microblock_poison() { let header_1 = StacksMicroblockHeader { @@ -3347,7 +3722,7 @@ mod test { let tx_coinbase = StacksTransaction::new( TransactionVersion::Mainnet, auth.clone(), - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); let tx_stx = StacksTransaction::new( diff --git a/stackslib/src/clarity_vm/clarity.rs b/stackslib/src/clarity_vm/clarity.rs index d6123e160a..5b7a1ca7d1 100644 --- 
a/stackslib/src/clarity_vm/clarity.rs +++ b/stackslib/src/clarity_vm/clarity.rs @@ -49,10 +49,12 @@ use crate::chainstate::stacks::boot::POX_2_MAINNET_CODE; use crate::chainstate::stacks::boot::POX_2_TESTNET_CODE; use crate::chainstate::stacks::boot::POX_3_MAINNET_CODE; use crate::chainstate::stacks::boot::POX_3_TESTNET_CODE; +use crate::chainstate::stacks::boot::POX_4_MAINNET_CODE; +use crate::chainstate::stacks::boot::POX_4_TESTNET_CODE; use crate::chainstate::stacks::boot::{ BOOT_CODE_COSTS, BOOT_CODE_COSTS_2, BOOT_CODE_COSTS_3, BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, COSTS_2_NAME, - COSTS_3_NAME, POX_2_NAME, POX_3_NAME, + COSTS_3_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME, }; use crate::chainstate::stacks::db::StacksAccount; use crate::chainstate::stacks::db::StacksChainState; @@ -905,9 +907,9 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let v1_unlock_height = self.burn_state_db.get_v1_unlock_height(); let pox_2_first_cycle = PoxConstants::static_block_height_to_reward_cycle( - v1_unlock_height as u64, - first_block_height as u64, - pox_reward_cycle_length as u64, + u64::from(v1_unlock_height), + u64::from(first_block_height), + u64::from(pox_reward_cycle_length), ) .expect("PANIC: PoX-2 first reward cycle begins *before* first burn block height"); @@ -992,11 +994,11 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // set burnchain params let consts_setter = PrincipalData::from(pox_2_contract_id.clone()); let params = vec![ - Value::UInt(first_block_height as u128), - Value::UInt(pox_prepare_length as u128), - Value::UInt(pox_reward_cycle_length as u128), - Value::UInt(pox_rejection_fraction as u128), - Value::UInt(pox_2_first_cycle as u128), + Value::UInt(u128::from(first_block_height)), + Value::UInt(u128::from(pox_prepare_length)), + Value::UInt(u128::from(pox_reward_cycle_length)), + Value::UInt(u128::from(pox_rejection_fraction)), + Value::UInt(u128::from(pox_2_first_cycle)), ]; let (_, _, _burnchain_params_events) 
= tx_conn @@ -1169,9 +1171,9 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { let pox_3_activation_height = self.burn_state_db.get_pox_3_activation_height(); let pox_3_first_cycle = PoxConstants::static_block_height_to_reward_cycle( - pox_3_activation_height as u64, - first_block_height as u64, - pox_reward_cycle_length as u64, + u64::from(pox_3_activation_height), + u64::from(first_block_height), + u64::from(pox_reward_cycle_length), ) .expect("PANIC: PoX-3 first reward cycle begins *before* first burn block height") + 1; @@ -1241,11 +1243,11 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { // set burnchain params let consts_setter = PrincipalData::from(pox_3_contract_id.clone()); let params = vec![ - Value::UInt(first_block_height as u128), - Value::UInt(pox_prepare_length as u128), - Value::UInt(pox_reward_cycle_length as u128), - Value::UInt(pox_rejection_fraction as u128), - Value::UInt(pox_3_first_cycle as u128), + Value::UInt(u128::from(first_block_height)), + Value::UInt(u128::from(pox_prepare_length)), + Value::UInt(u128::from(pox_reward_cycle_length)), + Value::UInt(u128::from(pox_rejection_fraction)), + Value::UInt(u128::from(pox_3_first_cycle)), ]; let (_, _, _burnchain_params_events) = tx_conn @@ -1277,6 +1279,143 @@ impl<'a, 'b> ClarityBlockConnection<'a, 'b> { }) } + pub fn initialize_epoch_2_5(&mut self) -> Result, Error> { + // use the `using!` statement to ensure that the old cost_tracker is placed + // back in all branches after initialization + using!(self.cost_track, "cost tracker", |old_cost_tracker| { + // epoch initialization is *free*. + // NOTE: this also means that cost functions won't be evaluated. 
+ self.cost_track.replace(LimitedCostTracker::new_free()); + self.epoch = StacksEpochId::Epoch25; + self.as_transaction(|tx_conn| { + // bump the epoch in the Clarity DB + tx_conn + .with_clarity_db(|db| { + db.set_clarity_epoch_version(StacksEpochId::Epoch25); + Ok(()) + }) + .unwrap(); + + // require 2.5 rules henceforth in this connection as well + tx_conn.epoch = StacksEpochId::Epoch25; + }); + + /////////////////// .pox-4 //////////////////////// + let mainnet = self.mainnet; + let first_block_height = self.burn_state_db.get_burn_start_height(); + let pox_prepare_length = self.burn_state_db.get_pox_prepare_length(); + let pox_reward_cycle_length = self.burn_state_db.get_pox_reward_cycle_length(); + let pox_rejection_fraction = self.burn_state_db.get_pox_rejection_fraction(); + let pox_4_activation_height = self.burn_state_db.get_pox_4_activation_height(); + + let pox_4_first_cycle = PoxConstants::static_block_height_to_reward_cycle( + u64::from(pox_4_activation_height), + u64::from(first_block_height), + u64::from(pox_reward_cycle_length), + ) + .expect("PANIC: PoX-4 first reward cycle begins *before* first burn block height") + + 1; + + // get tx_version & boot code account information for pox-4 contract init + let tx_version = if mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + + let boot_code_address = boot_code_addr(mainnet); + + let boot_code_auth = TransactionAuth::Standard( + TransactionSpendingCondition::Singlesig(SinglesigSpendingCondition { + signer: boot_code_address.bytes.clone(), + hash_mode: SinglesigHashMode::P2PKH, + key_encoding: TransactionPublicKeyEncoding::Uncompressed, + nonce: 0, + tx_fee: 0, + signature: MessageSignature::empty(), + }), + ); + + let boot_code_nonce = self.with_clarity_db_readonly(|db| { + db.get_account_nonce(&boot_code_address.clone().into()) + }); + + let boot_code_account = StacksAccount { + principal: PrincipalData::Standard(boot_code_address.into()), + nonce: boot_code_nonce, + 
stx_balance: STXBalance::zero(), + }; + + let pox_4_code = if mainnet { + &*POX_4_MAINNET_CODE + } else { + &*POX_4_TESTNET_CODE + }; + + let pox_4_contract_id = boot_code_id(POX_4_NAME, mainnet); + + let payload = TransactionPayload::SmartContract( + TransactionSmartContract { + name: ContractName::try_from(POX_4_NAME) + .expect("FATAL: invalid boot-code contract name"), + code_body: StacksString::from_str(pox_4_code) + .expect("FATAL: invalid boot code body"), + }, + Some(ClarityVersion::Clarity2), + ); + + let pox_4_contract_tx = + StacksTransaction::new(tx_version.clone(), boot_code_auth.clone(), payload); + + let pox_4_initialization_receipt = self.as_transaction(|tx_conn| { + // initialize with a synthetic transaction + debug!("Instantiate {} contract", &pox_4_contract_id); + let receipt = StacksChainState::process_transaction_payload( + tx_conn, + &pox_4_contract_tx, + &boot_code_account, + ASTRules::PrecheckSize, + ) + .expect("FATAL: Failed to process PoX 4 contract initialization"); + + // set burnchain params + let consts_setter = PrincipalData::from(pox_4_contract_id.clone()); + let params = vec![ + Value::UInt(u128::from(first_block_height)), + Value::UInt(u128::from(pox_prepare_length)), + Value::UInt(u128::from(pox_reward_cycle_length)), + Value::UInt(u128::from(pox_rejection_fraction)), + Value::UInt(u128::from(pox_4_first_cycle)), + ]; + + let (_, _, _burnchain_params_events) = tx_conn + .run_contract_call( + &consts_setter, + None, + &pox_4_contract_id, + "set-burnchain-parameters", + &params, + |_, _| false, + ) + .expect("Failed to set burnchain parameters in PoX-4 contract"); + + receipt + }); + + if pox_4_initialization_receipt.result != Value::okay_true() + || pox_4_initialization_receipt.post_condition_aborted + { + panic!( + "FATAL: Failure processing PoX 4 contract initialization: {:#?}", + &pox_4_initialization_receipt + ); + } + + debug!("Epoch 2.5 initialized"); + (old_cost_tracker, Ok(vec![pox_4_initialization_receipt])) + }) + } + + pub fn 
initialize_epoch_3_0(&mut self) -> Result, Error> { // use the `using!` statement to ensure that the old cost_tracker is placed // back in all branches after initialization @@ -2467,11 +2606,15 @@ mod tests { self.get_stacks_epoch(0) } + fn get_v1_unlock_height(&self) -> u32 { + u32::MAX + } + fn get_v2_unlock_height(&self) -> u32 { u32::MAX } - fn get_v1_unlock_height(&self) -> u32 { + fn get_v3_unlock_height(&self) -> u32 { u32::MAX } @@ -2479,6 +2622,10 @@ mod tests { u32::MAX } + fn get_pox_4_activation_height(&self) -> u32 { + u32::MAX + } + fn get_pox_prepare_length(&self) -> u32 { panic!("BlockLimitBurnStateDB should not return PoX info"); } diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 9927760cd9..bd0b914be7 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -502,10 +502,18 @@ impl BurnStateDB for SortitionHandleTx<'_> { self.context.pox_constants.v2_unlock_height } + fn get_v3_unlock_height(&self) -> u32 { + self.context.pox_constants.v3_unlock_height + } + fn get_pox_3_activation_height(&self) -> u32 { self.context.pox_constants.pox_3_activation_height } + fn get_pox_4_activation_height(&self) -> u32 { + self.context.pox_constants.pox_4_activation_height + } + fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length } @@ -621,10 +629,18 @@ impl BurnStateDB for SortitionDBConn<'_> { self.context.pox_constants.v2_unlock_height } + fn get_v3_unlock_height(&self) -> u32 { + self.context.pox_constants.v3_unlock_height + } + fn get_pox_3_activation_height(&self) -> u32 { self.context.pox_constants.pox_3_activation_height } + fn get_pox_4_activation_height(&self) -> u32 { + self.context.pox_constants.pox_4_activation_height + } + fn get_pox_prepare_length(&self) -> u32 { self.context.pox_constants.prepare_length } diff --git a/stackslib/src/clarity_vm/tests/large_contract.rs b/stackslib/src/clarity_vm/tests/large_contract.rs 
index 0adb308261..f68a0e4f83 100644 --- a/stackslib/src/clarity_vm/tests/large_contract.rs +++ b/stackslib/src/clarity_vm/tests/large_contract.rs @@ -148,7 +148,9 @@ fn test_simple_token_system(#[case] version: ClarityVersion, #[case] epoch: Stac StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 => { + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => { let (ast, _analysis) = tx .analyze_smart_contract( &boot_code_id("costs-3", false), diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index fcb87215d5..5000d7a254 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -45,6 +45,7 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::ConsensusHash; +use crate::chainstate::nakamoto::NakamotoBlock; use crate::chainstate::stacks::events::StacksTransactionReceipt; use crate::chainstate::stacks::miner::TransactionEvent; use crate::chainstate::stacks::StacksBlock; @@ -241,6 +242,14 @@ pub trait MemPoolEventDispatcher { anchor_block_consensus_hash: ConsensusHash, anchor_block: BlockHeaderHash, ); + fn mined_nakamoto_block_event( + &self, + target_burn_height: u64, + block: &NakamotoBlock, + block_size_bytes: u64, + consumed: &ExecutionCost, + tx_results: Vec, + ); } #[derive(Debug, PartialEq, Clone)] diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index f5f75b56ac..b658e84785 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -62,14 +62,12 @@ pub const PEER_VERSION_EPOCH_2_1: u8 = 0x06; pub const PEER_VERSION_EPOCH_2_2: u8 = 0x07; pub const PEER_VERSION_EPOCH_2_3: u8 = 0x08; pub const PEER_VERSION_EPOCH_2_4: u8 = 0x09; - -/// How many tenures back from the chain tip does the node accept -/// blocks from a miner. 
-pub const NAKAMOTO_TENURE_BLOCK_ACCEPTANCE_PERIOD: usize = 3; +pub const PEER_VERSION_EPOCH_2_5: u8 = 0x0a; +pub const PEER_VERSION_EPOCH_3_0: u8 = 0x0b; // this should be updated to the latest network epoch version supported by // this node. this will be checked by the `validate_epochs()` method. -pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_2_4 as u32; +pub const PEER_NETWORK_EPOCH: u32 = PEER_VERSION_EPOCH_3_0 as u32; // set the fourth byte of the peer version pub const PEER_VERSION_MAINNET: u32 = PEER_VERSION_MAINNET_MAJOR | PEER_NETWORK_EPOCH; @@ -128,6 +126,10 @@ pub const BITCOIN_MAINNET_STACKS_22_BURN_HEIGHT: u64 = 787_651; pub const BITCOIN_MAINNET_STACKS_23_BURN_HEIGHT: u64 = 788_240; /// This is Epoch-2.3, now Epoch-2.4, activation height proposed in SIP-024 pub const BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT: u64 = 791_551; +/// This is Epoch-2.5, activation height proposed in SIP-021 +pub const BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT: u64 = 1_000_000; +/// This is Epoch-3.0, activation height proposed in SIP-021 +pub const BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT: u64 = 2_000_000; pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 2000000; pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1622691840; @@ -138,6 +140,8 @@ pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_431_300; pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_432_545; +pub const BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT: u64 = 20_000_000; +pub const BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT: u64 = 30_000_000; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; @@ -187,6 +191,11 @@ pub const POX_V2_MAINNET_EARLY_UNLOCK_HEIGHT: u32 = pub const POX_V2_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = (BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT as u32) + 1; +pub const POX_V3_MAINNET_EARLY_UNLOCK_HEIGHT: 
u32 = + (BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT as u32) + 1; +pub const POX_V3_TESTNET_EARLY_UNLOCK_HEIGHT: u32 = + (BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT as u32) + 1; + /// Burn block height at which the ASTRules::PrecheckSize becomes the default behavior on mainnet pub const AST_RULES_PRECHECK_SIZE: u64 = 752000; // on or about Aug 30 2022 @@ -251,7 +260,7 @@ pub fn check_fault_injection(fault_name: &str) -> bool { } lazy_static! { - pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 7] = [ + pub static ref STACKS_EPOCHS_MAINNET: [StacksEpoch; 9] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -297,15 +306,29 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch24, start_height: BITCOIN_MAINNET_STACKS_24_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_4 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: BITCOIN_MAINNET_STACKS_25_BURN_HEIGHT, + end_height: BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: BITCOIN_MAINNET_STACKS_30_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0 + }, ]; } lazy_static! { - pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 7] = [ + pub static ref STACKS_EPOCHS_TESTNET: [StacksEpoch; 9] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -351,15 +374,29 @@ lazy_static! 
{ StacksEpoch { epoch_id: StacksEpochId::Epoch24, start_height: BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT, - end_height: STACKS_EPOCH_MAX, + end_height: BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_4 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT, + end_height: BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0 + }, ]; } lazy_static! { - pub static ref STACKS_EPOCHS_REGTEST: [StacksEpoch; 7] = [ + pub static ref STACKS_EPOCHS_REGTEST: [StacksEpoch; 9] = [ StacksEpoch { epoch_id: StacksEpochId::Epoch10, start_height: 0, @@ -405,10 +442,24 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch24, start_height: 5000, - end_height: STACKS_EPOCH_MAX, + end_height: 6000, block_limit: HELIUM_BLOCK_LIMIT_20.clone(), network_epoch: PEER_VERSION_EPOCH_2_4 }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 6000, + end_height: 7000, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5 + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: 7000, + end_height: STACKS_EPOCH_MAX, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0 + }, ]; } @@ -432,9 +483,13 @@ pub static STACKS_EPOCH_2_3_MARKER: u8 = 0x08; /// *or greater*. pub static STACKS_EPOCH_2_4_MARKER: u8 = 0x09; +/// Stacks 2.5 epoch marker. All block-commits in 2.5 must have a memo bitfield with this value +/// *or greater*. +pub static STACKS_EPOCH_2_5_MARKER: u8 = 0x0a; + /// Stacks 3.0 epoch marker.
All block-commits in 3.0 must have a memo bitfield with this value /// *or greater*. -pub static STACKS_EPOCH_3_0_MARKER: u8 = 0x0a; +pub static STACKS_EPOCH_3_0_MARKER: u8 = 0x0b; #[test] fn test_ord_for_stacks_epoch() { @@ -457,6 +512,32 @@ fn test_ord_for_stacks_epoch() { assert_eq!(epochs[4].cmp(&epochs[1]), Ordering::Greater); assert_eq!(epochs[4].cmp(&epochs[2]), Ordering::Greater); assert_eq!(epochs[4].cmp(&epochs[3]), Ordering::Greater); + assert_eq!(epochs[5].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[5].cmp(&epochs[1]), Ordering::Greater); + assert_eq!(epochs[5].cmp(&epochs[2]), Ordering::Greater); + assert_eq!(epochs[5].cmp(&epochs[3]), Ordering::Greater); + assert_eq!(epochs[5].cmp(&epochs[4]), Ordering::Greater); + assert_eq!(epochs[6].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[6].cmp(&epochs[1]), Ordering::Greater); + assert_eq!(epochs[6].cmp(&epochs[2]), Ordering::Greater); + assert_eq!(epochs[6].cmp(&epochs[3]), Ordering::Greater); + assert_eq!(epochs[6].cmp(&epochs[4]), Ordering::Greater); + assert_eq!(epochs[6].cmp(&epochs[5]), Ordering::Greater); + assert_eq!(epochs[7].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[7].cmp(&epochs[1]), Ordering::Greater); + assert_eq!(epochs[7].cmp(&epochs[2]), Ordering::Greater); + assert_eq!(epochs[7].cmp(&epochs[3]), Ordering::Greater); + assert_eq!(epochs[7].cmp(&epochs[4]), Ordering::Greater); + assert_eq!(epochs[7].cmp(&epochs[5]), Ordering::Greater); + assert_eq!(epochs[7].cmp(&epochs[6]), Ordering::Greater); + assert_eq!(epochs[8].cmp(&epochs[0]), Ordering::Greater); + assert_eq!(epochs[8].cmp(&epochs[1]), Ordering::Greater); + assert_eq!(epochs[8].cmp(&epochs[2]), Ordering::Greater); + assert_eq!(epochs[8].cmp(&epochs[3]), Ordering::Greater); + assert_eq!(epochs[8].cmp(&epochs[4]), Ordering::Greater); + assert_eq!(epochs[8].cmp(&epochs[5]), Ordering::Greater); + assert_eq!(epochs[8].cmp(&epochs[6]), Ordering::Greater); + assert_eq!(epochs[8].cmp(&epochs[7]), 
Ordering::Greater); } #[test] @@ -516,7 +597,13 @@ pub trait StacksEpochExtension { #[cfg(test)] fn unit_test_2_4(epoch_2_0_block_height: u64) -> Vec; #[cfg(test)] + fn unit_test_2_5(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] + fn unit_test_3_0(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] fn unit_test_2_1_only(epoch_2_0_block_height: u64) -> Vec; + #[cfg(test)] + fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec; fn all( epoch_2_0_block_height: u64, epoch_2_05_block_height: u64, @@ -909,6 +996,225 @@ impl StacksEpochExtension for StacksEpoch { ] } + #[cfg(test)] + fn unit_test_2_5(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test_2_5 first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length: 205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: 
ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 20, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 20, + end_height: first_burnchain_height + 24, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: first_burnchain_height + 24, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + ] + } + + #[cfg(test)] + fn unit_test_3_0(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test_3_0 first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: first_burnchain_height, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: first_burnchain_height, + end_height: first_burnchain_height + 4, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: first_burnchain_height + 4, + end_height: first_burnchain_height + 8, + block_limit: ExecutionCost { + write_length: 205205, + write_count: 205205, + read_length:
205205, + read_count: 205205, + runtime: 205205, + }, + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: first_burnchain_height + 8, + end_height: first_burnchain_height + 12, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: first_burnchain_height + 12, + end_height: first_burnchain_height + 16, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: first_burnchain_height + 16, + end_height: first_burnchain_height + 20, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: first_burnchain_height + 20, + end_height: first_burnchain_height + 24, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: first_burnchain_height + 24, + end_height: first_burnchain_height + 28, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count: 210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height + 28, + end_height: STACKS_EPOCH_MAX, + block_limit: ExecutionCost { + write_length: 210210, + write_count: 210210, + read_length: 210210, + read_count:
210210, + runtime: 210210, + }, + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + ] + } + #[cfg(test)] fn unit_test_2_1_only(first_burnchain_height: u64) -> Vec { info!( @@ -960,6 +1266,80 @@ impl StacksEpochExtension for StacksEpoch { ] } + #[cfg(test)] + fn unit_test_3_0_only(first_burnchain_height: u64) -> Vec { + info!( + "StacksEpoch unit_test first_burn_height = {}", + first_burnchain_height + ); + + vec![ + StacksEpoch { + epoch_id: StacksEpochId::Epoch10, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_1_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch20, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_0, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch2_05, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_05, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch21, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_1, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch22, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_2, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch23, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_3, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch24, + start_height: 0, + end_height: 0, + block_limit: ExecutionCost::max_value(), + network_epoch: PEER_VERSION_EPOCH_2_4, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch25, + start_height: 0, + end_height: first_burnchain_height, + block_limit: BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_2_5, + }, + StacksEpoch { + epoch_id: StacksEpochId::Epoch30, + start_height: first_burnchain_height, + end_height: STACKS_EPOCH_MAX, + block_limit:
BLOCK_LIMIT_MAINNET_21.clone(), + network_epoch: PEER_VERSION_EPOCH_3_0, + }, + ] + } + #[cfg(test)] fn unit_test(stacks_epoch_id: StacksEpochId, first_burnchain_height: u64) -> Vec { match stacks_epoch_id { @@ -971,7 +1351,8 @@ impl StacksEpochExtension for StacksEpoch { StacksEpochId::Epoch22 => StacksEpoch::unit_test_2_2(first_burnchain_height), StacksEpochId::Epoch23 => StacksEpoch::unit_test_2_3(first_burnchain_height), StacksEpochId::Epoch24 => StacksEpoch::unit_test_2_4(first_burnchain_height), - StacksEpochId::Epoch30 => todo!(), + StacksEpochId::Epoch25 => StacksEpoch::unit_test_2_5(first_burnchain_height), + StacksEpochId::Epoch30 => StacksEpoch::unit_test_3_0(first_burnchain_height), } } diff --git a/stackslib/src/cost_estimates/pessimistic.rs b/stackslib/src/cost_estimates/pessimistic.rs index cff9914638..0ae3b8b833 100644 --- a/stackslib/src/cost_estimates/pessimistic.rs +++ b/stackslib/src/cost_estimates/pessimistic.rs @@ -234,6 +234,8 @@ impl PessimisticEstimator { StacksEpochId::Epoch23 => ":2.1", // reuse cost estimates in Epoch24 StacksEpochId::Epoch24 => ":2.1", + // reuse cost estimates in Epoch25 + StacksEpochId::Epoch25 => ":2.1", // reuse cost estimates in Epoch30 StacksEpochId::Epoch30 => ":2.1", }; diff --git a/stackslib/src/cost_estimates/tests/cost_estimators.rs b/stackslib/src/cost_estimates/tests/cost_estimators.rs index a3a4fd7745..037631aab6 100644 --- a/stackslib/src/cost_estimates/tests/cost_estimators.rs +++ b/stackslib/src/cost_estimates/tests/cost_estimators.rs @@ -79,7 +79,7 @@ fn make_dummy_coinbase_tx() -> StacksTransactionReceipt { StacksTransactionReceipt::from_coinbase(StacksTransaction::new( TransactionVersion::Mainnet, TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), - TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None), )) } diff --git a/stackslib/src/cost_estimates/tests/fee_medians.rs
b/stackslib/src/cost_estimates/tests/fee_medians.rs index 93075b175a..c2fac17677 100644 --- a/stackslib/src/cost_estimates/tests/fee_medians.rs +++ b/stackslib/src/cost_estimates/tests/fee_medians.rs @@ -57,7 +57,7 @@ fn make_dummy_coinbase_tx() -> StacksTransaction { StacksTransaction::new( TransactionVersion::Mainnet, TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), - TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None), ) } diff --git a/stackslib/src/cost_estimates/tests/fee_scalar.rs b/stackslib/src/cost_estimates/tests/fee_scalar.rs index ecef42a9fd..20fa5a677d 100644 --- a/stackslib/src/cost_estimates/tests/fee_scalar.rs +++ b/stackslib/src/cost_estimates/tests/fee_scalar.rs @@ -77,7 +77,7 @@ fn make_dummy_coinbase_tx() -> StacksTransaction { StacksTransaction::new( TransactionVersion::Mainnet, TransactionAuth::Standard(TransactionSpendingCondition::new_initial_sighash()), - TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None), ) } diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index c7e7af0298..7b4f1a5a46 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -46,9 +46,11 @@ use blockstack_lib::burnchains::Burnchain; use blockstack_lib::burnchains::Txid; use blockstack_lib::burnchains::BLOCKSTACK_MAGIC_MAINNET; use blockstack_lib::chainstate::burn::ConsensusHash; +use blockstack_lib::chainstate::nakamoto::NakamotoChainState; use blockstack_lib::chainstate::stacks::db::blocks::DummyEventDispatcher; use blockstack_lib::chainstate::stacks::db::blocks::StagingBlock; use blockstack_lib::chainstate::stacks::db::ChainStateBootData; +use blockstack_lib::chainstate::stacks::db::StacksBlockHeaderTypes; use blockstack_lib::chainstate::stacks::index::marf::MARFOpenOpts; use blockstack_lib::chainstate::stacks::index::marf::MarfConnection; use 
blockstack_lib::chainstate::stacks::index::marf::MARF; @@ -327,178 +329,6 @@ fn main() { process::exit(0); } - if argv[1] == "analyze-fees" { - if argv.len() < 4 { - eprintln!("Usage: {} analyze-fees CHAIN_STATE_DIR NUM_BLOCKS", argv[0]); - process::exit(1); - } - - let chain_state_path = format!("{}/mainnet/chainstate/", &argv[2]); - let sort_db_path = format!("{}/mainnet/burnchain/sortition", &argv[2]); - let (chainstate, _) = - StacksChainState::open(true, CHAIN_ID_MAINNET, &chain_state_path, None).unwrap(); - let sort_db = SortitionDB::open(&sort_db_path, false, PoxConstants::mainnet_default()) - .expect(&format!("Failed to open {}", &sort_db_path)); - - let num_blocks = argv[3].parse::().unwrap(); - - let mut block_info = chainstate - .get_stacks_chain_tip(&sort_db) - .unwrap() - .expect("FATAL: no chain tip"); - block_info.block_data = StacksChainState::load_block_bytes( - &chainstate.blocks_path, - &block_info.consensus_hash, - &block_info.anchored_block_hash, - ) - .unwrap() - .expect("No such block"); - - let mut tx_fees = HashMap::new(); - let mut tx_mined_heights = HashMap::new(); - let mut tx_mined_deltas: HashMap> = HashMap::new(); - - for _i in 0..num_blocks { - let block_hash = StacksBlockHeader::make_index_block_hash( - &block_info.consensus_hash, - &block_info.anchored_block_hash, - ); - debug!("Consider block {} ({} of {})", &block_hash, _i, num_blocks); - - let block = - StacksBlock::consensus_deserialize(&mut io::Cursor::new(&block_info.block_data)) - .map_err(|_e| { - eprintln!("Failed to decode block {}", &block_hash); - process::exit(1); - }) - .unwrap(); - - let microblocks = - StacksChainState::find_parent_microblock_stream(chainstate.db(), &block_info) - .unwrap() - .unwrap_or(vec![]); - - let mut txids_at_height = vec![]; - - for mblock in microblocks.iter() { - for tx in mblock.txs.iter() { - tx_fees.insert(tx.txid(), tx.get_tx_fee()); - txids_at_height.push(tx.txid()); - } - } - - for tx in block.txs.iter() { - if tx.get_tx_fee() > 0 { 
- // not a coinbase - tx_fees.insert(tx.txid(), tx.get_tx_fee()); - txids_at_height.push(tx.txid()); - } - } - - tx_mined_heights.insert(block_info.height, txids_at_height); - - // next block - block_info = match StacksChainState::load_staging_block_info( - chainstate.db(), - &StacksBlockHeader::make_index_block_hash( - &block_info.parent_consensus_hash, - &block_info.parent_anchored_block_hash, - ), - ) - .unwrap() - { - Some(blk) => blk, - None => { - break; - } - }; - block_info.block_data = StacksChainState::load_block_bytes( - &chainstate.blocks_path, - &block_info.consensus_hash, - &block_info.anchored_block_hash, - ) - .unwrap() - .expect("No such block"); - } - - let estimator = Box::new(UnitEstimator); - let metric = Box::new(UnitMetric); - let mempool_db = - MemPoolDB::open(true, CHAIN_ID_MAINNET, &chain_state_path, estimator, metric) - .expect("Failed to open mempool db"); - - let mut total_txs = 0; - for (_, txids) in tx_mined_heights.iter() { - total_txs += txids.len(); - } - - let mut tx_cnt = 0; - for (mined_height, txids) in tx_mined_heights.iter() { - for txid in txids.iter() { - tx_cnt += 1; - if tx_cnt % 100 == 0 { - debug!("Check tx {} of {}", tx_cnt, total_txs); - } - - if let Some(txinfo) = MemPoolDB::get_tx(&mempool_db.db, txid).unwrap() { - let delta = mined_height.saturating_sub(txinfo.metadata.block_height); - if let Some(txids_at_delta) = tx_mined_deltas.get_mut(&delta) { - txids_at_delta.push(txid.clone()); - } else { - tx_mined_deltas.insert(delta, vec![txid.clone()]); - } - } - } - } - - let mut deltas: Vec<_> = tx_mined_deltas.keys().collect(); - deltas.sort(); - - let mut reports = vec![]; - for delta in deltas { - let mut delta_tx_fees = vec![]; - let empty_txids = vec![]; - let txids = tx_mined_deltas.get(&delta).unwrap_or(&empty_txids); - if txids.len() == 0 { - continue; - } - for txid in txids.iter() { - delta_tx_fees.push(*tx_fees.get(txid).unwrap_or(&0)); - } - delta_tx_fees.sort(); - let total_tx_fees = 
delta_tx_fees.iter().fold(0, |acc, x| acc + x); - - let avg_tx_fee = if delta_tx_fees.len() > 0 { - total_tx_fees / (delta_tx_fees.len() as u64) - } else { - 0 - }; - let min_tx_fee = *delta_tx_fees.iter().min().unwrap_or(&0); - let median_tx_fee = delta_tx_fees[delta_tx_fees.len() / 2]; - let percent_90_tx_fee = delta_tx_fees[(delta_tx_fees.len() * 90) / 100]; - let percent_95_tx_fee = delta_tx_fees[(delta_tx_fees.len() * 95) / 100]; - let percent_99_tx_fee = delta_tx_fees[(delta_tx_fees.len() * 99) / 100]; - let max_tx_fee = *delta_tx_fees.iter().max().unwrap_or(&0); - - reports.push(json!({ - "delta": format!("{}", delta), - "tx_total": format!("{}", delta_tx_fees.len()), - "tx_fees": json!({ - "avg": format!("{}", avg_tx_fee), - "min": format!("{}", min_tx_fee), - "max": format!("{}", max_tx_fee), - "p50": format!("{}", median_tx_fee), - "p90": format!("{}", percent_90_tx_fee), - "p95": format!("{}", percent_95_tx_fee), - "p99": format!("{}", percent_99_tx_fee), - }), - })); - } - - println!("{}", serde_json::Value::Array(reports).to_string()); - process::exit(0); - } - if argv[1] == "get-block-inventory" { if argv.len() < 3 { eprintln!( @@ -757,11 +587,13 @@ simulating a miner. let mut mempool_db = MemPoolDB::open(true, chain_id, &chain_state_path, estimator, metric) .expect("Failed to open mempool db"); - let stacks_block = chain_state.get_stacks_chain_tip(&sort_db).unwrap().unwrap(); + let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap(); let parent_header = StacksChainState::get_anchored_block_header_info( chain_state.db(), - &stacks_block.consensus_hash, - &stacks_block.anchored_block_hash, + &header_tip.consensus_hash, + &header_tip.anchored_header.block_hash(), ) .expect("Failed to load chain tip header info") .expect("Failed to load chain tip header info"); @@ -773,7 +605,7 @@ simulating a miner. 
let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Mainnet, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); coinbase_tx.chain_id = chain_id; @@ -1458,27 +1290,49 @@ simulating a miner. tx.commit().unwrap(); } - let stacks_chain_tip = chain_state.get_stacks_chain_tip(&sort_db).unwrap().unwrap(); + let header_tip = NakamotoChainState::get_canonical_block_header(chain_state.db(), &sort_db) + .unwrap() + .unwrap(); // Find ancestor block - let mut stacks_block = stacks_chain_tip.to_owned(); + let mut stacks_header = header_tip.to_owned(); loop { - let stacks_parent_block = chain_state - .get_stacks_block_parent(&stacks_block) - .unwrap() - .unwrap(); - if stacks_parent_block.height < mine_tip_height { + let parent_block_id = match stacks_header.anchored_header { + StacksBlockHeaderTypes::Nakamoto(ref nakamoto_header) => { + nakamoto_header.parent_block_id.clone() + } + StacksBlockHeaderTypes::Epoch2(ref epoch2_header) => { + let block_info = StacksChainState::load_staging_block( + chain_state.db(), + &chain_state.blocks_path, + &stacks_header.consensus_hash, + &epoch2_header.block_hash(), + ) + .unwrap() + .unwrap(); + StacksBlockId::new( + &block_info.parent_consensus_hash, + &epoch2_header.parent_block, + ) + } + }; + + let stacks_parent_header = + NakamotoChainState::get_block_header(chain_state.db(), &parent_block_id) + .unwrap() + .unwrap(); + if stacks_parent_header.anchored_header.height() < mine_tip_height { break; } - stacks_block = stacks_parent_block; + stacks_header = stacks_parent_header; } info!( "Found stacks_chain_tip with height {}", - stacks_chain_tip.height + header_tip.anchored_header.height() ); info!( "Mining off parent block with height {}", - stacks_block.height + header_tip.anchored_header.height() ); info!( @@ -1522,8 +1376,8 @@ simulating a miner. 
let result = mempool_db.submit( &mut chain_state, &sort_db, - &stacks_block.consensus_hash, - &stacks_block.anchored_block_hash, + &stacks_header.consensus_hash, + &stacks_header.anchored_header.block_hash(), &raw_tx, None, &ExecutionCost::max_value(), @@ -1549,10 +1403,12 @@ simulating a miner. let start = get_epoch_time_ms(); - let parent_header = StacksChainState::get_anchored_block_header_info( + let parent_header = NakamotoChainState::get_block_header( chain_state.db(), - &stacks_block.consensus_hash, - &stacks_block.anchored_block_hash, + &StacksBlockId::new( + &stacks_header.consensus_hash, + &stacks_header.anchored_header.block_hash(), + ), ) .expect("Failed to load chain tip header info") .expect("Failed to load chain tip header info"); @@ -1564,7 +1420,7 @@ simulating a miner. let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Mainnet, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); coinbase_tx.chain_id = chain_id; diff --git a/stackslib/src/net/download.rs b/stackslib/src/net/download.rs index 32eaa1882f..32dec4e1d5 100644 --- a/stackslib/src/net/download.rs +++ b/stackslib/src/net/download.rs @@ -47,6 +47,7 @@ use crate::burnchains::Burnchain; use crate::burnchains::BurnchainView; use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB, SortitionDBConn}; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as chainstate_error; use crate::chainstate::stacks::StacksBlockHeader; @@ -3294,18 +3295,20 @@ pub mod test { >| { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let stacks_tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); let parent_tip = 
match stacks_tip_opt { None => { StacksChainState::get_genesis_header_info(chainstate.db()).unwrap() } - Some(staging_block) => { + Some(header) => { let ic = sortdb.index_conn(); let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, &tip.sortition_id, - &staging_block.anchored_block_hash, + &header.anchored_header.block_hash(), ) .unwrap() .unwrap(); // succeeds because we don't fork diff --git a/stackslib/src/net/inv.rs b/stackslib/src/net/inv.rs index 0cc15583f1..0b3c0b63ff 100644 --- a/stackslib/src/net/inv.rs +++ b/stackslib/src/net/inv.rs @@ -1779,6 +1779,8 @@ impl PeerNetwork { // affirmation maps are compatible, so just resume scanning off of wherever we are at the // tip. + // NOTE: This code path only works in Stacks 2.x, but that's okay because this whole state + // machine is only used in Stacks 2.x let (consensus_hash, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()) .unwrap_or((ConsensusHash::empty(), BlockHeaderHash([0u8; 32]))); @@ -3109,6 +3111,8 @@ mod test { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); @@ -3138,6 +3142,8 @@ mod test { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut peer_inv = PeerBlocksInv::new(vec![0x01], vec![0x01], vec![0x01], 1, 1, 0); diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index fe01ca1c75..41def42dda 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -83,6 +83,7 @@ use crate::chainstate::burn::operations::PegOutFulfillOp; use crate::chainstate::burn::operations::PegOutRequestOp; use crate::chainstate::burn::{ConsensusHash, Opcodes}; use crate::chainstate::coordinator::Error as coordinator_error; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::blocks::MemPoolRejection; use crate::chainstate::stacks::index::Error as marf_error; use crate::chainstate::stacks::Error as chainstate_error; 
@@ -2432,7 +2433,9 @@ pub mod test { use crate::chainstate::stacks::tests::*; use crate::chainstate::stacks::StacksMicroblockHeader; use crate::chainstate::stacks::*; - use crate::chainstate::stacks::{db::accounts::MinerReward, events::StacksTransactionReceipt}; + use crate::chainstate::stacks::{ + db::accounts::MinerReward, events::StacksBlockEventData, events::StacksTransactionReceipt, + }; use crate::chainstate::*; use crate::core::StacksEpoch; use crate::core::StacksEpochExtension; @@ -2653,7 +2656,7 @@ pub mod test { #[derive(Clone)] pub struct TestEventObserverBlock { - pub block: StacksBlock, + pub block: StacksBlockEventData, pub metadata: StacksHeaderInfo, pub receipts: Vec, pub parent: StacksBlockId, @@ -2681,7 +2684,7 @@ pub mod test { impl BlockEventDispatcher for TestEventObserver { fn announce_block( &self, - block: &StacksBlock, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[events::StacksTransactionReceipt], parent: &StacksBlockId, @@ -2775,6 +2778,8 @@ pub mod test { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut spending_account = TestMinerFactory::new().next_miner( @@ -3330,12 +3335,13 @@ pub mod test { let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; - let stacks_tip_height = stacks_node - .chainstate - .get_stacks_chain_tip(&sortdb) - .unwrap() - .map(|blkdat| blkdat.height) - .unwrap_or(0); + let stacks_tip_height = NakamotoChainState::get_canonical_block_header( + stacks_node.chainstate.db(), + &sortdb, + ) + .unwrap() + .map(|hdr| hdr.anchored_header.height()) + .unwrap_or(0); let ibd = TestPeer::infer_initial_burnchain_block_download( &self.config.burnchain, stacks_tip_height, @@ -3382,12 +3388,13 @@ pub mod test { let burn_tip_height = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; - let stacks_tip_height = stacks_node - .chainstate - .get_stacks_chain_tip(&sortdb) - .unwrap() - .map(|blkdat| blkdat.height) 
- .unwrap_or(0); + let stacks_tip_height = NakamotoChainState::get_canonical_block_header( + stacks_node.chainstate.db(), + &sortdb, + ) + .unwrap() + .map(|hdr| hdr.anchored_header.height()) + .unwrap_or(0); let ibd = TestPeer::infer_initial_burnchain_block_download( &self.config.burnchain, stacks_tip_height, @@ -3492,6 +3499,18 @@ pub mod test { } } + /// Generate and commit the next burnchain block with the given block operations. + /// * if `set_consensus_hash` is true, then each op's consensus_hash field will be set to + /// that of the resulting block snapshot. + /// * if `set_burn_hash` is true, then each op's burnchain header hash field will be set to + /// that of the resulting block snapshot. + /// + /// Returns ( + /// burnchain tip block height, + /// burnchain tip block hash, + /// burnchain tip consensus hash, + /// Option + /// ) fn inner_next_burnchain_block( &mut self, mut blockstack_ops: Vec, @@ -3504,8 +3523,12 @@ pub mod test { Option, ) { let sortdb = self.sortdb.take().unwrap(); - let (block_height, block_hash) = { + let (block_height, block_hash, epoch_id) = { let tip = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn()).unwrap(); + let epoch_id = SortitionDB::get_stacks_epoch(&sortdb.conn(), tip.block_height + 1) + .unwrap() + .unwrap() + .epoch_id; if set_consensus_hash { TestPeer::set_ops_consensus_hash(&mut blockstack_ops, &tip.consensus_hash); @@ -3570,11 +3593,21 @@ pub mod test { ) .unwrap(); - (block_header.block_height, block_header_hash) + (block_header.block_height, block_header_hash, epoch_id) }; - let missing_pox_anchor_block_hash_opt = - self.coord.handle_new_burnchain_block().unwrap(); + let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 { + self.coord + .handle_new_burnchain_block() + .unwrap() + .into_missing_block_hash() + } else { + if self.coord.handle_new_nakamoto_burnchain_block().unwrap() { + None + } else { + Some(BlockHeaderHash([0x00; 32])) + } + }; let pox_id = { let ic = 
sortdb.index_conn(); @@ -3600,6 +3633,8 @@ pub mod test { ) } + /// Pre-process an epoch 2.x Stacks block. + /// Validate it and store it to staging. pub fn preprocess_stacks_block(&mut self, block: &StacksBlock) -> Result { let sortdb = self.sortdb.take().unwrap(); let mut node = self.stacks_node.take().unwrap(); @@ -3664,6 +3699,8 @@ pub mod test { res } + /// Preprocess epoch 2.x microblocks. + /// Validate them and store them to staging. pub fn preprocess_stacks_microblocks( &mut self, microblocks: &Vec, @@ -3714,6 +3751,8 @@ pub mod test { res } + /// Store the given epoch 2.x Stacks block and microblock to staging, and then try and + /// process them. pub fn process_stacks_epoch_at_tip( &mut self, block: &StacksBlock, @@ -3747,6 +3786,8 @@ pub mod test { self.stacks_node = Some(node); } + /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, + /// using the given sortition DB as well, and then try and process them. fn inner_process_stacks_epoch_at_tip( &mut self, sortdb: &SortitionDB, @@ -3777,6 +3818,8 @@ pub mod test { Ok(()) } + /// Store the given epoch 2.x Stacks block and microblock to the given node's staging, + /// and then try and process them. pub fn process_stacks_epoch_at_tip_checked( &mut self, block: &StacksBlock, @@ -3791,6 +3834,8 @@ pub mod test { res } + /// Accept a new Stacks block and microblocks via the relayer, and then try to process + /// them. pub fn process_stacks_epoch( &mut self, block: &StacksBlock, @@ -3951,13 +3996,13 @@ pub mod test { } /// Make a tenure with the given transactions. Creates a coinbase tx with the given nonce, and then increments - /// the provided reference. + /// the provided reference. 
pub fn tenure_with_txs( &mut self, txs: &[StacksTransaction], coinbase_nonce: &mut usize, ) -> StacksBlockId { - let microblock_privkey = StacksPrivateKey::new(); + let microblock_privkey = self.miner.next_microblock_privkey(); let microblock_pubkeyhash = Hash160::from_node_public_key(&StacksPublicKey::from_private(µblock_privkey)); let tip = @@ -4014,7 +4059,8 @@ pub mod test { tip_id } - // Make a tenure + /// Make a tenure, using `tenure_builder` to generate a Stacks block and a list of + /// microblocks. pub fn make_tenure( &mut self, mut tenure_builder: F, @@ -4161,7 +4207,7 @@ pub mod test { ) } - // have this peer produce an anchored block and microblock tail using its internal miner. + /// Produce a default, non-empty tenure for epoch 2.x pub fn make_default_tenure( &mut self, ) -> ( diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 34c01502f7..b90d89340e 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -93,6 +93,8 @@ use crate::net::*; use crate::util_lib::db::DBConn; use crate::util_lib::db::DBTx; use crate::util_lib::db::Error as db_error; +use stacks_common::consts::FIRST_BURNCHAIN_CONSENSUS_HASH; +use stacks_common::consts::FIRST_STACKS_BLOCK_HASH; use stacks_common::types::chainstate::{PoxId, SortitionId}; /// inter-thread request to send a p2p message from another thread in this program. @@ -3978,6 +3980,8 @@ impl PeerNetwork { // hint to the downloader to start scanning at the sortition // height we just synchronized + // NOTE: this only works in Stacks 2.x. 
+ // Nakamoto uses a different state machine let start_download_sortition = if let Some(ref inv_state) = self.inv_state { @@ -3986,7 +3990,6 @@ impl PeerNetwork { sortdb.conn(), ) .expect("FATAL: failed to load canonical stacks chain tip hash from sortition DB"); - let stacks_tip_sortition_height = SortitionDB::get_block_snapshot_consensus( sortdb.conn(), @@ -5616,8 +5619,16 @@ impl PeerNetwork { network_result: &mut NetworkResult, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result<(), net_error> { - let (canonical_consensus_hash, canonical_block_hash) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn())?; + let (canonical_consensus_hash, canonical_block_hash) = if let Some(header) = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? + { + (header.consensus_hash, header.anchored_header.block_hash()) + } else { + ( + FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), + FIRST_STACKS_BLOCK_HASH.clone(), + ) + }; let sn = SortitionDB::get_canonical_burn_chain_tip(&sortdb.conn())?; diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index def67166c7..8fb5628352 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -43,6 +43,8 @@ use crate::chainstate::burn::BlockSnapshot; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::coordinator::comm::CoordinatorChannels; use crate::chainstate::coordinator::BlockEventDispatcher; +use crate::chainstate::nakamoto::NakamotoBlock; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use crate::chainstate::stacks::db::{StacksChainState, StacksEpochReceipt, StacksHeaderInfo}; use crate::chainstate::stacks::events::StacksTransactionReceipt; @@ -660,6 +662,81 @@ impl Relayer { Ok(res) } + /// Insert a staging Nakamoto block that got relayed to us somehow -- e.g. uploaded via http, + /// downloaded by us, or pushed via p2p. 
+ /// Return Ok(true) if we stored it, Ok(false) if we didn't + pub fn process_new_nakamoto_block( + sort_handle: &SortitionHandleConn, + chainstate: &mut StacksChainState, + block: NakamotoBlock, + ) -> Result { + debug!( + "Handle incoming Nakamoto block {}/{}", + &block.header.consensus_hash, + &block.header.block_hash() + ); + + // do we have this block? don't lock the DB needlessly if so. + if let Some(_) = + NakamotoChainState::get_block_header(chainstate.db(), &block.header.block_id())? + { + debug!("Already have Nakamoto block {}", &block.header.block_id()); + return Ok(false); + } + + let block_sn = + SortitionDB::get_block_snapshot_consensus(sort_handle, &block.header.consensus_hash)? + .ok_or(chainstate_error::DBError(db_error::NotFoundError))?; + + // NOTE: it's `+ 1` because the first Nakamoto block is built atop the last epoch 2.x + // tenure, right after the last 2.x sortition + let epoch_id = SortitionDB::get_stacks_epoch(sort_handle, block_sn.block_height + 1)? + .expect("FATAL: no epoch defined") + .epoch_id; + + if epoch_id < StacksEpochId::Epoch30 { + error!("Nakamoto blocks are not supported in this epoch"); + return Err(chainstate_error::InvalidStacksBlock( + "Nakamoto blocks are not supported in this epoch".into(), + )); + } + + // don't relay this block if it's using the wrong AST rules (this would render at least one of its + // txs problematic). 
+ if !Relayer::static_check_problematic_relayed_nakamoto_block( + chainstate.mainnet, + epoch_id, + &block, + ASTRules::PrecheckSize, + ) { + warn!( + "Nakamoto block is problematic; will not store or relay"; + "stacks_block_hash" => %block.header.block_hash(), + "consensus_hash" => %block.header.consensus_hash, + "burn_height" => block.header.chain_length, + "sortition_height" => block_sn.block_height, + ); + return Ok(false); + } + + let accept_msg = format!( + "Stored incoming Nakamoto block {}/{}", + &block.header.consensus_hash, + &block.header.block_hash() + ); + + let config = chainstate.config(); + let staging_db_tx = chainstate.db_tx_begin()?; + let accepted = + NakamotoChainState::accept_block(&config, block, sort_handle, &staging_db_tx)?; + staging_db_tx.commit()?; + + if accepted { + debug!("{}", &accept_msg); + } + Ok(accepted) + } + /// Coalesce a set of microblocks into relayer hints and MicroblocksData messages, as calculated by /// process_new_blocks(). Make sure the messages don't get too big. fn make_microblocksdata_messages( @@ -1313,6 +1390,32 @@ impl Relayer { true } + /// Verify that a relayed block is not problematic -- i.e. it doesn't contain any problematic + /// transactions. This is a static check -- we only look at the block contents. + /// + /// Returns true if the check passed -- i.e. no problems. + /// Returns false if not + pub fn static_check_problematic_relayed_nakamoto_block( + mainnet: bool, + epoch_id: StacksEpochId, + block: &NakamotoBlock, + ast_rules: ASTRules, + ) -> bool { + for tx in block.txs.iter() { + if !Relayer::static_check_problematic_relayed_tx(mainnet, epoch_id, tx, ast_rules) + .is_ok() + { + info!( + "Nakamoto block {} with tx {} will not be stored or relayed", + block.header.block_hash(), + tx.txid() + ); + return false; + } + } + true + } + /// Verify that a relayed microblock is not problematic -- i.e. it doesn't contain any /// problematic transactions. 
This is a static check -- we only look at the microblock /// contents. @@ -1576,21 +1679,22 @@ impl Relayer { mempool: &mut MemPoolDB, event_observer: Option<&dyn MemPoolEventDispatcher>, ) -> Result, StacksTransaction)>, net_error> { - let chain_tip = match chainstate.get_stacks_chain_tip(sortdb)? { - Some(tip) => tip, - None => { - debug!( - "No Stacks chain tip; dropping {} transaction(s)", - network_result.pushed_transactions.len() - ); - return Ok(vec![]); - } - }; + let chain_tip = + match NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? { + Some(tip) => tip, + None => { + debug!( + "No Stacks chain tip; dropping {} transaction(s)", + network_result.pushed_transactions.len() + ); + return Ok(vec![]); + } + }; let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), network_result.burn_height)? .expect("FATAL: no epoch defined") .epoch_id; - let chain_height = chain_tip.height; + let chain_height = chain_tip.anchored_header.height(); Relayer::filter_problematic_transactions(network_result, chainstate.mainnet, epoch_id); if let Err(e) = PeerNetwork::store_transactions( @@ -1674,6 +1778,7 @@ impl Relayer { } /// Set up the unconfirmed chain state off of the canonical chain tip. + /// Only relevant in Stacks 2.x. Nakamoto nodes should not call this. pub fn setup_unconfirmed_state( chainstate: &mut StacksChainState, sortdb: &SortitionDB, @@ -1695,7 +1800,8 @@ impl Relayer { Ok(processed_unconfirmed_state) } - /// Set up unconfirmed chain state in a read-only fashion + /// Set up unconfirmed chain state in a read-only fashion. + /// Only relevant in Stacks 2.x. Nakamoto nodes should not call this. pub fn setup_unconfirmed_state_readonly( chainstate: &mut StacksChainState, sortdb: &SortitionDB, @@ -1716,6 +1822,8 @@ impl Relayer { Ok(()) } + /// Reload unconfirmed microblock stream. 
+ /// Only call if we're in Stacks 2.x pub fn refresh_unconfirmed( chainstate: &mut StacksChainState, sortdb: &mut SortitionDB, @@ -4753,7 +4861,11 @@ pub mod test { let tip_opt = peers[1] .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let tip_opt = NakamotoChainState::get_canonical_block_header( + chainstate.db(), + sortdb, + ) + .unwrap(); Ok(tip_opt) }) .unwrap(); @@ -4883,7 +4995,11 @@ pub mod test { let tip_opt = peers[1] .with_db_state(|sortdb, chainstate, _, _| { - let tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let tip_opt = NakamotoChainState::get_canonical_block_header( + chainstate.db(), + sortdb, + ) + .unwrap(); Ok(tip_opt) }) .unwrap(); @@ -4896,10 +5012,14 @@ pub mod test { if let Some(tip) = tip_opt { debug!( "Push at {}, need {}", - tip.height - peers[1].config.burnchain.first_block_height - 1, + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, *pushed_i ); - if tip.height - peers[1].config.burnchain.first_block_height - 1 + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 == *pushed_i as u64 { // next block @@ -4922,10 +5042,14 @@ pub mod test { } debug!( "Sortition at {}, need {}", - tip.height - peers[1].config.burnchain.first_block_height - 1, + tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1, *i ); - if tip.height - peers[1].config.burnchain.first_block_height - 1 + if tip.anchored_header.height() + - peers[1].config.burnchain.first_block_height + - 1 == *i as u64 { let event_id = { @@ -5548,15 +5672,17 @@ pub mod test { microblock_parent_opt: Option<&StacksMicroblockHeader>| { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let stacks_tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); let parent_tip = 
match stacks_tip_opt { None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(staging_block) => { + Some(header_tip) => { let ic = sortdb.index_conn(); let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, &tip.sortition_id, - &staging_block.anchored_block_hash, + &header_tip.anchored_header.block_hash(), ) .unwrap() .unwrap(); // succeeds because we don't fork @@ -5717,15 +5843,17 @@ pub mod test { microblock_parent_opt: Option<&StacksMicroblockHeader>| { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let stacks_tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); let parent_tip = match stacks_tip_opt { None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(staging_block) => { + Some(header_tip) => { let ic = sortdb.index_conn(); let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, &tip.sortition_id, - &staging_block.anchored_block_hash, + &header_tip.anchored_header.block_hash(), ) .unwrap() .unwrap(); // succeeds because we don't fork @@ -5896,15 +6024,17 @@ pub mod test { microblock_parent_opt: Option<&StacksMicroblockHeader>| { let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let stacks_tip_opt = chainstate.get_stacks_chain_tip(sortdb).unwrap(); + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); let parent_tip = match stacks_tip_opt { None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(staging_block) => { + Some(header_tip) => { let ic = sortdb.index_conn(); let snapshot = SortitionDB::get_block_snapshot_for_winning_stacks_block( &ic, &tip.sortition_id, - &staging_block.anchored_block_hash, + &header_tip.anchored_header.block_hash(), ) .unwrap() .unwrap(); // succeeds because we don't fork 
diff --git a/stackslib/src/net/rpc.rs b/stackslib/src/net/rpc.rs index f3f9b1f7df..48c815e760 100644 --- a/stackslib/src/net/rpc.rs +++ b/stackslib/src/net/rpc.rs @@ -62,7 +62,8 @@ use crate::burnchains::*; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::ConsensusHash; use crate::chainstate::burn::Opcodes; -use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME}; +use crate::chainstate::nakamoto::NakamotoChainState; +use crate::chainstate::stacks::boot::{POX_1_NAME, POX_2_NAME, POX_3_NAME, POX_4_NAME}; use crate::chainstate::stacks::db::blocks::CheckError; use crate::chainstate::stacks::db::{blocks::MINIMUM_TX_FEE_RATE_PER_BYTE, StacksChainState}; use crate::chainstate::stacks::Error as chain_error; @@ -318,25 +319,36 @@ impl RPCPoxInfoData { // Note: should always be 0 unless somehow configured to start later let pox_1_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.first_block_height as u64) + .block_height_to_reward_cycle(u64::from(burnchain.first_block_height)) .ok_or(net_error::ChainstateError( "PoX-1 first reward cycle begins before first burn block height".to_string(), ))?; let pox_2_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.v1_unlock_height as u64) + .block_height_to_reward_cycle(u64::from(burnchain.pox_constants.v1_unlock_height)) .ok_or(net_error::ChainstateError( "PoX-2 first reward cycle begins before first burn block height".to_string(), ))? + 1; let pox_3_first_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.pox_3_activation_height as u64) + .block_height_to_reward_cycle(u64::from( + burnchain.pox_constants.pox_3_activation_height, + )) .ok_or(net_error::ChainstateError( "PoX-3 first reward cycle begins before first burn block height".to_string(), ))? 
+ 1; + let pox_4_first_cycle = burnchain + .block_height_to_reward_cycle(u64::from( + burnchain.pox_constants.pox_4_activation_height, + )) + .ok_or(net_error::ChainstateError( + "PoX-4 first reward cycle begins before first burn block height".to_string(), + ))? + + 1; + let data = chainstate .maybe_read_only_clarity_tx(&sortdb.index_conn(), tip, |clarity_tx| { clarity_tx.with_readonly_clarity_env( @@ -548,6 +560,14 @@ impl RPCPoxInfoData { as u64, first_reward_cycle_id: pox_3_first_cycle, }, + RPCPoxContractVersion { + contract_id: boot_code_id(POX_4_NAME, chainstate.mainnet).to_string(), + activation_burnchain_block_height: burnchain + .pox_constants + .pox_4_activation_height + as u64, + first_reward_cycle_id: pox_4_first_cycle, + }, ], }) } @@ -1378,6 +1398,7 @@ impl ConversationHttp { let burn_block_height = clarity_db.get_current_burnchain_block_height() as u64; let v1_unlock_height = clarity_db.get_v1_unlock_height(); let v2_unlock_height = clarity_db.get_v2_unlock_height(); + let v3_unlock_height = clarity_db.get_v3_unlock_height(); let (balance, balance_proof) = if with_proof { clarity_db .get_with_proof::(&key) @@ -1407,11 +1428,13 @@ impl ConversationHttp { burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ); let (locked, unlock_height) = balance.get_locked_balance_at_burn_block( burn_block_height, v1_unlock_height, v2_unlock_height, + v3_unlock_height, ); let balance = format!("0x{}", to_hex(&unlocked.to_be_bytes())); @@ -2047,10 +2070,10 @@ impl ConversationHttp { if let Some(unconfirmed_chain_tip) = unconfirmed_chain_tip_opt { Ok(Some(unconfirmed_chain_tip)) } else { - match chainstate.get_stacks_chain_tip(sortdb)? { + match NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? 
{ Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( &tip.consensus_hash, - &tip.anchored_block_hash, + &tip.anchored_header.block_hash(), ))), None => { let response_metadata = HttpResponseMetadata::from_http_request_type( @@ -2068,24 +2091,26 @@ impl ConversationHttp { } } TipRequest::SpecificTip(tip) => Ok(Some(*tip).clone()), - TipRequest::UseLatestAnchoredTip => match chainstate.get_stacks_chain_tip(sortdb)? { - Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( - &tip.consensus_hash, - &tip.anchored_block_hash, - ))), - None => { - let response_metadata = HttpResponseMetadata::from_http_request_type( - req, - Some(canonical_stacks_tip_height), - ); - warn!("Failed to load Stacks chain tip"); - let response = HttpResponseType::ServerError( - response_metadata, - format!("Failed to load Stacks chain tip"), - ); - response.send(http, fd).and_then(|_| Ok(None)) + TipRequest::UseLatestAnchoredTip => { + match NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? { + Some(tip) => Ok(Some(StacksBlockHeader::make_index_block_hash( + &tip.consensus_hash, + &tip.anchored_header.block_hash(), + ))), + None => { + let response_metadata = HttpResponseMetadata::from_http_request_type( + req, + Some(canonical_stacks_tip_height), + ); + warn!("Failed to load Stacks chain tip"); + let response = HttpResponseType::ServerError( + response_metadata, + format!("Failed to load Stacks chain tip"), + ); + response.send(http, fd).and_then(|_| Ok(None)) + } } - }, + } } } @@ -2532,9 +2557,8 @@ impl ConversationHttp { let response_metadata = HttpResponseMetadata::from_http_request_type(req, Some(canonical_stacks_tip_height)); let response = HttpResponseType::MemPoolTxStream(response_metadata); - let height = chainstate - .get_stacks_chain_tip(sortdb)? - .map(|blk| blk.height) + let height = NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? 
+ .map(|hdr| hdr.anchored_header.height()) .unwrap_or(0); debug!( @@ -3103,7 +3127,7 @@ impl ConversationHttp { None } HttpRequestType::PostTransaction(ref _md, ref tx, ref attachment) => { - match chainstate.get_stacks_chain_tip(sortdb)? { + match NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb)? { Some(tip) => { let accepted = ConversationHttp::handle_post_transaction( &mut self.connection.protocol, @@ -3112,7 +3136,7 @@ impl ConversationHttp { chainstate, sortdb, tip.consensus_hash, - tip.anchored_block_hash, + tip.anchored_header.block_hash(), mempool, tx.clone(), &mut network.atlasdb, @@ -4310,7 +4334,7 @@ mod test { let mut tx_coinbase = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&privk1).unwrap(), - TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0x00; 32]), None, None), ); tx_coinbase.chain_id = 0x80000000; tx_coinbase.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -4865,10 +4889,13 @@ mod test { let mut sortdb = peer_server.sortdb.as_mut().unwrap(); let chainstate = &mut peer_server.stacks_node.as_mut().unwrap().chainstate; let stacks_block_id = { - let tip = chainstate.get_stacks_chain_tip(sortdb).unwrap().unwrap(); + let tip = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap() + .unwrap(); StacksBlockHeader::make_index_block_hash( &tip.consensus_hash, - &tip.anchored_block_hash, + &tip.anchored_header.block_hash(), ) }; let pox_info = RPCPoxInfoData::from_db( diff --git a/stackslib/src/net/stackerdb/config.rs b/stackslib/src/net/stackerdb/config.rs index a32f41bafa..ed5120dd37 100644 --- a/stackslib/src/net/stackerdb/config.rs +++ b/stackslib/src/net/stackerdb/config.rs @@ -53,6 +53,7 @@ use stacks_common::types::StacksEpochId; use stacks_common::util::hash::Hash160; use crate::chainstate::burn::db::sortdb::SortitionDB; +use crate::chainstate::nakamoto::NakamotoChainState; use 
crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::Error as chainstate_error; use crate::clarity_vm::clarity::{ClarityReadOnlyConnection, Error as clarity_error}; @@ -445,9 +446,9 @@ impl StackerDBConfig { sortition_db: &SortitionDB, contract_id: &QualifiedContractIdentifier, ) -> Result { - let chain_tip = chainstate - .get_stacks_chain_tip(sortition_db)? - .ok_or(net_error::NoSuchStackerDB(contract_id.clone()))?; + let chain_tip = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortition_db)? + .ok_or(net_error::NoSuchStackerDB(contract_id.clone()))?; let burn_tip = SortitionDB::get_block_snapshot_consensus( sortition_db.conn(), @@ -455,8 +456,10 @@ impl StackerDBConfig { )? .expect("FATAL: missing snapshot for Stacks block"); - let chain_tip_hash = - StacksBlockId::new(&chain_tip.consensus_hash, &chain_tip.anchored_block_hash); + let chain_tip_hash = StacksBlockId::new( + &chain_tip.consensus_hash, + &chain_tip.anchored_header.block_hash(), + ); let cur_epoch = SortitionDB::get_stacks_epoch(sortition_db.conn(), burn_tip.block_height)? 
.expect("FATAL: no epoch defined"); diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 77fba0b4d0..ccedd48cff 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -12,11 +12,12 @@ use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::burn::operations::BlockstackOperationType; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::BlockEventDispatcher; +use stacks::chainstate::nakamoto::NakamotoBlock; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; use stacks::chainstate::stacks::db::StacksHeaderInfo; use stacks::chainstate::stacks::events::{ - StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin, + StacksBlockEventData, StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin, }; use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::TransactionPayload; @@ -66,6 +67,7 @@ pub const PATH_MEMPOOL_TX_SUBMIT: &str = "new_mempool_tx"; pub const PATH_MEMPOOL_TX_DROP: &str = "drop_mempool_tx"; pub const PATH_MINED_BLOCK: &str = "mined_block"; pub const PATH_MINED_MICROBLOCK: &str = "mined_microblock"; +pub const PATH_MINED_NAKAMOTO_BLOCK: &str = "mined_nakamoto_block"; pub const PATH_STACKERDB_CHUNKS: &str = "stackerdb_chunks"; pub const PATH_BURN_BLOCK_SUBMIT: &str = "new_burn_block"; pub const PATH_BLOCK_PROCESSED: &str = "new_block"; @@ -91,6 +93,17 @@ pub struct MinedMicroblockEvent { pub anchor_block: BlockHeaderHash, } +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct MinedNakamotoBlockEvent { + pub target_burn_height: u64, + pub block_hash: String, + pub block_id: String, + pub stacks_height: u64, + pub block_size: u64, + pub cost: ExecutionCost, + pub tx_events: Vec, +} + impl EventObserver { pub fn send_payload(&self, payload: &serde_json::Value, path: &str) { 
let body = match serde_json::to_vec(&payload) { @@ -343,6 +356,10 @@ impl EventObserver { self.send_payload(payload, PATH_MINED_MICROBLOCK); } + fn send_mined_nakamoto_block(&self, payload: &serde_json::Value) { + self.send_payload(payload, PATH_MINED_NAKAMOTO_BLOCK); + } + fn send_stackerdb_chunks(&self, payload: &serde_json::Value) { self.send_payload(payload, PATH_STACKERDB_CHUNKS); } @@ -354,7 +371,7 @@ impl EventObserver { fn make_new_block_processed_payload( &self, filtered_events: Vec<(usize, &(bool, Txid, &StacksTransactionEvent))>, - block: &StacksBlock, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent_index_hash: &StacksBlockId, @@ -385,17 +402,17 @@ impl EventObserver { // Wrap events json!({ - "block_hash": format!("0x{}", block.block_hash()), + "block_hash": format!("0x{}", block.block_hash), "block_height": metadata.stacks_block_height, "burn_block_hash": format!("0x{}", metadata.burn_header_hash), "burn_block_height": metadata.burn_header_height, "miner_txid": format!("0x{}", winner_txid), "burn_block_time": metadata.burn_header_timestamp, "index_block_hash": format!("0x{}", metadata.index_block_hash()), - "parent_block_hash": format!("0x{}", block.header.parent_block), + "parent_block_hash": format!("0x{}", block.parent_block_hash), "parent_index_block_hash": format!("0x{}", parent_index_hash), - "parent_microblock": format!("0x{}", block.header.parent_microblock), - "parent_microblock_sequence": block.header.parent_microblock_sequence, + "parent_microblock": format!("0x{}", block.parent_microblock_hash), + "parent_microblock_sequence": block.parent_microblock_sequence, "matured_miner_rewards": mature_rewards.clone(), "events": serialized_events, "transactions": serialized_txs, @@ -465,6 +482,23 @@ impl MemPoolEventDispatcher for EventDispatcher { anchor_block, ); } + + fn mined_nakamoto_block_event( + &self, + target_burn_height: u64, + block: &NakamotoBlock, + block_size_bytes: u64, + 
consumed: &ExecutionCost, + tx_events: Vec, + ) { + self.process_mined_nakamoto_block_event( + target_burn_height, + block, + block_size_bytes, + consumed, + tx_events, + ) + } } impl StackerDBEventDispatcher for EventDispatcher { @@ -481,7 +515,7 @@ impl StackerDBEventDispatcher for EventDispatcher { impl BlockEventDispatcher for EventDispatcher { fn announce_block( &self, - block: &StacksBlock, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent: &StacksBlockId, @@ -561,8 +595,11 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.burn_block_observers_lookup.contains(&(*obs_id as u16)) - || self.any_event_observers_lookup.contains(&(*obs_id as u16)) + self.burn_block_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + || self.any_event_observers_lookup.contains( + &(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers")), + ) }) .collect(); if interested_observers.len() < 1 { @@ -683,7 +720,7 @@ impl EventDispatcher { pub fn process_chain_tip( &self, - block: &StacksBlock, + block: &StacksBlockEventData, metadata: &StacksHeaderInfo, receipts: &[StacksTransactionReceipt], parent_index_hash: &StacksBlockId, @@ -733,7 +770,7 @@ impl EventDispatcher { let payload = self.registered_observers[observer_id] .make_new_block_processed_payload( filtered_events, - block, + &block, metadata, receipts, parent_index_hash, @@ -767,8 +804,11 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.microblock_observers_lookup.contains(&(*obs_id as u16)) - || self.any_event_observers_lookup.contains(&(*obs_id as u16)) + self.microblock_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + || self.any_event_observers_lookup.contains( + &(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers")), + ) }) .collect(); if interested_observers.len() < 1 { @@ 
-820,8 +860,11 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.mempool_observers_lookup.contains(&(*obs_id as u16)) - || self.any_event_observers_lookup.contains(&(*obs_id as u16)) + self.mempool_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + || self.any_event_observers_lookup.contains( + &(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers")), + ) }) .collect(); if interested_observers.len() < 1 { @@ -848,7 +891,10 @@ impl EventDispatcher { .registered_observers .iter() .enumerate() - .filter(|(obs_id, _observer)| self.miner_observers_lookup.contains(&(*obs_id as u16))) + .filter(|(obs_id, _observer)| { + self.miner_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + }) .collect(); if interested_observers.len() < 1 { return; @@ -883,7 +929,7 @@ impl EventDispatcher { .enumerate() .filter(|(obs_id, _observer)| { self.mined_microblocks_observers_lookup - .contains(&(*obs_id as u16)) + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) }) .collect(); if interested_observers.len() < 1 { @@ -904,6 +950,43 @@ impl EventDispatcher { } } + pub fn process_mined_nakamoto_block_event( + &self, + target_burn_height: u64, + block: &NakamotoBlock, + block_size_bytes: u64, + consumed: &ExecutionCost, + tx_events: Vec, + ) { + let interested_observers: Vec<_> = self + .registered_observers + .iter() + .enumerate() + .filter(|(obs_id, _observer)| { + self.miner_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + }) + .collect(); + if interested_observers.len() < 1 { + return; + } + + let payload = serde_json::to_value(MinedNakamotoBlockEvent { + target_burn_height, + block_hash: block.header.block_hash().to_string(), + block_id: block.header.block_id().to_string(), + stacks_height: block.header.chain_length, + block_size: block_size_bytes, + cost: 
consumed.clone(), + tx_events, + }) + .unwrap(); + + for (_, observer) in interested_observers.iter() { + observer.send_mined_nakamoto_block(&payload); + } + } + /// Forward newly-accepted StackerDB chunk metadata to downstream `stackerdb` observers. /// Infallible. pub fn process_new_stackerdb_chunks( @@ -916,7 +999,8 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.stackerdb_observers_lookup.contains(&(*obs_id as u16)) + self.stackerdb_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) }) .collect(); if interested_observers.len() < 1 { @@ -941,8 +1025,11 @@ impl EventDispatcher { .iter() .enumerate() .filter(|(obs_id, _observer)| { - self.mempool_observers_lookup.contains(&(*obs_id as u16)) - || self.any_event_observers_lookup.contains(&(*obs_id as u16)) + self.mempool_observers_lookup + .contains(&(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers"))) + || self.any_event_observers_lookup.contains( + &(u16::try_from(*obs_id).expect("FATAL: more than 2^16 observers")), + ) }) .collect(); if interested_observers.len() < 1 { @@ -1095,7 +1182,7 @@ mod test { let payload = observer.make_new_block_processed_payload( filtered_events, - &block, + &block.into(), &metadata, &receipts, &parent_index_hash, diff --git a/testnet/stacks-node/src/mockamoto.rs b/testnet/stacks-node/src/mockamoto.rs index 6b0e52cc7b..c87ec5b652 100644 --- a/testnet/stacks-node/src/mockamoto.rs +++ b/testnet/stacks-node/src/mockamoto.rs @@ -1,7 +1,6 @@ use std::sync::atomic::AtomicBool; use std::sync::mpsc::sync_channel; use std::sync::mpsc::Receiver; -use std::sync::mpsc::SyncSender; use std::sync::Arc; use std::sync::Mutex; use std::thread; @@ -14,7 +13,6 @@ use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::BlockSnapshot; use stacks::chainstate::burn::OpsHash; use stacks::chainstate::burn::SortitionHash; -use 
stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::coordinator::comm::CoordinatorReceivers; use stacks::chainstate::coordinator::CoordinatorCommunication; use stacks::chainstate::nakamoto::NakamotoBlock; @@ -36,7 +34,6 @@ use stacks::chainstate::stacks::TransactionPayload; use stacks::chainstate::stacks::TransactionVersion; use stacks::chainstate::stacks::MINER_BLOCK_CONSENSUS_HASH; use stacks::chainstate::stacks::MINER_BLOCK_HEADER_HASH; -use stacks::clarity_vm::database::SortitionDBRef; use stacks::core::StacksEpoch; use stacks::core::BLOCK_LIMIT_MAINNET_10; use stacks::core::HELIUM_BLOCK_LIMIT_20; @@ -68,7 +65,6 @@ use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::secp256k1::Secp256k1PublicKey; -use crate::event_dispatcher; use crate::neon::Counters; use crate::neon_node::Globals; use crate::neon_node::PeerThread; @@ -333,10 +329,7 @@ impl MockamotoNode { sortdb_tx.commit()?; let staging_db_tx = self.chainstate.db_tx_begin()?; - NakamotoChainState::set_burn_block_processed( - &staging_db_tx, - &new_snapshot.burn_header_hash, - )?; + NakamotoChainState::set_burn_block_processed(&staging_db_tx, &new_snapshot.consensus_hash)?; staging_db_tx.commit()?; Ok(()) @@ -347,23 +340,24 @@ impl MockamotoNode { let chain_id = self.chainstate.chain_id; let (mut chainstate_tx, clarity_instance) = self.chainstate.chainstate_tx_begin().unwrap(); - let (is_genesis, chain_tip_bh, chain_tip_ch) = match NakamotoChainState::get_chain_tip( - &chainstate_tx, - &sortition_tip.consensus_hash, - self.sortdb.conn(), - ) { - Ok(chain_tip) => (false, chain_tip.0, chain_tip.1), - Err(ChainstateError::NoSuchBlockError) => - // No stacks tip yet, parent should be genesis - { - ( - true, - FIRST_STACKS_BLOCK_HASH, - FIRST_BURNCHAIN_CONSENSUS_HASH, - ) - } - Err(e) => return Err(e), - }; + let (is_genesis, chain_tip_bh, chain_tip_ch) = + match 
NakamotoChainState::get_canonical_block_header(&chainstate_tx, &self.sortdb) { + Ok(Some(chain_tip)) => ( + false, + chain_tip.anchored_header.block_hash(), + chain_tip.consensus_hash, + ), + Ok(None) | Err(ChainstateError::NoSuchBlockError) => + // No stacks tip yet, parent should be genesis + { + ( + true, + FIRST_STACKS_BLOCK_HASH, + FIRST_BURNCHAIN_CONSENSUS_HASH, + ) + } + Err(e) => return Err(e), + }; let (parent_chain_length, parent_burn_height) = if is_genesis { (0, 0) @@ -376,7 +370,9 @@ impl MockamotoNode { let miner_nonce = 2 * parent_chain_length; - let coinbase_tx_payload = TransactionPayload::Coinbase(CoinbasePayload([1; 32]), None); + // TODO: VRF proof cannot be None in Nakamoto rules + let coinbase_tx_payload = + TransactionPayload::Coinbase(CoinbasePayload([1; 32]), None, None); let mut coinbase_tx = StacksTransaction::new( TransactionVersion::Testnet, TransactionAuth::from_p2pkh(&self.miner_key).unwrap(), @@ -429,16 +425,15 @@ impl MockamotoNode { clarity_instance, &sortdb_handle, &self.sortdb.pox_constants, - sortition_tip.burn_header_hash.clone(), - sortition_tip.block_height.try_into().map_err(|_| { - ChainstateError::InvalidStacksBlock("Burn block height exceeded u32".into()) - })?, chain_tip_ch.clone(), chain_tip_bh.clone(), parent_chain_length, parent_burn_height, + sortition_tip.burn_header_hash.clone(), + sortition_tip.block_height.try_into().map_err(|_| { + ChainstateError::InvalidStacksBlock("Burn block height exceeded u32".into()) + })?, false, - None, true, parent_chain_length + 1, )?; @@ -486,14 +481,12 @@ impl MockamotoNode { version: 100, chain_length: parent_chain_length + 1, burn_spent: 10, - parent: chain_tip_bh, - burn_view: sortition_tip.burn_header_hash.clone(), tx_merkle_root: tx_merkle_tree.root(), state_index_root, stacker_signature: MessageSignature([0; 65]), miner_signature: MessageSignature([0; 65]), consensus_hash: sortition_tip.consensus_hash.clone(), - parent_consensus_hash: chain_tip_ch, + parent_block_id: 
StacksBlockId::new(&chain_tip_ch, &chain_tip_bh), }, txs, }; @@ -510,9 +503,10 @@ impl MockamotoNode { fn mine_and_stage_block(&mut self) -> Result<(), ChainstateError> { let block = self.mine_stacks_block()?; + let config = self.chainstate.config(); let chainstate_tx = self.chainstate.db_tx_begin()?; let sortition_handle = self.sortdb.index_handle_at_tip(); - NakamotoChainState::accept_block(block, &sortition_handle, &chainstate_tx)?; + NakamotoChainState::accept_block(&config, block, &sortition_handle, &chainstate_tx)?; chainstate_tx.commit()?; Ok(()) } @@ -521,21 +515,18 @@ impl MockamotoNode { let (mut chainstate_tx, clarity_instance) = self.chainstate.chainstate_tx_begin()?; let pox_constants = self.sortdb.pox_constants.clone(); let mut sortdb_tx = self.sortdb.tx_begin_at_tip(); - let Some(next_block) = NakamotoChainState::next_ready_block(&chainstate_tx)? else { + let Some((next_block, _)) = NakamotoChainState::next_ready_nakamoto_block(&chainstate_tx)? + else { return Ok(false); }; - let parent_block_id = StacksBlockId::new( - &next_block.header.parent_consensus_hash, - &next_block.header.parent, - ); + let parent_block_id = &next_block.header.parent_block_id; let parent_chain_tip = NakamotoChainState::get_block_header(&chainstate_tx, &parent_block_id)?.ok_or_else( || { warn!( "Tried to process next ready block, but its parent header cannot be found"; "block_hash" => %next_block.header.block_hash(), - "consensus_hash" => %next_block.header.consensus_hash, "parent_block_id" => %parent_block_id ); ChainstateError::NoSuchBlockError diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 2b839f5299..40df0ed45c 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -166,6 +166,7 @@ use stacks::chainstate::burn::BlockSnapshot; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use 
stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvider}; +use stacks::chainstate::nakamoto::NakamotoChainState; use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::unconfirmed::UnconfirmedTxMap; use stacks::chainstate::stacks::db::StacksHeaderInfo; @@ -516,8 +517,8 @@ impl Globals { LeaderKeyRegistrationState::Active(RegisteredKey { target_block_height, vrf_public_key: op.public_key, - block_height: op.block_height as u64, - op_vtxindex: op.vtxindex as u32, + block_height: u64::from(op.block_height), + op_vtxindex: u32::from(op.vtxindex), }); activated = true; } else { @@ -1270,7 +1271,7 @@ impl BlockMinerThread { let mut tx = StacksTransaction::new( version, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), recipient_opt), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), recipient_opt, None), ); tx.chain_id = chain_id; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; @@ -1364,9 +1365,9 @@ impl BlockMinerThread { burn_db: &mut SortitionDB, chain_state: &mut StacksChainState, ) -> Option { - if let Some(stacks_tip) = chain_state - .get_stacks_chain_tip(burn_db) - .expect("FATAL: could not query chain tip") + if let Some(stacks_tip) = + NakamotoChainState::get_canonical_block_header(chain_state.db(), burn_db) + .expect("FATAL: could not query chain tip") { let miner_address = self .keychain @@ -1378,7 +1379,7 @@ impl BlockMinerThread { &self.burn_block, miner_address, &stacks_tip.consensus_hash, - &stacks_tip.anchored_block_hash, + &stacks_tip.anchored_header.block_hash(), ) { Ok(parent_info) => Some(parent_info), Err(Error::BurnchainTipChanged) => { @@ -1435,7 +1436,7 @@ impl BlockMinerThread { if last_mined_blocks.len() == 1 { debug!("Have only attempted one block; unconditionally trying again"); } - last_mined_blocks.len() as u64 + 1 + u64::try_from(last_mined_blocks.len()).expect("FATAL: more than 2^64 mined blocks") + 1 } else { let mut best_attempt = 0; debug!( @@ 
-1768,16 +1769,16 @@ impl BlockMinerThread { let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .expect("FATAL: could not query canonical sortition DB tip"); - if let Some(stacks_tip) = chainstate - .get_stacks_chain_tip(sortdb) - .expect("FATAL: could not query canonical Stacks chain tip") + if let Some(stacks_tip) = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .expect("FATAL: could not query canonical Stacks chain tip") { // if a block hasn't been processed within some deadline seconds of receipt, don't block // mining let process_deadline = get_epoch_time_secs() - unprocessed_block_deadline; let has_unprocessed = StacksChainState::has_higher_unprocessed_blocks( chainstate.db(), - stacks_tip.height, + stacks_tip.anchored_header.height(), process_deadline, ) .expect("FATAL: failed to query staging blocks"); @@ -1799,10 +1800,12 @@ impl BlockMinerThread { // NOTE: this could be None if it's not part of the canonical PoX fork any // longer if let Some(highest_unprocessed_block_sn) = highest_unprocessed_block_sn_opt { - if stacks_tip.height + (burnchain.pox_constants.prepare_length as u64) - 1 + if stacks_tip.anchored_header.height() + + u64::from(burnchain.pox_constants.prepare_length) + - 1 >= highest_unprocessed.height && highest_unprocessed_block_sn.block_height - + (burnchain.pox_constants.prepare_length as u64) + + u64::from(burnchain.pox_constants.prepare_length) - 1 >= sort_tip.block_height { @@ -2012,9 +2015,9 @@ impl BlockMinerThread { let cur_burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); - if let Some(stacks_tip) = chain_state - .get_stacks_chain_tip(&burn_db) - .expect("FATAL: could not query chain tip") + if let Some(stacks_tip) = + NakamotoChainState::get_canonical_block_header(chain_state.db(), &burn_db) + .expect("FATAL: could not query chain tip") { let is_miner_blocked = self .globals @@ 
-2029,7 +2032,7 @@ impl BlockMinerThread { &chain_state, self.config.miner.unprocessed_block_deadline_secs, ); - if stacks_tip.anchored_block_hash != anchored_block.header.parent_block + if stacks_tip.anchored_header.block_hash() != anchored_block.header.parent_block || parent_block_info.parent_consensus_hash != stacks_tip.consensus_hash || cur_burn_chain_tip.burn_header_hash != self.burn_block.burn_header_hash || is_miner_blocked @@ -2048,7 +2051,7 @@ impl BlockMinerThread { "old_tip_burn_block_height" => self.burn_block.block_height, "old_tip_burn_block_sortition_id" => %self.burn_block.sortition_id, "attempt" => attempt, - "new_stacks_tip_block_hash" => %stacks_tip.anchored_block_hash, + "new_stacks_tip_block_hash" => %stacks_tip.anchored_header.block_hash(), "new_stacks_tip_consensus_hash" => %stacks_tip.consensus_hash, "new_tip_burn_block_height" => cur_burn_chain_tip.block_height, "new_tip_burn_block_sortition_id" => %cur_burn_chain_tip.sortition_id, @@ -3468,26 +3471,39 @@ impl ParentStacksBlockInfo { .expect("Failed to look up block's parent snapshot"); let parent_sortition_id = &parent_snapshot.sortition_id; - let parent_winning_vtxindex = - SortitionDB::get_block_winning_vtxindex(burn_db.conn(), parent_sortition_id) + + let (parent_block_height, parent_winning_vtxindex, parent_block_total_burn) = if mine_tip_ch + == &FIRST_BURNCHAIN_CONSENSUS_HASH + { + (0, 0, 0) + } else { + let parent_winning_vtxindex = + SortitionDB::get_block_winning_vtxindex(burn_db.conn(), parent_sortition_id) + .expect("SortitionDB failure.") + .ok_or_else(|| { + error!( + "Failed to find winning vtx index for the parent sortition"; + "parent_sortition_id" => %parent_sortition_id + ); + Error::WinningVtxNotFoundForChainTip + })?; + + let parent_block = SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) .expect("SortitionDB failure.") .ok_or_else(|| { error!( - "Failed to find winning vtx index for the parent sortition"; + "Failed to find block snapshot for the 
parent sortition"; "parent_sortition_id" => %parent_sortition_id ); - Error::WinningVtxNotFoundForChainTip + Error::SnapshotNotFoundForChainTip })?; - let parent_block = SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) - .expect("SortitionDB failure.") - .ok_or_else(|| { - error!( - "Failed to find block snapshot for the parent sortition"; - "parent_sortition_id" => %parent_sortition_id - ); - Error::SnapshotNotFoundForChainTip - })?; + ( + parent_block.block_height, + parent_winning_vtxindex, + parent_block.total_burn, + ) + }; // don't mine off of an old burnchain block let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) @@ -3526,8 +3542,8 @@ impl ParentStacksBlockInfo { Ok(ParentStacksBlockInfo { stacks_parent_header: stacks_tip_header, parent_consensus_hash: mine_tip_ch.clone(), - parent_block_burn_height: parent_block.block_height, - parent_block_total_burn: parent_block.total_burn, + parent_block_burn_height: parent_block_height, + parent_block_total_burn: parent_block_total_burn, parent_winning_vtxindex, coinbase_nonce, }) diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 933e8d232a..063a7f5f9b 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -1066,7 +1066,7 @@ impl Node { let mut tx = StacksTransaction::new( version, tx_auth, - TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None), + TransactionPayload::Coinbase(CoinbasePayload([0u8; 32]), None, None), ); tx.chain_id = self.config.burnchain.chain_id; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 176dcf6922..49feeb4f42 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -181,7 +181,7 @@ pub fn announce_boot_receipts( debug!("Push {} boot receipts", &boot_receipts.len()); event_dispatcher.announce_block( - &block_0, + 
&block_0.into(), &block_header_0, boot_receipts, &StacksBlockId::sentinel(), diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 0659ca391e..0b2c9267ee 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -120,6 +120,8 @@ fn advance_to_2_1( u32::max_value(), u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, )); burnchain_config.pox_constants = pox_constants.clone(); @@ -619,6 +621,8 @@ fn transition_fixes_bitcoin_rigidity() { u32::max_value(), u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1063,6 +1067,8 @@ fn transition_adds_get_pox_addr_recipients() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1369,6 +1375,8 @@ fn transition_adds_mining_from_segwit() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); let mut spender_sks = vec![]; @@ -1534,6 +1542,8 @@ fn transition_removes_pox_sunset() { (epoch_21 as u32) + 1, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1816,6 +1826,8 @@ fn transition_empty_blocks() { (epoch_2_1 + 1) as u32, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -2174,6 +2186,8 @@ fn test_pox_reorgs_three_flaps() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -2710,6 +2724,8 @@ fn test_pox_reorg_one_flap() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3134,6 +3150,8 @@ fn test_pox_reorg_flap_duel() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -3568,6 +3586,8 @@ fn test_pox_reorg_flap_reward_cycles() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); 
burnchain_config.pox_constants = pox_constants.clone(); @@ -3996,6 +4016,8 @@ fn test_pox_missing_five_anchor_blocks() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4396,6 +4418,8 @@ fn test_sortition_divergence_pre_21() { v1_unlock_height, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -4760,6 +4784,8 @@ fn trait_invocation_cross_epoch() { u32::max_value(), u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5006,6 +5032,8 @@ fn test_v1_unlock_height_with_current_stackers() { v1_unlock_height as u32, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5268,6 +5296,8 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { v1_unlock_height as u32, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 1ed40ef9f9..f20f59af46 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -176,6 +176,8 @@ fn disable_pox() { v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -706,6 +708,8 @@ fn pox_2_unlock_all() { v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -1398,6 +1402,8 @@ fn test_pox_reorg_one_flap() { v1_unlock_height, v2_unlock_height.try_into().unwrap(), u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index ac9524f07c..f9f2090c64 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ 
b/testnet/stacks-node/src/tests/epoch_23.rs @@ -141,6 +141,8 @@ fn trait_invocation_behavior() { v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 97f9744223..0a116353c2 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -187,7 +187,9 @@ fn fix_to_pox_contract() { u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, + u32::MAX, pox_3_activation_height as u32, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -823,7 +825,9 @@ fn verify_auto_unlock_behavior() { u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, + u32::MAX, pox_3_activation_height as u32, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs index f4b136e54b..106dd31877 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -355,7 +355,7 @@ pub fn make_poison( } pub fn make_coinbase(sender: &StacksPrivateKey, nonce: u64, tx_fee: u64) -> Vec { - let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None); + let payload = TransactionPayload::Coinbase(CoinbasePayload([0; 32]), None, None); serialize_sign_standard_single_sig_tx(payload.into(), sender, nonce, tx_fee) } diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 116e2954da..39b0b4be32 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -212,7 +212,9 @@ pub mod test_observer { use warp; use warp::Filter; - use crate::event_dispatcher::{MinedBlockEvent, MinedMicroblockEvent, StackerDBChunksEvent}; + use crate::event_dispatcher::{ + MinedBlockEvent, 
MinedMicroblockEvent, MinedNakamotoBlockEvent, StackerDBChunksEvent, + }; pub const EVENT_OBSERVER_PORT: u16 = 50303; @@ -220,6 +222,8 @@ pub mod test_observer { pub static ref NEW_BLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref MINED_BLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref MINED_MICROBLOCKS: Mutex> = Mutex::new(Vec::new()); + pub static ref MINED_NAKAMOTO_BLOCKS: Mutex> = + Mutex::new(Vec::new()); pub static ref NEW_MICROBLOCKS: Mutex> = Mutex::new(Vec::new()); pub static ref NEW_STACKERDB_CHUNKS: Mutex> = Mutex::new(Vec::new()); @@ -310,6 +314,43 @@ pub mod test_observer { Ok(warp::http::StatusCode::OK) } + async fn handle_mined_nakamoto_block( + block: serde_json::Value, + ) -> Result { + let mut mined_blocks = MINED_NAKAMOTO_BLOCKS.lock().unwrap(); + // assert that the mined transaction events have string-y txids + block + .as_object() + .expect("Expected JSON object for mined nakamoto block event") + .get("tx_events") + .expect("Expected tx_events key in mined nakamoto block event") + .as_array() + .expect("Expected tx_events key to be an array in mined nakamoto block event") + .iter() + .for_each(|txevent| { + let txevent_obj = txevent.as_object().expect("TransactionEvent should be object"); + let inner_obj = if let Some(inner_obj) = txevent_obj.get("Success") { + inner_obj + } else if let Some(inner_obj) = txevent_obj.get("ProcessingError") { + inner_obj + } else if let Some(inner_obj) = txevent_obj.get("Skipped") { + inner_obj + } else { + panic!("TransactionEvent object should have one of Success, ProcessingError, or Skipped") + }; + inner_obj + .as_object() + .expect("TransactionEvent should be an object") + .get("txid") + .expect("Should have txid key") + .as_str() + .expect("Expected txid to be a string"); + }); + + mined_blocks.push(serde_json::from_value(block).unwrap()); + Ok(warp::http::StatusCode::OK) + } + async fn handle_mempool_txs(txs: serde_json::Value) -> Result { let new_rawtxs = txs .as_array() @@ -419,6 +460,10 @@ 
pub mod test_observer { .and(warp::post()) .and(warp::body::json()) .and_then(handle_mined_block); + let mined_nakamoto_blocks = warp::path!("mined_nakamoto_block") + .and(warp::post()) + .and(warp::body::json()) + .and_then(handle_mined_nakamoto_block); let mined_microblocks = warp::path!("mined_microblock") .and(warp::post()) .and(warp::body::json()) @@ -438,6 +483,7 @@ pub mod test_observer { .or(new_microblocks) .or(mined_blocks) .or(mined_microblocks) + .or(mined_nakamoto_blocks) .or(new_stackerdb_chunks), ) .run(([127, 0, 0, 1], EVENT_OBSERVER_PORT)) @@ -1883,6 +1929,8 @@ fn stx_delegate_btc_integration_test() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -5909,6 +5957,8 @@ fn pox_integration_test() { u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone(); @@ -10617,6 +10667,8 @@ fn test_competing_miners_build_on_same_chain( u32::MAX, u32::MAX, u32::MAX, + u32::MAX, + u32::MAX, ); burnchain_config.pox_constants = pox_constants.clone();