refactor(peer_loop): Reject responses with fishy difficulties
In the message handler for `SyncChallengeResponse`, add a check that
verifies that the proven difficulties of the response blocks sum to a
number larger than our own current tip's difficulty. Otherwise, the
response is rejected and the peer is punished for a fishy response.

Also:
 - Factor out the genesis block header parameters into
   `BlockHeader::genesis`.
 - Make `check_pow` relative to the network, so that different networks
   may allow different proof-of-work evolutions.
 - Refactor various things to improve readability.
 - Add test that `max_cumulative_pow_after` works for zero
   `num_blocks`.
 - Add epsilon margin to `max_cumulative_pow_after` to tolerate small
   floating point rounding errors.
 - Document heuristic being used to reject sync challenge responses.
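
For illustration, the new check boils down to the sketch below. The
helper name is made up here, and u128 stands in for the codebase's
fixed-width `Difficulty`/`ProofOfWork` types; the actual logic lives in
`SyncChallengeResponse::check_difficulty` in the diff below.

    /// Accept the sync-challenge response only if the sum of the verified
    /// child-block difficulties strictly exceeds our own tip's difficulty.
    fn difficulties_are_plausible(child_difficulties: &[u128], own_tip_difficulty: u128) -> bool {
        let verified_sum: u128 = child_difficulties.iter().sum();
        verified_sum > own_tip_difficulty
    }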

Co-authored-by: Alan Szepieniec <[email protected]>
Sword-Smith and aszepieniec committed Jan 24, 2025
1 parent aa42608 commit 05ad1be
Showing 6 changed files with 161 additions and 50 deletions.
19 changes: 19 additions & 0 deletions src/models/blockchain/block/block_header.rs
@@ -3,16 +3,19 @@ use std::fmt::Display;
#[cfg(any(test, feature = "arbitrary-impls"))]
use arbitrary::Arbitrary;
use get_size2::GetSize;
use num_traits::Zero;
use serde::Deserialize;
use serde::Serialize;
use strum::EnumCount;
use tasm_lib::twenty_first::bfe_array;
use twenty_first::math::b_field_element::BFieldElement;
use twenty_first::math::bfield_codec::BFieldCodec;
use twenty_first::math::digest::Digest;

use super::block_height::BlockHeight;
use super::difficulty_control::Difficulty;
use super::difficulty_control::ProofOfWork;
use crate::config_models::network::Network;
use crate::models::proof_abstractions::mast_hash::HasDiscriminant;
use crate::models::proof_abstractions::mast_hash::MastHash;
use crate::models::proof_abstractions::timestamp::Timestamp;
@@ -96,6 +99,22 @@ impl Display for BlockHeader {
}
}

impl BlockHeader {
pub(crate) fn genesis(network: Network) -> Self {
Self {
version: BFieldElement::zero(),
height: BFieldElement::zero().into(),
prev_block_digest: Default::default(),
timestamp: network.launch_date(),

// TODO: to be set to something difficult to predict ahead of time
nonce: Digest::new(bfe_array![0, 0, 0, 0, 0]),
cumulative_proof_of_work: ProofOfWork::zero(),
difficulty: Difficulty::MINIMUM,
}
}
}

#[derive(Debug, Clone, EnumCount)]
pub enum BlockHeaderField {
Version,
15 changes: 12 additions & 3 deletions src/models/blockchain/block/difficulty_control.rs
@@ -461,8 +461,8 @@ pub(crate) fn difficulty_control(
}
}

/// Determine the maximum possible cumulative proof-of-work after n blocks given
/// the start conditions.
/// Determine an upper bound for the maximum possible cumulative proof-of-work
/// after n blocks given the start conditions.
pub(crate) fn max_cumulative_pow_after(
cumulative_pow_start: ProofOfWork,
difficulty_start: Difficulty,
@@ -473,10 +473,12 @@
// In this case the PID adjustment factor is
// f = 1 + (MINIMUM_BLOCK_TIME - TARGET_BLOCK_INTERVAL) / TARGET_BLOCK_INTERVAL * P
// = 1 - (60 - 588) / 588 / 16,
const EPSILON: f64 = 0.000001;
let f = 1.0_f64
+ (TARGET_BLOCK_INTERVAL.to_millis() - MINIMUM_BLOCK_TIME.to_millis()) as f64
/ TARGET_BLOCK_INTERVAL.to_millis() as f64
/ 16.0;
/ 16.0
+ EPSILON;
let mut max_difficulty: f64 = BigUint::from(difficulty_start).to_f64().unwrap();
let mut max_cumpow: f64 = BigUint::from(cumulative_pow_start).to_f64().unwrap();
let cap = BigUint::from(ProofOfWork::MAXIMUM).to_f64().unwrap();
@@ -850,6 +852,13 @@ mod test {
let _calculated_again = max_cumulative_pow_after(init_cumpow, init_difficulty, usize::MAX);
}

#[test]
fn max_pow_after_accepts_zero_num_blocks() {
let init_cumpow = ProofOfWork::from_u64(200);
let init_difficulty = Difficulty::from_u64(1000);
let _calculated = max_cumulative_pow_after(init_cumpow, init_difficulty, 0);
}

#[proptest]
fn test_sanity_max_pow_after_prop(
#[strategy(arb())] init_difficulty: u64,
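
Aside on the bound above: with the 60 s minimum block time and the
588 s target interval from the comment in the hunk above, the per-block
adjustment factor is about 1.056 (plus the new EPSILON), so the bound
effectively lets the difficulty grow geometrically by at most that
factor each block. A rough, hypothetical simplification in plain f64
arithmetic; the exact update order of the real loop and its cap at
ProofOfWork::MAXIMUM are not reproduced here:

    // Hypothetical sketch of the upper bound, not the real implementation.
    fn max_cumulative_pow_after_sketch(mut cumpow: f64, mut difficulty: f64, num_blocks: usize) -> f64 {
        // Fastest allowed per-block difficulty growth, plus epsilon for rounding slack.
        let f = 1.0 + (588.0 - 60.0) / 588.0 / 16.0 + 0.000001;
        for _ in 0..num_blocks {
            cumpow += difficulty; // each block contributes at most the current difficulty
            difficulty *= f;      // and difficulty can rise by at most a factor f
        }
        cumpow
    }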
12 changes: 1 addition & 11 deletions src/models/blockchain/block/mod.rs
@@ -485,17 +485,7 @@ impl Block {
MmrAccumulator::new_from_leafs(vec![]),
);

let header: BlockHeader = BlockHeader {
version: BFieldElement::zero(),
height: BFieldElement::zero().into(),
prev_block_digest: Default::default(),
timestamp: network.launch_date(),

// TODO: to be set to something difficult to predict ahead of time
nonce: Digest::new(bfe_array![0, 0, 0, 0, 0]),
cumulative_proof_of_work: ProofOfWork::zero(),
difficulty: Difficulty::MINIMUM,
};
let header = BlockHeader::genesis(network);

let appendix = BlockAppendix::default();

105 changes: 83 additions & 22 deletions src/models/peer.rs
@@ -21,21 +21,23 @@ use serde::Serialize;
use tasm_lib::twenty_first::prelude::Mmr;
use tasm_lib::twenty_first::prelude::MmrMembershipProof;
use tasm_lib::twenty_first::util_types::mmr::mmr_accumulator::MmrAccumulator;
use tracing::debug;
use tracing::trace;
use tracing::warn;
use transaction_notification::TransactionNotification;
use transfer_transaction::TransferTransaction;
use twenty_first::math::digest::Digest;

use super::blockchain::block::block_header::BlockHeader;
use super::blockchain::block::block_height::BlockHeight;
use super::blockchain::block::difficulty_control::Difficulty;
use super::blockchain::block::difficulty_control::ProofOfWork;
use super::blockchain::block::Block;
use super::channel::BlockProposalNotification;
use super::proof_abstractions::timestamp::Timestamp;
use super::state::transaction_kernel_id::TransactionKernelId;
use crate::config_models::network::Network;
use crate::models::blockchain::block::difficulty_control::max_cumulative_pow_after;
use crate::models::blockchain::block::difficulty_control::Difficulty;
use crate::models::peer::transfer_block::TransferBlock;
use crate::prelude::twenty_first;

@@ -151,7 +153,8 @@ pub enum NegativePeerSanction {
InvalidSyncChallengeResponse,
TimedOutSyncChallengeResponse,
UnexpectedSyncChallengeResponse,
FishyPow,
FishyPowEvolutionChallengeResponse,
FishyDifficultiesChallengeResponse,

FloodPeerListResponse,
BlockRequestUnknownHeight,
@@ -237,7 +240,8 @@ impl Display for NegativePeerSanction {
NegativePeerSanction::BatchBlocksRequestTooManyDigests => {
"too many digests in batch block request"
}
NegativePeerSanction::FishyPow => "fishy pow",
NegativePeerSanction::FishyPowEvolutionChallengeResponse => "fishy pow evolution",
NegativePeerSanction::FishyDifficultiesChallengeResponse => "fishy difficulties",
};
write!(f, "{string}")
}
@@ -307,7 +311,8 @@ impl Sanction for NegativePeerSanction {
NegativePeerSanction::TimedOutSyncChallengeResponse => -50,
NegativePeerSanction::InvalidBlockMmrAuthentication => -4,
NegativePeerSanction::BatchBlocksRequestTooManyDigests => -50,
NegativePeerSanction::FishyPow => -51,
NegativePeerSanction::FishyPowEvolutionChallengeResponse => -51,
NegativePeerSanction::FishyDifficultiesChallengeResponse => -51,
}
}
}
@@ -689,6 +694,9 @@ impl IssuedSyncChallenge {
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) struct SyncChallenge {
pub(crate) tip_digest: Digest,

/// Block heights of the child blocks, for which the peer must respond with
/// (parent, child) blocks. Assumed to be ordered from small to big.
pub(crate) challenges: [BlockHeight; 10],
}

@@ -752,7 +760,7 @@ impl SyncChallenge {
heights.push(height);
}

// sort from small to big
// sort from small to big as that makes some validation checks easier.
heights.sort();

Self {
@@ -764,10 +772,16 @@

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) struct SyncChallengeResponse {
pub(crate) tip: TransferBlock,
pub(crate) tip_parent: TransferBlock,
/// (parent, child) blocks. blocks are assumed to be ordered from small to
/// big block height.
pub(crate) blocks: [(TransferBlock, TransferBlock); 10],

/// Membership proof of the child blocks, relative to the tip-MMR (after
/// appending digest of tip). Must match ordering of blocks.
pub(crate) membership_proofs: [MmrMembershipProof; 10],

pub(crate) tip_parent: TransferBlock,
pub(crate) tip: TransferBlock,
}

impl SyncChallengeResponse {
@@ -836,11 +850,12 @@ impl SyncChallengeResponse {

/// Determine whether the claimed evolution of the cumulative proof-of-work
/// is a) possible, and b) likely, given the difficulties.
pub(crate) fn check_pow(&self) -> bool {
let cumulative_pow_evolution_okay = [(
BlockHeight::genesis(),
ProofOfWork::zero(),
Difficulty::MINIMUM,
pub(crate) fn check_pow(&self, network: Network) -> bool {
let genesis_header = BlockHeader::genesis(network);
let parent_triples = [(
genesis_header.height,
genesis_header.cumulative_proof_of_work,
genesis_header.difficulty,
)]
.into_iter()
.chain(self.blocks.iter().map(|(child, _parent)| {
@@ -855,25 +870,32 @@
self.tip_parent.header.cumulative_proof_of_work,
self.tip_parent.header.difficulty,
)])
.tuple_windows()
.all(
|((start_height, start_cpow, start_diff), (stop_height, stop_cpow, _))| {
max_cumulative_pow_after(
.collect_vec();
let cumulative_pow_evolution_okay = parent_triples.iter().copied().tuple_windows().all(
|((start_height, start_cpow, start_difficulty), (stop_height, stop_cpow, _))| {
let max_pow = max_cumulative_pow_after(
start_cpow,
start_diff,
start_difficulty,
(stop_height - start_height)
.try_into()
.expect("difference of block heights guaranteed to be positive"),
) >= stop_cpow
.expect("difference of block heights guaranteed to be non-negative"),
);
// cpow must increase for each block, and is upward-bounded. But
// since response may contain duplicates, allow equality.
max_pow >= stop_cpow && start_cpow <= stop_cpow
},
);

let first = self.blocks.first().unwrap().0.header;
let first = self.blocks[0].0.header;
let last = self.tip.header;
let total_pow_increase = BigUint::from(first.cumulative_proof_of_work)
let total_pow_increase = BigUint::from(last.cumulative_proof_of_work)
- BigUint::from(first.cumulative_proof_of_work);
let span = last.height - first.height;
let average_difficulty = total_pow_increase.to_f64().unwrap() / (span as f64);
debug_assert!(
average_difficulty > 0.0,
"Average difficulty must be positive. Got: {average_difficulty}"
);

// In principle, the cumulative proof-of-work could have been boosted by
// a small number of outlying large difficulties. We require here that
@@ -896,17 +918,56 @@
// 3: 0.20803116452164094
// 4: 0.10979422571975492
// 5: 0.043917690287901975 .
//
// The tip is included in the below check, so if *it* doesn't have an
// above average difficulty, something is almost certainly off.

let too_few_above_mean_difficulties = self
.blocks
.iter()
.flat_map(|(l, r)| [l, r])
.chain([&self.tip_parent, &self.tip])
.map(|b| b.header.difficulty)
.filter(|d| BigUint::from(*d).to_f64().unwrap() > average_difficulty)
.filter(|d| BigUint::from(*d).to_f64().unwrap() >= average_difficulty)
.count()
== 0;

if too_few_above_mean_difficulties {
warn!("Too few above mean difficulties.");
}

if !cumulative_pow_evolution_okay {
warn!("Impossible evolution of cumulative pow.");
for (start, stop) in parent_triples.into_iter().tuple_windows() {
let upper_bound = max_cumulative_pow_after(
start.1,
start.2,
(stop.0 - start.0).try_into().unwrap(),
);
debug!(
"start ({} / {} / {}) -> stop ({} / {} / {}) with max {}",
start.0, start.1, start.2, stop.0, stop.1, stop.2, upper_bound
);
}
}

cumulative_pow_evolution_okay && !too_few_above_mean_difficulties
}

/// Check whether the claimed difficulties are large enough relative to that
/// of our own tip.
///
/// Sum all verified difficulties and verify that this number is larger than
/// our own tip difficulty. This inequality guarantees that the successful
/// attacker must have spent at least one block's worth of guessing power to
/// produce the malicious chain, and probably much more.
pub(crate) fn check_difficulty(&self, own_tip_difficulty: Difficulty) -> bool {
let own_tip_difficulty = ProofOfWork::zero() + own_tip_difficulty;
let mut fork_relative_cumpow = ProofOfWork::zero();
for (_parent, child) in self.blocks.iter() {
fork_relative_cumpow = fork_relative_cumpow + child.header.difficulty;
}

fork_relative_cumpow > own_tip_difficulty
}
}
23 changes: 12 additions & 11 deletions src/models/state/mod.rs
@@ -1548,17 +1548,18 @@ impl GlobalState {
bail!("could not fetch tip and tip predecessor");
};

if tip.header().height < MIN_BLOCK_HEIGHT_FOR_SYNCING.into() {
let tip_height = tip.header().height;
if tip_height < MIN_BLOCK_HEIGHT_FOR_SYNCING.into() {
bail!("tip height is too small for sync mode")
}

let mut block_pairs: Vec<(TransferBlock, TransferBlock)> = vec![];
let mut block_mmr_mps = vec![];
for h in sync_challenge.challenges {
if h < 2u64.into() {
for child_height in sync_challenge.challenges {
if child_height < 2u64.into() {
bail!("challenge asks for genesis block");
}
if h >= tip.header().height {
if child_height >= tip.header().height {
bail!("challenge asks for height that's not ancestor to tip.");
}

Expand All @@ -1567,7 +1568,7 @@ impl GlobalState {
.archival_state()
.archival_block_mmr
.ammr()
.try_get_leaf(h.into())
.try_get_leaf(child_height.into())
.await
else {
bail!("could not get leaf from archival block mmr");
Expand All @@ -1576,20 +1577,20 @@ impl GlobalState {
bail!("could not fetch indicated block pair");
};

// The MMR membership proofs will be invalid here if the peer's tip
// does not match ours. That's a known deficiency of this function,
// and can be fixed by correctly handling the construction of old
// MMR-MPs from the current archival MMR state.
// Notice that the MMR membership proofs are relative to an MMR
// where the tip digest *has* been added. So it is not relative to
// the block MMR accumulator present in the tip block, as it only
// refers to its ancestors.
// refers to its ancestors. Rather, it's relative to the block MMR
// accumulator present in the tip's child.
block_mmr_mps.push(
self.chain
.archival_state()
.archival_block_mmr
.ammr()
.prove_membership_async(h.into())
.prove_membership_relative_to_smaller_mmr(
child_height.into(),
tip_height.next().into(),
)
.await,
);
block_pairs.push((