Skip to content

Commit

Permalink
Add Unchecked Header Writes to Bridge Pallet (#672)
Browse files Browse the repository at this point in the history
* Add method for unchecked header imports

* Update docs for importing unchecked headers

* Import unchecked headers in HeaderChain implementation

* Fix Clippy warnings

* Move unchecked header import out of Verifier struct

* Clean up unchecked import tests

* Change HeaderChain API to accept iterator of headers

* Use chains of headers in tests

* Remove unused Result return type when appending finalized headers

* Add test which shows that genesis changes are not enacted

* Use initial header's hash for unchecked authority set changes

* Appease Clippy

* Check ancestry before making unchecked writes

* Fix typo

* Fix Clippy warning

* Add note about `ancestry_proof` structure

* Use best hash storage item directly

Co-authored-by: Svyatoslav Nikolsky <[email protected]>
  • Loading branch information
2 people authored and bkchr committed Apr 10, 2024
1 parent acee558 commit b921a48
Show file tree
Hide file tree
Showing 5 changed files with 251 additions and 62 deletions.
35 changes: 12 additions & 23 deletions bridges/modules/finality-verifier/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
use bp_header_chain::{justification::verify_justification, AncestryChecker, HeaderChain};
use bp_runtime::{Chain, HeaderOf};
use finality_grandpa::voter_set::VoterSet;
use frame_support::{ensure, traits::Get};
use frame_support::{dispatch::DispatchError, ensure, traits::Get};
use frame_system::ensure_signed;
use sp_runtime::traits::Header as HeaderT;

Expand All @@ -57,7 +57,7 @@ pub mod pallet {
type BridgedChain: Chain;

/// The pallet which we will use as our underlying storage mechanism.
type HeaderChain: HeaderChain<<Self::BridgedChain as Chain>::Header>;
type HeaderChain: HeaderChain<<Self::BridgedChain as Chain>::Header, DispatchError>;

/// The type through which we will verify that a given header is related to the last
/// finalized header in our storage pallet.
Expand All @@ -80,13 +80,16 @@ pub mod pallet {

#[pallet::call]
impl<T: Config> Pallet<T> {
/// Verify a header is finalized according to the given finality proof.
/// Verify a target header is finalized according to the given finality proof.
///
/// Will use the underlying storage pallet to fetch information about the current
/// authorities and best finalized header in order to verify that the header is finalized.
///
/// If successful in verification, it will write the headers to the underlying storage
/// pallet as well as import the valid finality proof.
/// If successful in verification, it will write the header as well as its ancestors (from
/// the given `ancestry_proof`) to the underlying storage pallet.
///
/// Note that the expected format for `ancestry_proof` is a contiguous list of finalized
/// headers covering the range (current_best_finalized_header, finality_target]
#[pallet::weight(0)]
pub fn submit_finality_proof(
origin: OriginFor<T>,
Expand Down Expand Up @@ -119,22 +122,10 @@ pub mod pallet {
<Error<T>>::InvalidAncestryProof
);

// If for whatever reason we are unable to fully import headers and the corresponding
// finality proof we want to avoid writing to the base pallet storage
use frame_support::storage::{with_transaction, TransactionOutcome};
with_transaction(|| {
for header in ancestry_proof {
if T::HeaderChain::import_header(header).is_err() {
return TransactionOutcome::Rollback(Err(<Error<T>>::FailedToWriteHeader));
}
}

if T::HeaderChain::import_finality_proof(finality_target, justification).is_err() {
return TransactionOutcome::Rollback(Err(<Error<T>>::FailedToWriteFinalityProof));
}

TransactionOutcome::Commit(Ok(()))
})?;
// Note that this won't work if we ever change the `ancestry_proof` format to be
// sparse since this expects a contiguous set of finalized headers.
let _ =
T::HeaderChain::append_finalized_chain(ancestry_proof).map_err(|_| <Error<T>>::FailedToWriteHeader)?;

Ok(().into())
}
Expand All @@ -151,8 +142,6 @@ pub mod pallet {
InvalidAuthoritySet,
/// Failed to write a header to the underlying header chain.
FailedToWriteHeader,
/// Failed to write finality proof to the underlying header chain.
FailedToWriteFinalityProof,
/// The given ancestry proof is too large to be verified in a single transaction.
OversizedAncestryProof,
}
Expand Down
2 changes: 1 addition & 1 deletion bridges/modules/substrate/src/fork_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -503,7 +503,7 @@ where
storage.update_current_authority_set(authority_set);
}

fn change_log(delay: u64) -> Digest<TestHash> {
pub(crate) fn change_log(delay: u64) -> Digest<TestHash> {
let consensus_log = ConsensusLog::<TestNumber>::ScheduledChange(sp_finality_grandpa::ScheduledChange {
next_authorities: vec![(alice(), 1), (bob(), 1)],
delay,
Expand Down
234 changes: 219 additions & 15 deletions bridges/modules/substrate/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,8 @@ pub trait Config: frame_system::Config {

decl_storage! {
trait Store for Module<T: Config> as SubstrateBridge {
/// Hash of the header used to bootstrap the pallet.
InitialHash: BridgedBlockHash<T>;
/// The number of the highest block(s) we know of.
BestHeight: BridgedBlockNumber<T>;
/// Hash of the header at the highest known height.
Expand Down Expand Up @@ -151,6 +153,8 @@ decl_error! {
Halted,
/// The pallet has already been initialized.
AlreadyInitialized,
/// The given header is not a descendant of a particular header.
NotDescendant,
}
}

Expand Down Expand Up @@ -362,7 +366,7 @@ impl<T: Config> Module<T> {
}
}

impl<T: Config> bp_header_chain::HeaderChain<BridgedHeader<T>> for Module<T> {
impl<T: Config> bp_header_chain::HeaderChain<BridgedHeader<T>, sp_runtime::DispatchError> for Module<T> {
fn best_finalized() -> BridgedHeader<T> {
PalletStorage::<T>::new().best_finalized_header().header
}
Expand All @@ -371,27 +375,92 @@ impl<T: Config> bp_header_chain::HeaderChain<BridgedHeader<T>> for Module<T> {
PalletStorage::<T>::new().current_authority_set()
}

fn import_header(header: BridgedHeader<T>) -> Result<(), ()> {
let mut verifier = verifier::Verifier {
storage: PalletStorage::<T>::new(),
};
fn append_finalized_chain(
headers: impl IntoIterator<Item = BridgedHeader<T>>,
) -> Result<(), sp_runtime::DispatchError> {
let mut storage = PalletStorage::<T>::new();

let mut header_iter = headers.into_iter().peekable();
let first_header = header_iter.peek().ok_or(Error::<T>::NotDescendant)?;

// Quick ancestry check to make sure we're not writing complete nonsense to storage
ensure!(
<BestFinalized<T>>::get() == *first_header.parent_hash(),
Error::<T>::NotDescendant,
);

let _ = verifier.import_header(header.hash(), header).map_err(|_| ())?;
for header in header_iter {
import_header_unchecked::<_, T>(&mut storage, header);
}

Ok(())
}
}

fn import_finality_proof(header: BridgedHeader<T>, finality_proof: Vec<u8>) -> Result<(), ()> {
let mut verifier = verifier::Verifier {
storage: PalletStorage::<T>::new(),
};
/// Import a finalized header without checking if this is true.
///
/// This function assumes that all the given headers have already been proven to be valid and
/// finalized. Using this assumption it will write them to storage with minimal checks. That
/// means it's of great importance that this function is *not* called with any headers whose
/// finality has not been checked, otherwise you risk bricking your bridge.
///
/// One thing this function does handle for you is GRANDPA authority set handoffs. However, since it
/// does not do verification on the incoming header it will assume that the authority set change
/// signals in the digest are well formed.
fn import_header_unchecked<S, T>(storage: &mut S, header: BridgedHeader<T>)
where
S: BridgeStorage<Header = BridgedHeader<T>>,
T: Config,
{
// Since we want to use the existing storage infrastructure we need to indicate the fork
// that we're on. We will assume that since we are using the unchecked import there are no
// forks, and can indicate that by using the first imported header's "fork".
let dummy_fork_hash = <InitialHash<T>>::get();

// If we have a pending change in storage let's check if the current header enacts it.
let enact_change = if let Some(pending_change) = storage.scheduled_set_change(dummy_fork_hash) {
pending_change.height == *header.number()
} else {
// We don't have a scheduled change in storage at the moment. Let's check if the current
// header signals an authority set change.
if let Some(change) = verifier::find_scheduled_change(&header) {
let next_set = AuthoritySet {
authorities: change.next_authorities,
set_id: storage.current_authority_set().set_id + 1,
};

let _ = verifier
.import_finality_proof(header.hash(), finality_proof.into())
.map_err(|_| ())?;
let height = *header.number() + change.delay;
let scheduled_change = ScheduledChange {
authority_set: next_set,
height,
};

Ok(())
storage.schedule_next_set_change(dummy_fork_hash, scheduled_change);

// If the delay is 0 this header will enact the change it signaled
height == *header.number()
} else {
false
}
};

if enact_change {
const ENACT_SET_PROOF: &str = "We only set `enact_change` as `true` if we are sure that there is a scheduled
authority set change in storage. Therefore, it must exist.";

// If we are unable to enact an authority set it means our storage entry for scheduled
// changes is missing. Best to crash since this is likely a bug.
let _ = storage.enact_authority_set(dummy_fork_hash).expect(ENACT_SET_PROOF);
}

storage.update_best_finalized(header.hash());

storage.write_header(&ImportedHeader {
header,
requires_justification: false,
is_finalized: true,
signal_hash: None,
});
}

/// Ensure that the origin is either root, or `ModuleOwner`.
Expand Down Expand Up @@ -448,6 +517,7 @@ fn initialize_bridge<T: Config>(init_params: InitializationData<BridgedHeader<T>
<NextScheduledChange<T>>::insert(initial_hash, change);
};

<InitialHash<T>>::put(initial_hash);
<BestHeight<T>>::put(header.number());
<BestHeaders<T>>::put(vec![initial_hash]);
<BestFinalized<T>>::put(initial_hash);
Expand Down Expand Up @@ -659,7 +729,8 @@ impl<T: Config> BridgeStorage for PalletStorage<T> {
mod tests {
use super::*;
use crate::mock::{run_test, test_header, unfinalized_header, Origin, TestHeader, TestRuntime};
use bp_test_utils::authority_list;
use bp_header_chain::HeaderChain;
use bp_test_utils::{alice, authority_list, bob};
use frame_support::{assert_noop, assert_ok};
use sp_runtime::DispatchError;

Expand Down Expand Up @@ -845,4 +916,137 @@ mod tests {
);
});
}

#[test]
fn importing_unchecked_headers_works() {
run_test(|| {
init_with_origin(Origin::root()).unwrap();
let storage = PalletStorage::<TestRuntime>::new();

let child = test_header(2);
let header = test_header(3);

let header_chain = vec![child.clone(), header.clone()];
assert_ok!(Module::<TestRuntime>::append_finalized_chain(header_chain));

assert!(storage.header_by_hash(child.hash()).unwrap().is_finalized);
assert!(storage.header_by_hash(header.hash()).unwrap().is_finalized);

assert_eq!(storage.best_finalized_header().header, header);
assert_eq!(storage.best_headers()[0].hash, header.hash());
})
}

#[test]
fn prevents_unchecked_header_import_if_headers_are_unrelated() {
run_test(|| {
init_with_origin(Origin::root()).unwrap();

// Pallet is expecting test_header(2) as the child
let not_a_child = test_header(3);
let header_chain = vec![not_a_child];

assert_noop!(
Module::<TestRuntime>::append_finalized_chain(header_chain),
Error::<TestRuntime>::NotDescendant,
);
})
}

#[test]
fn importing_unchecked_headers_enacts_new_authority_set() {
run_test(|| {
init_with_origin(Origin::root()).unwrap();
let storage = PalletStorage::<TestRuntime>::new();

let next_set_id = 2;
let next_authorities = vec![(alice(), 1), (bob(), 1)];

// Need to update the header digest to indicate that our header signals an authority set
// change. The change will be enacted when we import our header.
let mut header = test_header(2);
header.digest = fork_tests::change_log(0);

// Let's import our test header
assert_ok!(Module::<TestRuntime>::append_finalized_chain(vec![header.clone()]));

// Make sure that our header is the best finalized
assert_eq!(storage.best_finalized_header().header, header);
assert_eq!(storage.best_headers()[0].hash, header.hash());

// Make sure that the authority set actually changed upon importing our header
assert_eq!(
storage.current_authority_set(),
AuthoritySet::new(next_authorities, next_set_id),
);
})
}

#[test]
fn importing_unchecked_headers_enacts_new_authority_set_from_old_header() {
run_test(|| {
init_with_origin(Origin::root()).unwrap();
let storage = PalletStorage::<TestRuntime>::new();

let next_set_id = 2;
let next_authorities = vec![(alice(), 1), (bob(), 1)];

// Need to update the header digest to indicate that our header signals an authority set
// change. However, the change doesn't happen until the next block.
let mut schedules_change = test_header(2);
schedules_change.digest = fork_tests::change_log(1);
let header = test_header(3);

// Let's import our test headers
let header_chain = vec![schedules_change, header.clone()];
assert_ok!(Module::<TestRuntime>::append_finalized_chain(header_chain));

// Make sure that our header is the best finalized
assert_eq!(storage.best_finalized_header().header, header);
assert_eq!(storage.best_headers()[0].hash, header.hash());

// Make sure that the authority set actually changed upon importing our header
assert_eq!(
storage.current_authority_set(),
AuthoritySet::new(next_authorities, next_set_id),
);
})
}

#[test]
fn importing_unchecked_header_can_enact_set_change_scheduled_at_genesis() {
run_test(|| {
let storage = PalletStorage::<TestRuntime>::new();

let next_authorities = vec![(alice(), 1)];
let next_set_id = 2;
let next_authority_set = AuthoritySet::new(next_authorities.clone(), next_set_id);

let first_scheduled_change = ScheduledChange {
authority_set: next_authority_set,
height: 2,
};

let init_data = InitializationData {
header: test_header(1),
authority_list: authority_list(),
set_id: 1,
scheduled_change: Some(first_scheduled_change),
is_halted: false,
};

assert_ok!(Module::<TestRuntime>::initialize(Origin::root(), init_data));

// We are expecting an authority set change at height 2, so this header should enact
// that upon being imported.
let header_chain = vec![test_header(2)];
assert_ok!(Module::<TestRuntime>::append_finalized_chain(header_chain));

// Make sure that the authority set actually changed upon importing our header
assert_eq!(
storage.current_authority_set(),
AuthoritySet::new(next_authorities, next_set_id),
);
})
}
}
10 changes: 7 additions & 3 deletions bridges/modules/substrate/src/verifier.rs
Original file line number Diff line number Diff line change
Expand Up @@ -242,9 +242,13 @@ where
&proof.0,
)
.map_err(|_| FinalizationError::InvalidJustification)?;
frame_support::debug::trace!(target: "sub-bridge", "Received valid justification for {:?}", header);
frame_support::debug::trace!("Received valid justification for {:?}", header);

frame_support::debug::trace!(target: "sub-bridge", "Checking ancestry for headers between {:?} and {:?}", last_finalized, header);
frame_support::debug::trace!(
"Checking ancestry for headers between {:?} and {:?}",
last_finalized,
header
);
let mut finalized_headers =
if let Some(ancestors) = headers_between(&self.storage, last_finalized, header.clone()) {
// Since we only try and finalize headers with a height strictly greater
Expand Down Expand Up @@ -335,7 +339,7 @@ where
Some(ancestors)
}

fn find_scheduled_change<H: HeaderT>(header: &H) -> Option<sp_finality_grandpa::ScheduledChange<H::Number>> {
pub(crate) fn find_scheduled_change<H: HeaderT>(header: &H) -> Option<sp_finality_grandpa::ScheduledChange<H::Number>> {
let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID);

let filter_log = |log: ConsensusLog<H::Number>| match log {
Expand Down
Loading

0 comments on commit b921a48

Please sign in to comment.