diff --git a/Cargo.lock b/Cargo.lock
index a424738685e7..3b4a5aa4a443 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -262,9 +262,9 @@ dependencies = [
 [[package]]
 name = "anyhow"
-version = "1.0.81"
+version = "1.0.86"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247"
+checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"

 [[package]]
 name = "approx"
@@ -4149,6 +4149,24 @@ dependencies = [
  "staging-xcm",
 ]

+[[package]]
+name = "cumulus-pov-validator"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "clap 4.5.9",
+ "parity-scale-codec",
+ "polkadot-node-primitives",
+ "polkadot-parachain-primitives",
+ "polkadot-primitives",
+ "sc-executor",
+ "sp-core",
+ "sp-io",
+ "sp-maybe-compressed-blob",
+ "tracing",
+ "tracing-subscriber 0.3.18",
+]
+
 [[package]]
 name = "cumulus-primitives-aura"
 version = "0.7.0"
diff --git a/Cargo.toml b/Cargo.toml
index 759780ce4a4c..8d1beb8d33db 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -61,6 +61,7 @@ members = [
 	"bridges/snowbridge/primitives/router",
 	"bridges/snowbridge/runtime/runtime-common",
 	"bridges/snowbridge/runtime/test-common",
+	"cumulus/bin/pov-validator",
 	"cumulus/client/cli",
 	"cumulus/client/collator",
 	"cumulus/client/consensus/aura",
diff --git a/cumulus/bin/pov-validator/Cargo.toml b/cumulus/bin/pov-validator/Cargo.toml
new file mode 100644
index 000000000000..9be92960ad77
--- /dev/null
+++ b/cumulus/bin/pov-validator/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "cumulus-pov-validator"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+repository.workspace = true
+license.workspace = true
+homepage.workspace = true
+description = "A tool for validating PoVs locally"
+
+[dependencies]
+codec.workspace = true
+clap = { workspace = true, features = ["derive"] }
+sc-executor.workspace = true
+sp-io.workspace = true
+sp-core.workspace = true
+sp-maybe-compressed-blob.workspace = true
+polkadot-node-primitives.workspace = true
+polkadot-parachain-primitives.workspace = true
+polkadot-primitives.workspace = true
+anyhow.workspace = true
+tracing.workspace = true
+tracing-subscriber.workspace = true
+
+[lints]
+workspace = true
diff --git a/cumulus/bin/pov-validator/src/main.rs b/cumulus/bin/pov-validator/src/main.rs
new file mode 100644
index 000000000000..1c08f218f6b8
--- /dev/null
+++ b/cumulus/bin/pov-validator/src/main.rs
@@ -0,0 +1,154 @@
+// This file is part of Cumulus.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use clap::Parser;
+use codec::{Decode, Encode};
+use polkadot_node_primitives::{BlockData, PoV, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT};
+use polkadot_parachain_primitives::primitives::ValidationParams;
+use polkadot_primitives::{BlockNumber as RBlockNumber, Hash as RHash, HeadData};
+use sc_executor::WasmExecutor;
+use sp_core::traits::{CallContext, CodeExecutor, RuntimeCode, WrappedRuntimeCode};
+use std::{fs, path::PathBuf, time::Instant};
+use tracing::level_filters::LevelFilter;
+
+/// Tool for validating a `PoV` locally.
+#[derive(Parser)]
+struct Cli {
+	/// The path to the validation code that should be used to validate the `PoV`.
+	///
+	/// The validation code can either be downloaded from the relay chain that the parachain is
+	/// connected to, or obtained by building the runtime manually to get the WASM binary.
+	#[arg(long)]
+	validation_code: PathBuf,
+
+	/// The path to the `PoV` to validate.
+	///
+	/// The `PoV`s can be obtained by running `polkadot-parachain --collator --chain YOUR_CHAIN
+	/// --export-pov-to-path PATH_TO_EXPORT` and then choosing one of the exported `PoV`s.
+	#[arg(long)]
+	pov: PathBuf,
+}
+
+fn main() -> anyhow::Result<()> {
+	let _ = tracing_subscriber::fmt()
+		.with_env_filter(
+			tracing_subscriber::EnvFilter::from_default_env()
+				.add_directive(LevelFilter::INFO.into()),
+		)
+		.with_writer(std::io::stderr)
+		.try_init();
+
+	let cli = Cli::parse();
+
+	let validation_code = fs::read(&cli.validation_code).map_err(|error| {
+		tracing::error!(%error, path = %cli.validation_code.display(), "Failed to read validation code");
+		anyhow::anyhow!("Failed to read validation code")
+	})?;
+
+	let validation_code =
+		sp_maybe_compressed_blob::decompress(&validation_code, VALIDATION_CODE_BOMB_LIMIT)
+			.map_err(|error| {
+				tracing::error!(%error, "Failed to decompress validation code");
+				anyhow::anyhow!("Failed to decompress validation code")
+			})?;
+
+	let pov_file = fs::read(&cli.pov).map_err(|error| {
+		tracing::error!(%error, path = %cli.pov.display(), "Failed to read PoV");
+		anyhow::anyhow!("Failed to read PoV")
+	})?;
+
+	let executor = WasmExecutor::::builder()
+		.with_allow_missing_host_functions(true)
+		.build();
+
+	let runtime_code = RuntimeCode {
+		code_fetcher: &WrappedRuntimeCode(validation_code.into()),
+		heap_pages: None,
+		// The hash is used for caching, which we need here, but as we only use one wasm file, the
+		// actual hash value is not important.
+		hash: vec![1, 2, 3],
+	};
+
+	// We are calling `Core_version` to get the wasm file compiled. We don't care about the result.
+	let _ = executor
+		.call(
+			&mut sp_io::TestExternalities::default().ext(),
+			&runtime_code,
+			"Core_version",
+			&[],
+			CallContext::Offchain,
+		)
+		.0;
+
+	let pov_file_ptr = &mut &pov_file[..];
+	let pov = PoV::decode(pov_file_ptr).map_err(|error| {
+		tracing::error!(%error, "Failed to decode `PoV`");
+		anyhow::anyhow!("Failed to decode `PoV`")
+	})?;
+	let head_data = HeadData::decode(pov_file_ptr).map_err(|error| {
+		tracing::error!(%error, "Failed to decode `HeadData`");
+		anyhow::anyhow!("Failed to decode `HeadData`")
+	})?;
+	let relay_parent_storage_root = RHash::decode(pov_file_ptr).map_err(|error| {
+		tracing::error!(%error, "Failed to decode relay storage root");
+		anyhow::anyhow!("Failed to decode relay storage root")
+	})?;
+	let relay_parent_number = RBlockNumber::decode(pov_file_ptr).map_err(|error| {
+		tracing::error!(%error, "Failed to decode relay block number");
+		anyhow::anyhow!("Failed to decode relay block number")
+	})?;
+
+	let pov = sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT).map_err(
+		|error| {
+			tracing::error!(%error, "Failed to decompress `PoV`");
+			anyhow::anyhow!("Failed to decompress `PoV`")
+		},
+	)?;
+
+	let validation_params = ValidationParams {
+		relay_parent_number,
+		relay_parent_storage_root,
+		parent_head: head_data,
+		block_data: BlockData(pov.into()),
+	};
+
+	tracing::info!("Starting validation");
+
+	let start = Instant::now();
+
+	let res = executor
+		.call(
+			&mut sp_io::TestExternalities::default().ext(),
+			&runtime_code,
+			"validate_block",
+			&validation_params.encode(),
+			CallContext::Offchain,
+		)
+		.0;
+
+	let duration = start.elapsed();
+
+	match res {
+		Ok(_) => tracing::info!("Validation was successful"),
+		Err(error) => tracing::error!(%error, "Validation failed"),
+	}
+
+	tracing::info!("Validation took {}ms", duration.as_millis());
+
+	Ok(())
+}
diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs
index 749b13112394..02d60538a732 100644
--- a/cumulus/client/consensus/aura/src/collators/lookahead.rs
+++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs
@@ -39,10 +39,13 @@ use cumulus_primitives_aura::AuraUnincludedSegmentApi;
 use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData};
 use cumulus_relay_chain_interface::RelayChainInterface;
-use polkadot_node_primitives::SubmitCollationParams;
+use polkadot_node_primitives::{PoV, SubmitCollationParams};
 use polkadot_node_subsystem::messages::CollationGenerationMessage;
 use polkadot_overseer::Handle as OverseerHandle;
-use polkadot_primitives::{CollatorPair, Id as ParaId, OccupiedCoreAssumption};
+use polkadot_primitives::{
+	BlockNumber as RBlockNumber, CollatorPair, Hash as RHash, HeadData, Id as ParaId,
+	OccupiedCoreAssumption,
+};

 use futures::prelude::*;
 use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf};
@@ -54,10 +57,49 @@ use sp_consensus_aura::{AuraApi, Slot};
 use sp_core::crypto::Pair;
 use sp_inherents::CreateInherentDataProviders;
 use sp_keystore::KeystorePtr;
-use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member};
-use std::{sync::Arc, time::Duration};
+use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member, NumberFor};
+use std::{
+	fs::{self, File},
+	path::PathBuf,
+	sync::Arc,
+	time::Duration,
+};

-use crate::collator::{self as collator_util};
+use crate::{collator as collator_util, LOG_TARGET};
+
+/// Export the given `pov` to the file system at `path`.
+///
+/// The file will be named `block_hash_block_number.pov`.
+///
+/// The `parent_header`, `relay_parent_storage_root` and `relay_parent_number` will also be
+/// stored in the file alongside the `pov`. This enables stateless validation of the `pov`.
+fn export_pov_to_path(
+	path: PathBuf,
+	pov: PoV,
+	block_hash: Block::Hash,
+	block_number: NumberFor,
+	parent_header: Block::Header,
+	relay_parent_storage_root: RHash,
+	relay_parent_number: RBlockNumber,
+) {
+	if let Err(error) = fs::create_dir_all(&path) {
+		tracing::error!(target: LOG_TARGET, %error, path = %path.display(), "Failed to create PoV export directory");
+		return
+	}
+
+	let mut file = match File::create(path.join(format!("{block_hash:?}_{block_number}.pov"))) {
+		Ok(f) => f,
+		Err(error) => {
+			tracing::error!(target: LOG_TARGET, %error, "Failed to export PoV.");
+			return
+		},
+	};
+
+	pov.encode_to(&mut file);
+	HeadData(parent_header.encode()).encode_to(&mut file);
+	relay_parent_storage_root.encode_to(&mut file);
+	relay_parent_number.encode_to(&mut file);
+}

 /// Parameters for [`run`].
 pub struct Params {
@@ -97,7 +139,58 @@ pub struct Params {
 /// Run async-backing-friendly Aura.
 pub fn run(
-	mut params: Params,
+	params: Params,
+) -> impl Future + Send + 'static
+where
+	Block: BlockT,
+	Client: ProvideRuntimeApi
+		+ BlockOf
+		+ AuxStore
+		+ HeaderBackend
+		+ BlockBackend
+		+ Send
+		+ Sync
+		+ 'static,
+	Client::Api:
+		AuraApi + CollectCollationInfo + AuraUnincludedSegmentApi,
+	Backend: sc_client_api::Backend + 'static,
+	RClient: RelayChainInterface + Clone + 'static,
+	CIDP: CreateInherentDataProviders + 'static,
+	CIDP::InherentDataProviders: Send,
+	BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static,
+	Proposer: ProposerInterface + Send + Sync + 'static,
+	CS: CollatorServiceInterface + Send + Sync + 'static,
+	CHP: consensus_common::ValidationCodeHashProvider + Send + 'static,
+	P: Pair,
+	P::Public: AppPublic + Member + Codec,
+	P::Signature: TryFrom> + Member + Codec,
+{
+	run_with_export::<_, P, _, _, _, _, _, _, _, _>(ParamsWithExport { params, export_pov: None })
+}
+
+/// Parameters for [`run_with_export`].
+pub struct ParamsWithExport {
+	/// The parameters.
+	pub params: Params,
+	/// When set, the collator will export every produced `PoV` to this folder.
+	pub export_pov: Option,
+}
+
+/// Run async-backing-friendly Aura.
+///
+/// This is exactly the same as [`run`], but it supports the optional export of each produced `PoV`
+/// to the file system.
+pub fn run_with_export(
+	ParamsWithExport { mut params, export_pov }: ParamsWithExport<
+		BI,
+		CIDP,
+		Client,
+		Backend,
+		RClient,
+		CHP,
+		Proposer,
+		CS,
+	>,
 ) -> impl Future + Send + 'static
 where
 	Block: BlockT,
@@ -339,6 +432,18 @@ where
 			// and provides sybil-resistance, as it should.
 			collator.collator_service().announce_block(new_block_hash, None);

+			if let Some(ref export_pov) = export_pov {
+				export_pov_to_path::(
+					export_pov.clone(),
+					collation.proof_of_validity.clone().into_compressed(),
+					new_block_hash,
+					*block_data.header().number(),
+					parent_header.clone(),
+					*relay_parent_header.state_root(),
+					*relay_parent_header.number(),
+				);
+			}
+
 			// Send a submit-collation message to the collation generation subsystem,
 			// which then distributes this to validators.
 			//
diff --git a/cumulus/polkadot-parachain/src/cli.rs b/cumulus/polkadot-parachain/src/cli.rs
index d06354dda220..a5fe33dffc96 100644
--- a/cumulus/polkadot-parachain/src/cli.rs
+++ b/cumulus/polkadot-parachain/src/cli.rs
@@ -90,6 +90,13 @@ pub struct Cli {
 	#[arg(long)]
 	pub no_hardware_benchmarks: bool,

+	/// Export all `PoVs` built by this collator to the given folder.
+	///
+	/// This is useful for debugging issues that occur while validating these `PoVs` on the
+	/// relay chain.
+	#[arg(long)]
+	pub export_pov_to_path: Option,
+
 	/// Relay chain arguments
 	#[arg(raw = true)]
 	pub relay_chain_args: Vec,
@@ -97,7 +104,10 @@ impl Cli {
 	pub(crate) fn node_extra_args(&self) -> NodeExtraArgs {
-		NodeExtraArgs { use_slot_based_consensus: self.experimental_use_slot_based }
+		NodeExtraArgs {
+			use_slot_based_consensus: self.experimental_use_slot_based,
+			export_pov: self.export_pov_to_path.clone(),
+		}
 	}
 }
diff --git a/cumulus/polkadot-parachain/src/command.rs b/cumulus/polkadot-parachain/src/command.rs
index fcf6c06f4222..e867a41bee2b 100644
--- a/cumulus/polkadot-parachain/src/command.rs
+++ b/cumulus/polkadot-parachain/src/command.rs
@@ -17,8 +17,7 @@
 #[cfg(feature = "runtime-benchmarks")]
 use crate::service::Block;
 use crate::{
-	chain_spec,
-	chain_spec::GenericChainSpec,
+	chain_spec::{self, GenericChainSpec},
 	cli::{Cli, RelayChainCli, Subcommand},
 	common::NodeExtraArgs,
 	fake_runtime_api::{
@@ -388,7 +387,7 @@ impl SubstrateCli for RelayChainCli {
 fn new_node_spec(
 	config: &sc_service::Configuration,
-	extra_args: NodeExtraArgs,
+	extra_args: &NodeExtraArgs,
 ) -> std::result::Result, sc_cli::Error> {
 	Ok(match config.chain_spec.runtime()? {
 		Runtime::AssetHubPolkadot =>
@@ -421,35 +420,35 @@ pub fn run() -> Result<()> {
 		Some(Subcommand::CheckBlock(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let node = new_node_spec(&config, cli.node_extra_args())?;
+				let node = new_node_spec(&config, &cli.node_extra_args())?;
 				node.prepare_check_block_cmd(config, cmd)
 			})
 		},
 		Some(Subcommand::ExportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let node = new_node_spec(&config, cli.node_extra_args())?;
+				let node = new_node_spec(&config, &cli.node_extra_args())?;
 				node.prepare_export_blocks_cmd(config, cmd)
 			})
 		},
 		Some(Subcommand::ExportState(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let node = new_node_spec(&config, cli.node_extra_args())?;
+				let node = new_node_spec(&config, &cli.node_extra_args())?;
 				node.prepare_export_state_cmd(config, cmd)
 			})
 		},
 		Some(Subcommand::ImportBlocks(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let node = new_node_spec(&config, cli.node_extra_args())?;
+				let node = new_node_spec(&config, &cli.node_extra_args())?;
 				node.prepare_import_blocks_cmd(config, cmd)
 			})
 		},
 		Some(Subcommand::Revert(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.async_run(|config| {
-				let node = new_node_spec(&config, cli.node_extra_args())?;
+				let node = new_node_spec(&config, &cli.node_extra_args())?;
 				node.prepare_revert_cmd(config, cmd)
 			})
 		},
@@ -471,7 +470,7 @@
 		Some(Subcommand::ExportGenesisHead(cmd)) => {
 			let runner = cli.create_runner(cmd)?;
 			runner.sync_run(|config| {
-				let node = new_node_spec(&config, cli.node_extra_args())?;
+				let node = new_node_spec(&config, &cli.node_extra_args())?;
 				node.run_export_genesis_head_cmd(config, cmd)
 			})
 		},
@@ -494,12 +493,12 @@
 					))
 				}),
 				BenchmarkCmd::Block(cmd) => runner.sync_run(|config| {
-					let node = new_node_spec(&config, cli.node_extra_args())?;
+					let node = new_node_spec(&config, &cli.node_extra_args())?;
 					node.run_benchmark_block_cmd(config, cmd)
 				}),
 				#[cfg(feature = "runtime-benchmarks")]
 				BenchmarkCmd::Storage(cmd) => runner.sync_run(|config| {
-					let node = new_node_spec(&config, cli.node_extra_args())?;
+					let node = new_node_spec(&config, &cli.node_extra_args())?;
 					node.run_benchmark_storage_cmd(config, cmd)
 				}),
 				BenchmarkCmd::Machine(cmd) =>
@@ -607,9 +606,9 @@ async fn start_node(
 	extra_args: NodeExtraArgs,
 	hwbench: Option,
 ) -> Result {
-	let node_spec = new_node_spec(&config, extra_args)?;
+	let node_spec = new_node_spec(&config, &extra_args)?;
 	node_spec
-		.start_node(config, polkadot_config, collator_options, id, hwbench)
+		.start_node(config, polkadot_config, collator_options, id, hwbench, extra_args)
 		.await
 		.map_err(Into::into)
 }
diff --git a/cumulus/polkadot-parachain/src/common/mod.rs b/cumulus/polkadot-parachain/src/common/mod.rs
index 9f5febafe304..d7718931b872 100644
--- a/cumulus/polkadot-parachain/src/common/mod.rs
+++ b/cumulus/polkadot-parachain/src/common/mod.rs
@@ -26,6 +26,7 @@ use sp_block_builder::BlockBuilder;
 use sp_runtime::traits::Block as BlockT;
 use sp_session::SessionKeys;
 use sp_transaction_pool::runtime_api::TaggedTransactionQueue;
+use std::path::PathBuf;

 /// Convenience trait that defines the basic bounds for the `RuntimeApi` of a parachain node.
 pub trait NodeRuntimeApi:
@@ -69,4 +70,7 @@ where
 /// Extra args that are passed when creating a new node spec.
 pub struct NodeExtraArgs {
 	pub use_slot_based_consensus: bool,
+
+	/// If set, each `PoV` built by the node will be exported to this folder.
+	pub export_pov: Option,
 }
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index 6a6cf15635e0..ef01f7f1f6a6 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -125,6 +125,7 @@ where
 		overseer_handle: OverseerHandle,
 		announce_block: Arc>) + Send + Sync>,
 		backend: Arc,
+		node_extra_args: NodeExtraArgs,
 	) -> Result<(), sc_service::Error>;
 }
@@ -226,6 +227,7 @@ pub(crate) trait NodeSpec {
 		collator_options: CollatorOptions,
 		para_id: ParaId,
 		hwbench: Option,
+		node_extra_args: NodeExtraArgs,
 	) -> Pin>>>
 	where
 		Net: NetworkBackend,
@@ -361,6 +363,7 @@ pub(crate) trait NodeSpec {
 				overseer_handle,
 				announce_block,
 				backend.clone(),
+				node_extra_args,
 			)?;
 		}
@@ -524,7 +527,7 @@ where
 	const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Resistant;
 }

-pub fn new_aura_node_spec(extra_args: NodeExtraArgs) -> Box
+pub fn new_aura_node_spec(extra_args: &NodeExtraArgs) -> Box
 where
 	RuntimeApi: ConstructNodeRuntimeApi>,
 	RuntimeApi::RuntimeApi: AuraRuntimeApi
@@ -567,6 +570,7 @@ impl StartConsensus for StartRelayChainConsensus {
 		overseer_handle: OverseerHandle,
 		announce_block: Arc>) + Send + Sync>,
 		_backend: Arc,
+		_node_extra_args: NodeExtraArgs,
 	) -> Result<(), Error> {
 		let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
 			task_manager.spawn_handle(),
@@ -691,6 +695,7 @@ where
 		_overseer_handle: OverseerHandle,
 		announce_block: Arc>) + Send + Sync>,
 		backend: Arc,
+		_node_extra_args: NodeExtraArgs,
 	) -> Result<(), Error> {
 		let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
 			task_manager.spawn_handle(),
@@ -786,6 +791,7 @@ where
 		overseer_handle: OverseerHandle,
 		announce_block: Arc>) + Send + Sync>,
 		backend: Arc,
+		node_extra_args: NodeExtraArgs,
 	) -> Result<(), Error> {
 		let proposer_factory = sc_basic_authorship::ProposerFactory::with_proof_recording(
 			task_manager.spawn_handle(),
@@ -802,33 +808,37 @@ where
 			client.clone(),
 		);

-		let params = AuraParams {
-			create_inherent_data_providers: move |_, ()| async move { Ok(()) },
-			block_import,
-			para_client: client.clone(),
-			para_backend: backend,
-			relay_client: relay_chain_interface,
-			code_hash_provider: {
-				let client = client.clone();
-				move |block_hash| {
-					client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
-				}
+		let params = aura::ParamsWithExport {
+			export_pov: node_extra_args.export_pov,
+			params: AuraParams {
+				create_inherent_data_providers: move |_, ()| async move { Ok(()) },
+				block_import,
+				para_client: client.clone(),
+				para_backend: backend,
+				relay_client: relay_chain_interface,
+				code_hash_provider: {
+					let client = client.clone();
+					move |block_hash| {
+						client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
+					}
+				},
+				keystore,
+				collator_key,
+				para_id,
+				overseer_handle,
+				relay_chain_slot_duration,
+				proposer: Proposer::new(proposer_factory),
+				collator_service,
+				authoring_duration: Duration::from_millis(1500),
+				reinitialize: false,
 			},
-			keystore,
-			collator_key,
-			para_id,
-			overseer_handle,
-			relay_chain_slot_duration,
-			proposer: Proposer::new(proposer_factory),
-			collator_service,
-			authoring_duration: Duration::from_millis(1500),
-			reinitialize: false,
 		};

-		let fut = async move {
-			wait_for_aura(client).await;
-			aura::run::::Pair, _, _, _, _, _, _, _, _>(params).await;
-		};
+		let fut =
+			async move {
+				wait_for_aura(client).await;
+				aura::run_with_export::::Pair, _, _, _, _, _, _, _, _>(params).await;
+			};
 		task_manager.spawn_essential_handle().spawn("aura", None, fut);

 		Ok(())
@@ -910,6 +920,7 @@ pub(crate) trait DynNodeSpec {
 		collator_options: CollatorOptions,
 		para_id: ParaId,
 		hwbench: Option,
+		node_extra_args: NodeExtraArgs,
 	) -> Pin>>>;
 }
@@ -1000,6 +1011,7 @@ where
 		collator_options: CollatorOptions,
 		para_id: ParaId,
 		hwbench: Option,
+		node_extra_args: NodeExtraArgs,
 	) -> Pin>>> {
 		match parachain_config.network.network_backend {
 			sc_network::config::NetworkBackendType::Libp2p =>
 				::start_node::(
 					parachain_config,
 					polkadot_config,
 					collator_options,
 					para_id,
 					hwbench,
+					node_extra_args,
 				),
 			sc_network::config::NetworkBackendType::Litep2p =>
 				::start_node::(
 					parachain_config,
 					polkadot_config,
 					collator_options,
 					para_id,
 					hwbench,
+					node_extra_args,
 				),
 		}
 	}
diff --git a/prdoc/pr_4640.prdoc b/prdoc/pr_4640.prdoc
new file mode 100644
index 000000000000..52abc8f4baa5
--- /dev/null
+++ b/prdoc/pr_4640.prdoc
@@ -0,0 +1,20 @@
+title: Introduce tool for validating PoVs locally
+
+doc:
+  - audience:
+      - Runtime Dev
+      - Node Dev
+    description: |
+      Introduces the `cumulus-pov-validator` for validating PoVs locally. This can be helpful for debugging issues that
+      only happen when the PoV gets validated on the relay chain, or for example to profile the validation code.
+      Besides that, the `polkadot-parachain` was extended with the CLI flag `--export-pov-to-path` to let a collator
+      export all the PoVs it builds to the given directory. These PoVs can then be fed into the `cumulus-pov-validator`.
+
+crates:
+  - name: polkadot-parachain-bin
+    bump: minor
+  - name: cumulus-client-consensus-aura
+    bump: minor
+  - name: cumulus-pov-validator
+    bump: patch
+    validate: false
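
Note on the exported file format: `export_pov_to_path` above writes four SCALE-encoded items back to back, and `cumulus-pov-validator` decodes them in the same order. The following is a minimal illustrative sketch of reading such a file back, not part of the patch; it assumes the same `codec` alias and `polkadot-node-primitives`/`polkadot-primitives` types that the patch itself uses.

use codec::Decode;
use polkadot_node_primitives::PoV;
use polkadot_primitives::{BlockNumber, Hash, HeadData};

/// Decode the four items written by `export_pov_to_path`, in the order the
/// validator tool reads them back.
fn decode_exported_pov(
	mut bytes: &[u8],
) -> Result<(PoV, HeadData, Hash, BlockNumber), codec::Error> {
	// 1. The (compressed) proof of validity.
	let pov = PoV::decode(&mut bytes)?;
	// 2. The encoded parent header, wrapped as `HeadData`.
	let parent_head = HeadData::decode(&mut bytes)?;
	// 3. The relay parent storage root.
	let relay_parent_storage_root = Hash::decode(&mut bytes)?;
	// 4. The relay parent block number.
	let relay_parent_number = BlockNumber::decode(&mut bytes)?;
	Ok((pov, parent_head, relay_parent_storage_root, relay_parent_number))
}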