,
- /// Transaction pool instance.
- pub pool: Arc<P>,
- /// Whether to deny unsafe calls
- pub deny_unsafe: DenyUnsafe,
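+/// Builds the RPC extensions for a node, given its client, backend and transaction pool.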
+pub(crate) trait BuildRpcExtensions<Client, Backend, Pool> {
+ fn build_rpc_extensions(
+ deny_unsafe: DenyUnsafe,
+ client: Arc<Client>,
+ backend: Arc<Backend>,
+ pool: Arc<Pool>,
+ ) -> sc_service::error::Result<RpcExtension>;
}
-/// Instantiate all RPC extensions.
-pub fn create_full<C, P, B>(
- deps: FullDeps<C, P>,
- backend: Arc<B>,
-) -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>>
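+/// An RPC-extensions builder that registers no RPC methods and returns an empty module.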
+pub(crate) struct BuildEmptyRpcExtensions<RuntimeApi>(PhantomData<RuntimeApi>);
+
+impl<RuntimeApi>
+ BuildRpcExtensions<
+ ParachainClient<RuntimeApi>,
+ ParachainBackend,
+ sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>,
+ > for BuildEmptyRpcExtensions<RuntimeApi>
where
- C: ProvideRuntimeApi<Block>
- + HeaderBackend<Block>
- + AuxStore
- + HeaderMetadata<Block, Error = BlockChainError>
- + Send
- + Sync
- + 'static,
- C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
- C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
- C::Api: BlockBuilder<Block>,
- P: TransactionPool + Sync + Send + 'static,
- B: sc_client_api::Backend<Block> + Send + Sync + 'static,
- B::State: sc_client_api::backend::StateBackend<sp_runtime::traits::HashingFor<Block>>,
+ RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
{
- use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
- use substrate_frame_rpc_system::{System, SystemApiServer};
- use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer};
-
- let mut module = RpcExtension::new(());
- let FullDeps { client, pool, deny_unsafe } = deps;
-
- module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
- module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
- module.merge(StateMigration::new(client, backend, deny_unsafe).into_rpc())?;
-
- Ok(module)
+ fn build_rpc_extensions(
+ _deny_unsafe: DenyUnsafe,
+ _client: Arc<ParachainClient<RuntimeApi>>,
+ _backend: Arc<ParachainBackend>,
+ _pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
+ ) -> sc_service::error::Result<RpcExtension> {
+ Ok(RpcExtension::new(()))
+ }
}
-/// Instantiate all RPCs we want at the contracts-rococo chain.
-pub fn create_contracts_rococo<C, P>(
- deps: FullDeps<C, P>,
-) -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>>
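+/// Builds the standard parachain RPC extensions: system, transaction-payment, state-migration and dev RPCs.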
+pub(crate) struct BuildParachainRpcExtensions<RuntimeApi>(PhantomData<RuntimeApi>);
+
+impl<RuntimeApi>
+ BuildRpcExtensions<
+ ParachainClient<RuntimeApi>,
+ ParachainBackend,
+ sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>,
+ > for BuildParachainRpcExtensions<RuntimeApi>
where
- C: ProvideRuntimeApi<Block>
- + sc_client_api::BlockBackend<Block>
- + HeaderBackend<Block>
- + AuxStore
- + HeaderMetadata<Block, Error = BlockChainError>
- + Send
- + Sync
- + 'static,
- C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
- C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>,
- C::Api: BlockBuilder<Block>,
- P: TransactionPool + Sync + Send + 'static,
+ RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
+ RuntimeApi::RuntimeApi: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
+ + substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
{
- use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
- use sc_rpc::dev::{Dev, DevApiServer};
- use substrate_frame_rpc_system::{System, SystemApiServer};
-
- let mut module = RpcExtension::new(());
- let FullDeps { client, pool, deny_unsafe } = deps;
-
- module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
- module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
- module.merge(Dev::new(client, deny_unsafe).into_rpc())?;
-
- Ok(module)
+ fn build_rpc_extensions(
+ deny_unsafe: DenyUnsafe,
+ client: Arc<ParachainClient<RuntimeApi>>,
+ backend: Arc<ParachainBackend>,
+ pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
+ ) -> sc_service::error::Result<RpcExtension> {
+ let build = || -> Result<RpcExtension, Box<dyn std::error::Error + Send + Sync>> {
+ let mut module = RpcExtension::new(());
+
+ module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
+ module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
+ module.merge(StateMigration::new(client.clone(), backend, deny_unsafe).into_rpc())?;
+ module.merge(Dev::new(client, deny_unsafe).into_rpc())?;
+
+ Ok(module)
+ };
+ build().map_err(Into::into)
+ }
}
diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index 0f2aed8ee4d8..f5f6189d1f0d 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License
// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
-use cumulus_client_cli::CollatorOptions;
+use cumulus_client_cli::{CollatorOptions, ExportGenesisHeadCommand};
use cumulus_client_collator::service::CollatorService;
use cumulus_client_consensus_aura::collators::{
lookahead::{self as aura, Params as AuraParams},
@@ -22,6 +22,7 @@ use cumulus_client_consensus_aura::collators::{
};
use cumulus_client_consensus_common::ParachainBlockImport as TParachainBlockImport;
use cumulus_client_consensus_proposer::Proposer;
+use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
#[allow(deprecated)]
use cumulus_client_service::old_consensus;
use cumulus_client_service::{
@@ -30,39 +31,40 @@ use cumulus_client_service::{
};
use cumulus_primitives_core::{relay_chain::ValidationCode, ParaId};
use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
-use sc_rpc::DenyUnsafe;
-
-use jsonrpsee::RpcModule;
use crate::{
common::{
aura::{AuraIdT, AuraRuntimeApi},
- ConstructNodeRuntimeApi,
+ ConstructNodeRuntimeApi, NodeExtraArgs,
},
fake_runtime_api::aura::RuntimeApi as FakeRuntimeApi,
- rpc,
+ rpc::BuildRpcExtensions,
};
-pub use parachains_common::{AccountId, AuraId, Balance, Block, Hash, Nonce};
+pub use parachains_common::{AccountId, Balance, Block, Hash, Nonce};
-use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
+use crate::rpc::{BuildEmptyRpcExtensions, BuildParachainRpcExtensions};
+use frame_benchmarking_cli::BlockCmd;
+#[cfg(any(feature = "runtime-benchmarks"))]
+use frame_benchmarking_cli::StorageCmd;
use futures::prelude::*;
+use polkadot_primitives::CollatorPair;
use prometheus_endpoint::Registry;
+use sc_cli::{CheckBlockCmd, ExportBlocksCmd, ExportStateCmd, ImportBlocksCmd, RevertCmd};
use sc_client_api::BlockchainEvents;
use sc_consensus::{
import_queue::{BasicQueue, Verifier as VerifierT},
- BlockImportParams, ImportQueue,
+ BlockImportParams, DefaultImportQueue, ImportQueue,
};
use sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY};
use sc_network::{config::FullNetworkConfiguration, service::traits::NetworkBackend, NetworkBlock};
-use sc_service::{Configuration, PartialComponents, TFullBackend, TFullClient, TaskManager};
+use sc_service::{Configuration, Error, PartialComponents, TFullBackend, TFullClient, TaskManager};
+use sc_sysinfo::HwBench;
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker, TelemetryWorkerHandle};
-use sp_api::{ApiExt, ConstructRuntimeApi, ProvideRuntimeApi};
-use sp_consensus_aura::AuraApi;
+use sc_transaction_pool::FullPool;
+use sp_api::ProvideRuntimeApi;
use sp_keystore::KeystorePtr;
use sp_runtime::{app_crypto::AppCrypto, traits::Header as HeaderT};
-use std::{marker::PhantomData, sync::Arc, time::Duration};
-
-use polkadot_primitives::CollatorPair;
+use std::{marker::PhantomData, pin::Pin, sync::Arc, time::Duration};
#[cfg(not(feature = "runtime-benchmarks"))]
type HostFunctions = cumulus_client_service::ParachainHostFunctions;
@@ -73,9 +75,9 @@ type HostFunctions = (
frame_benchmarking::benchmarking::HostFunctions,
);
-type ParachainClient<RuntimeApi> = TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;
+pub type ParachainClient<RuntimeApi> = TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;
-type ParachainBackend = TFullBackend<Block>;
+pub type ParachainBackend = TFullBackend<Block>;
type ParachainBlockImport<RuntimeApi> =
 TParachainBlockImport<Block, Arc<ParachainClient<RuntimeApi>>, ParachainBackend>;
@@ -90,413 +92,312 @@ pub type Service<RuntimeApi> = PartialComponents<
 (ParachainBlockImport<RuntimeApi>, Option<Telemetry>, Option<TelemetryWorkerHandle>),
>;
-/// Starts a `ServiceBuilder` for a full service.
-///
-/// Use this macro if you don't actually need the full service, but just the builder in order to
-/// be able to perform chain operations.
-pub fn new_partial<RuntimeApi, BIQ>(
- config: &Configuration,
- build_import_queue: BIQ,
-) -> Result<Service<RuntimeApi>, sc_service::Error>
-where
- RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
- BIQ: FnOnce(
- Arc<ParachainClient<RuntimeApi>>,
- ParachainBlockImport<RuntimeApi>,
- &Configuration,
- Option<TelemetryHandle>,
- &TaskManager,
- ) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error>,
-{
- let telemetry = config
- .telemetry_endpoints
- .clone()
- .filter(|x| !x.is_empty())
- .map(|endpoints| -> Result<_, sc_telemetry::Error> {
- let worker = TelemetryWorker::new(16)?;
- let telemetry = worker.handle().new_telemetry(endpoints);
- Ok((worker, telemetry))
- })
- .transpose()?;
-
- let heap_pages = config
- .default_heap_pages
- .map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| HeapAllocStrategy::Static { extra_pages: h as _ });
-
- let executor = sc_executor::WasmExecutor::<HostFunctions>::builder()
- .with_execution_method(config.wasm_method)
- .with_max_runtime_instances(config.max_runtime_instances)
- .with_runtime_cache_size(config.runtime_cache_size)
- .with_onchain_heap_alloc_strategy(heap_pages)
- .with_offchain_heap_alloc_strategy(heap_pages)
- .build();
-
- let (client, backend, keystore_container, task_manager) =
- sc_service::new_full_parts_record_import::<Block, RuntimeApi, _>(
- config,
- telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
- executor,
- true,
- )?;
- let client = Arc::new(client);
-
- let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
-
- let telemetry = telemetry.map(|(worker, telemetry)| {
- task_manager.spawn_handle().spawn("telemetry", None, worker.run());
- telemetry
- });
-
- let transaction_pool = sc_transaction_pool::BasicPool::new_full(
- config.transaction_pool.clone(),
- config.role.is_authority().into(),
- config.prometheus_registry(),
- task_manager.spawn_essential_handle(),
- client.clone(),
- );
-
- let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
-
- let import_queue = build_import_queue(
- client.clone(),
- block_import.clone(),
- config,
- telemetry.as_ref().map(|telemetry| telemetry.handle()),
- &task_manager,
- )?;
-
- Ok(PartialComponents {
- backend,
- client,
- import_queue,
- keystore_container,
- task_manager,
- transaction_pool,
- select_chain: (),
- other: (block_import, telemetry, telemetry_worker_handle),
- })
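+/// Builds the import queue for a node, from its client and block import.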
+pub(crate) trait BuildImportQueue<RuntimeApi> {
+ fn build_import_queue(
+ client: Arc<ParachainClient<RuntimeApi>>,
+ block_import: ParachainBlockImport<RuntimeApi>,
+ config: &Configuration,
+ telemetry_handle: Option<TelemetryHandle>,
+ task_manager: &TaskManager,
+ ) -> sc_service::error::Result<DefaultImportQueue<Block>>;
}
-/// Start a node with the given parachain `Configuration` and relay chain `Configuration`.
-///
-/// This is the actual implementation that is abstract over the executor and the runtime api.
-#[sc_tracing::logging::prefix_logs_with("Parachain")]
-async fn start_node_impl<RuntimeApi, RB, BIQ, SC, Net>(
- parachain_config: Configuration,
- polkadot_config: Configuration,
- collator_options: CollatorOptions,
- sybil_resistance_level: CollatorSybilResistance,
- para_id: ParaId,
- rpc_ext_builder: RB,
- build_import_queue: BIQ,
- start_consensus: SC,
- hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<RuntimeApi>>)>
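+/// Starts the node's consensus once all the other service components have been built.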
+pub(crate) trait StartConsensus<RuntimeApi>
where
RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
- RB: Fn(
- DenyUnsafe,
- Arc<ParachainClient<RuntimeApi>>,
- Arc<ParachainBackend>,
- Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
- ) -> Result<RpcModule<()>, sc_service::Error>
- + 'static,
- BIQ: FnOnce(
- Arc<ParachainClient<RuntimeApi>>,
- ParachainBlockImport<RuntimeApi>,
- &Configuration,
- Option<TelemetryHandle>,
- &TaskManager,
- ) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error>,
- SC: FnOnce(
- Arc<ParachainClient<RuntimeApi>>,
- ParachainBlockImport<RuntimeApi>,
- Option<&Registry>,
- Option<TelemetryHandle>,
- &TaskManager,
- Arc<dyn RelayChainInterface>,
- Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
- KeystorePtr,
- Duration,
- ParaId,
- CollatorPair,
- OverseerHandle,
- Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
- Arc<ParachainBackend>,
- ) -> Result<(), sc_service::Error>,
- Net: NetworkBackend<Block, Hash>,
{
- let parachain_config = prepare_node_config(parachain_config);
-
- let params = new_partial::<RuntimeApi, BIQ>(&parachain_config, build_import_queue)?;
- let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
-
- let client = params.client.clone();
- let backend = params.backend.clone();
-
- let mut task_manager = params.task_manager;
- let (relay_chain_interface, collator_key) = build_relay_chain_interface(
- polkadot_config,
- &parachain_config,
- telemetry_worker_handle,
- &mut task_manager,
- collator_options.clone(),
- hwbench.clone(),
- )
- .await
- .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
-
- let validator = parachain_config.role.is_authority();
- let prometheus_registry = parachain_config.prometheus_registry().cloned();
- let transaction_pool = params.transaction_pool.clone();
- let import_queue_service = params.import_queue.service();
- let net_config = FullNetworkConfiguration::<_, _, Net>::new(&parachain_config.network);
-
- let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
- build_network(BuildNetworkParams {
- parachain_config: ¶chain_config,
- net_config,
- client: client.clone(),
- transaction_pool: transaction_pool.clone(),
- para_id,
- spawn_handle: task_manager.spawn_handle(),
- relay_chain_interface: relay_chain_interface.clone(),
- import_queue: params.import_queue,
- sybil_resistance_level,
- })
- .await?;
-
- let rpc_builder = {
- let client = client.clone();
- let transaction_pool = transaction_pool.clone();
- let backend_for_rpc = backend.clone();
-
- Box::new(move |deny_unsafe, _| {
- rpc_ext_builder(
- deny_unsafe,
- client.clone(),
- backend_for_rpc.clone(),
- transaction_pool.clone(),
- )
- })
- };
-
- sc_service::spawn_tasks(sc_service::SpawnTasksParams {
- rpc_builder,
- client: client.clone(),
- transaction_pool: transaction_pool.clone(),
- task_manager: &mut task_manager,
- config: parachain_config,
- keystore: params.keystore_container.keystore(),
- backend: backend.clone(),
- network: network.clone(),
- sync_service: sync_service.clone(),
- system_rpc_tx,
- tx_handler_controller,
- telemetry: telemetry.as_mut(),
- })?;
-
- if let Some(hwbench) = hwbench {
- sc_sysinfo::print_hwbench(&hwbench);
- if validator {
- warn_if_slow_hardware(&hwbench);
- }
-
- if let Some(ref mut telemetry) = telemetry {
- let telemetry_handle = telemetry.handle();
- task_manager.spawn_handle().spawn(
- "telemetry_hwbench",
- None,
- sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
- );
- }
- }
-
- let announce_block = {
- let sync_service = sync_service.clone();
- Arc::new(move |hash, data| sync_service.announce_block(hash, data))
- };
+ fn start_consensus(
+ client: Arc<ParachainClient<RuntimeApi>>,
+ block_import: ParachainBlockImport<RuntimeApi>,
+ prometheus_registry: Option<&Registry>,
+ telemetry: Option<TelemetryHandle>,
+ task_manager: &TaskManager,
+ relay_chain_interface: Arc<dyn RelayChainInterface>,
+ transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
+ keystore: KeystorePtr,
+ relay_chain_slot_duration: Duration,
+ para_id: ParaId,
+ collator_key: CollatorPair,
+ overseer_handle: OverseerHandle,
+ announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,
+ backend: Arc<ParachainBackend>,
+ ) -> Result<(), sc_service::Error>;
+}
- let relay_chain_slot_duration = Duration::from_secs(6);
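+/// Describes a parachain node variant: its runtime API, import-queue and RPC builders, how its
+/// consensus is started, and the sybil resistance of that consensus.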
+pub(crate) trait NodeSpec {
+ type RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<Self::RuntimeApi>>;
+
+ type BuildImportQueue: BuildImportQueue<Self::RuntimeApi> + 'static;
+
+ type BuildRpcExtensions: BuildRpcExtensions<
+ ParachainClient<Self::RuntimeApi>,
+ ParachainBackend,
+ sc_transaction_pool::FullPool<Block, ParachainClient<Self::RuntimeApi>>,
+ > + 'static;
+
+ type StartConsensus: StartConsensus<Self::RuntimeApi> + 'static;
+
+ const SYBIL_RESISTANCE: CollatorSybilResistance;
+
+ /// Starts a `ServiceBuilder` for a full service.
+ ///
+ /// Use this function if you don't actually need the full service, but just the builder in order to
+ /// be able to perform chain operations.
+ fn new_partial(config: &Configuration) -> sc_service::error::Result<Service<Self::RuntimeApi>> {
+ let telemetry = config
+ .telemetry_endpoints
+ .clone()
+ .filter(|x| !x.is_empty())
+ .map(|endpoints| -> Result<_, sc_telemetry::Error> {
+ let worker = TelemetryWorker::new(16)?;
+ let telemetry = worker.handle().new_telemetry(endpoints);
+ Ok((worker, telemetry))
+ })
+ .transpose()?;
+
+ let heap_pages = config.default_heap_pages.map_or(DEFAULT_HEAP_ALLOC_STRATEGY, |h| {
+ HeapAllocStrategy::Static { extra_pages: h as _ }
+ });
+
+ let executor = sc_executor::WasmExecutor::<HostFunctions>::builder()
+ .with_execution_method(config.wasm_method)
+ .with_max_runtime_instances(config.max_runtime_instances)
+ .with_runtime_cache_size(config.runtime_cache_size)
+ .with_onchain_heap_alloc_strategy(heap_pages)
+ .with_offchain_heap_alloc_strategy(heap_pages)
+ .build();
+
+ let (client, backend, keystore_container, task_manager) =
+ sc_service::new_full_parts_record_import::<Block, Self::RuntimeApi, _>(
+ config,
+ telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
+ executor,
+ true,
+ )?;
+ let client = Arc::new(client);
+
+ let telemetry_worker_handle = telemetry.as_ref().map(|(worker, _)| worker.handle());
+
+ let telemetry = telemetry.map(|(worker, telemetry)| {
+ task_manager.spawn_handle().spawn("telemetry", None, worker.run());
+ telemetry
+ });
+
+ let transaction_pool = sc_transaction_pool::BasicPool::new_full(
+ config.transaction_pool.clone(),
+ config.role.is_authority().into(),
+ config.prometheus_registry(),
+ task_manager.spawn_essential_handle(),
+ client.clone(),
+ );
- let overseer_handle = relay_chain_interface
- .overseer_handle()
- .map_err(|e| sc_service::Error::Application(Box::new(e)))?;
+ let block_import = ParachainBlockImport::new(client.clone(), backend.clone());
- start_relay_chain_tasks(StartRelayChainTasksParams {
- client: client.clone(),
- announce_block: announce_block.clone(),
- para_id,
- relay_chain_interface: relay_chain_interface.clone(),
- task_manager: &mut task_manager,
- da_recovery_profile: if validator {
- DARecoveryProfile::Collator
- } else {
- DARecoveryProfile::FullNode
- },
- import_queue: import_queue_service,
- relay_chain_slot_duration,
- recovery_handle: Box::new(overseer_handle.clone()),
- sync_service: sync_service.clone(),
- })?;
-
- if validator {
- start_consensus(
+ let import_queue = Self::BuildImportQueue::build_import_queue(
client.clone(),
- block_import,
- prometheus_registry.as_ref(),
- telemetry.as_ref().map(|t| t.handle()),
+ block_import.clone(),
+ config,
+ telemetry.as_ref().map(|telemetry| telemetry.handle()),
&task_manager,
- relay_chain_interface.clone(),
- transaction_pool,
- params.keystore_container.keystore(),
- relay_chain_slot_duration,
- para_id,
- collator_key.expect("Command line arguments do not allow this. qed"),
- overseer_handle,
- announce_block,
- backend.clone(),
)?;
+
+ Ok(PartialComponents {
+ backend,
+ client,
+ import_queue,
+ keystore_container,
+ task_manager,
+ transaction_pool,
+ select_chain: (),
+ other: (block_import, telemetry, telemetry_worker_handle),
+ })
}
- start_network.start_network();
+ /// Start a node with the given parachain spec.
+ ///
+ /// This is the actual implementation that is abstract over the executor and the runtime api.
+ fn start_node<Net>(
+ parachain_config: Configuration,
+ polkadot_config: Configuration,
+ collator_options: CollatorOptions,
+ para_id: ParaId,
+ hwbench: Option<sc_sysinfo::HwBench>,
+ ) -> Pin<Box<dyn Future<Output = sc_service::error::Result<TaskManager>>>>
+ where
+ Net: NetworkBackend<Block, Hash>,
+ {
+ Box::pin(async move {
+ let parachain_config = prepare_node_config(parachain_config);
+
+ let params = Self::new_partial(&parachain_config)?;
+ let (block_import, mut telemetry, telemetry_worker_handle) = params.other;
+
+ let client = params.client.clone();
+ let backend = params.backend.clone();
+
+ let mut task_manager = params.task_manager;
+ let (relay_chain_interface, collator_key) = build_relay_chain_interface(
+ polkadot_config,
+ ¶chain_config,
+ telemetry_worker_handle,
+ &mut task_manager,
+ collator_options.clone(),
+ hwbench.clone(),
+ )
+ .await
+ .map_err(|e| sc_service::Error::Application(Box::new(e) as Box<_>))?;
+
+ let validator = parachain_config.role.is_authority();
+ let prometheus_registry = parachain_config.prometheus_registry().cloned();
+ let transaction_pool = params.transaction_pool.clone();
+ let import_queue_service = params.import_queue.service();
+ let net_config = FullNetworkConfiguration::<_, _, Net>::new(&parachain_config.network);
+
+ let (network, system_rpc_tx, tx_handler_controller, start_network, sync_service) =
+ build_network(BuildNetworkParams {
+ parachain_config: &parachain_config,
+ net_config,
+ client: client.clone(),
+ transaction_pool: transaction_pool.clone(),
+ para_id,
+ spawn_handle: task_manager.spawn_handle(),
+ relay_chain_interface: relay_chain_interface.clone(),
+ import_queue: params.import_queue,
+ sybil_resistance_level: Self::SYBIL_RESISTANCE,
+ })
+ .await?;
+
+ let rpc_builder = {
+ let client = client.clone();
+ let transaction_pool = transaction_pool.clone();
+ let backend_for_rpc = backend.clone();
+
+ Box::new(move |deny_unsafe, _| {
+ Self::BuildRpcExtensions::build_rpc_extensions(
+ deny_unsafe,
+ client.clone(),
+ backend_for_rpc.clone(),
+ transaction_pool.clone(),
+ )
+ })
+ };
+
+ sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+ rpc_builder,
+ client: client.clone(),
+ transaction_pool: transaction_pool.clone(),
+ task_manager: &mut task_manager,
+ config: parachain_config,
+ keystore: params.keystore_container.keystore(),
+ backend: backend.clone(),
+ network: network.clone(),
+ sync_service: sync_service.clone(),
+ system_rpc_tx,
+ tx_handler_controller,
+ telemetry: telemetry.as_mut(),
+ })?;
+
+ if let Some(hwbench) = hwbench {
+ sc_sysinfo::print_hwbench(&hwbench);
+ if validator {
+ warn_if_slow_hardware(&hwbench);
+ }
- Ok((task_manager, client))
-}
+ if let Some(ref mut telemetry) = telemetry {
+ let telemetry_handle = telemetry.handle();
+ task_manager.spawn_handle().spawn(
+ "telemetry_hwbench",
+ None,
+ sc_sysinfo::initialize_hwbench_telemetry(telemetry_handle, hwbench),
+ );
+ }
+ }
-/// Build the import queue for Aura-based runtimes.
-pub fn build_aura_import_queue(
- client: Arc<ParachainClient<FakeRuntimeApi>>,
- block_import: ParachainBlockImport<FakeRuntimeApi>,
- config: &Configuration,
- telemetry: Option<TelemetryHandle>,
- task_manager: &TaskManager,
-) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error> {
- let slot_duration = cumulus_client_consensus_aura::slot_duration(&*client)?;
-
- cumulus_client_consensus_aura::import_queue::<
- sp_consensus_aura::sr25519::AuthorityPair,
- _,
- _,
- _,
- _,
- _,
- >(cumulus_client_consensus_aura::ImportQueueParams {
- block_import,
- client,
- create_inherent_data_providers: move |_, _| async move {
- let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
- let slot =
- sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
- *timestamp,
- slot_duration,
- );
-
- Ok((slot, timestamp))
- },
- registry: config.prometheus_registry(),
- spawner: &task_manager.spawn_essential_handle(),
- telemetry,
- })
- .map_err(Into::into)
-}
+ let announce_block = {
+ let sync_service = sync_service.clone();
+ Arc::new(move |hash, data| sync_service.announce_block(hash, data))
+ };
+
+ let relay_chain_slot_duration = Duration::from_secs(6);
+
+ let overseer_handle = relay_chain_interface
+ .overseer_handle()
+ .map_err(|e| sc_service::Error::Application(Box::new(e)))?;
+
+ start_relay_chain_tasks(StartRelayChainTasksParams {
+ client: client.clone(),
+ announce_block: announce_block.clone(),
+ para_id,
+ relay_chain_interface: relay_chain_interface.clone(),
+ task_manager: &mut task_manager,
+ da_recovery_profile: if validator {
+ DARecoveryProfile::Collator
+ } else {
+ DARecoveryProfile::FullNode
+ },
+ import_queue: import_queue_service,
+ relay_chain_slot_duration,
+ recovery_handle: Box::new(overseer_handle.clone()),
+ sync_service,
+ })?;
+
+ if validator {
+ Self::StartConsensus::start_consensus(
+ client.clone(),
+ block_import,
+ prometheus_registry.as_ref(),
+ telemetry.as_ref().map(|t| t.handle()),
+ &task_manager,
+ relay_chain_interface.clone(),
+ transaction_pool,
+ params.keystore_container.keystore(),
+ relay_chain_slot_duration,
+ para_id,
+ collator_key.expect("Command line arguments do not allow this. qed"),
+ overseer_handle,
+ announce_block,
+ backend.clone(),
+ )?;
+ }
-/// Start a rococo parachain node.
-pub async fn start_rococo_parachain_node<Net: NetworkBackend<Block, Hash>>(
- parachain_config: Configuration,
- polkadot_config: Configuration,
- collator_options: CollatorOptions,
- para_id: ParaId,
- use_experimental_slot_based: bool,
- hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
- let consensus_starter = if use_experimental_slot_based {
- start_slot_based_aura_consensus::<_, AuraId>
- } else {
- start_lookahead_aura_consensus::<_, AuraId>
- };
- start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
- parachain_config,
- polkadot_config,
- collator_options,
- CollatorSybilResistance::Resistant, // Aura
- para_id,
- build_parachain_rpc_extensions::<FakeRuntimeApi>,
- build_aura_import_queue,
- consensus_starter,
- hwbench,
- )
- .await
-}
+ start_network.start_network();
-/// Build the import queue for the shell runtime.
-pub fn build_shell_import_queue(
- client: Arc<ParachainClient<FakeRuntimeApi>>,
- block_import: ParachainBlockImport<FakeRuntimeApi>,
- config: &Configuration,
- _: Option<TelemetryHandle>,
- task_manager: &TaskManager,
-) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error> {
- cumulus_client_consensus_relay_chain::import_queue(
- client,
- block_import,
- |_, _| async { Ok(()) },
- &task_manager.spawn_essential_handle(),
- config.prometheus_registry(),
- )
- .map_err(Into::into)
+ Ok(task_manager)
+ })
+ }
}
-fn build_parachain_rpc_extensions<RuntimeApi>(
- deny_unsafe: sc_rpc::DenyUnsafe,
- client: Arc<ParachainClient<RuntimeApi>>,
- backend: Arc<ParachainBackend>,
- pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<RuntimeApi>>>,
-) -> Result<RpcModule<()>, sc_service::Error>
-where
- RuntimeApi: ConstructRuntimeApi<Block, ParachainClient<RuntimeApi>> + Send + Sync + 'static,
- RuntimeApi::RuntimeApi: sp_transaction_pool::runtime_api::TaggedTransactionQueue<Block>
- + sp_block_builder::BlockBuilder<Block>
- + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
- + substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
-{
- let deps = rpc::FullDeps { client, pool, deny_unsafe };
-
- rpc::create_full(deps, backend).map_err(Into::into)
+/// Build the import queue for the shell runtime.
+pub(crate) struct BuildShellImportQueue<RuntimeApi>(PhantomData<RuntimeApi>);
+
+impl BuildImportQueue<FakeRuntimeApi> for BuildShellImportQueue<FakeRuntimeApi> {
+ fn build_import_queue(
+ client: Arc<ParachainClient<FakeRuntimeApi>>,
+ block_import: ParachainBlockImport<FakeRuntimeApi>,
+ config: &Configuration,
+ _telemetry_handle: Option<TelemetryHandle>,
+ task_manager: &TaskManager,
+ ) -> sc_service::error::Result<DefaultImportQueue<Block>> {
+ cumulus_client_consensus_relay_chain::import_queue(
+ client,
+ block_import,
+ |_, _| async { Ok(()) },
+ &task_manager.spawn_essential_handle(),
+ config.prometheus_registry(),
+ )
+ .map_err(Into::into)
+ }
}
-fn build_contracts_rpc_extensions(
- deny_unsafe: sc_rpc::DenyUnsafe,
- client: Arc<ParachainClient<FakeRuntimeApi>>,
- _backend: Arc<ParachainBackend>,
- pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<FakeRuntimeApi>>>,
-) -> Result<RpcModule<()>, sc_service::Error> {
- let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe };
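+/// Node implementation for the shell runtime: no RPC extensions and free-for-all relay chain consensus.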
+pub(crate) struct ShellNode;
- crate::rpc::create_contracts_rococo(deps).map_err(Into::into)
-}
+impl NodeSpec for ShellNode {
+ type RuntimeApi = FakeRuntimeApi;
+ type BuildImportQueue = BuildShellImportQueue<FakeRuntimeApi>;
+ type BuildRpcExtensions = BuildEmptyRpcExtensions<FakeRuntimeApi>;
+ type StartConsensus = StartRelayChainConsensus;
-/// Start a polkadot-shell parachain node.
-pub async fn start_shell_node<Net: NetworkBackend<Block, Hash>>(
- parachain_config: Configuration,
- polkadot_config: Configuration,
- collator_options: CollatorOptions,
- para_id: ParaId,
- hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
- start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
- parachain_config,
- polkadot_config,
- collator_options,
- CollatorSybilResistance::Unresistant, // free-for-all consensus
- para_id,
- |_, _, _, _| Ok(RpcModule::new(())),
- build_shell_import_queue,
- start_relay_chain_consensus,
- hwbench,
- )
- .await
+ const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Unresistant;
}
struct Verifier<Client, AuraId> {
@@ -527,435 +428,374 @@ where
/// Build the import queue for parachain runtimes that started with relay chain consensus and
/// switched to aura.
-pub fn build_relay_to_aura_import_queue<RuntimeApi, AuraId>(
- client: Arc<ParachainClient<RuntimeApi>>,
- block_import: ParachainBlockImport<RuntimeApi>,
- config: &Configuration,
- telemetry_handle: Option<TelemetryHandle>,
- task_manager: &TaskManager,
-) -> Result<sc_consensus::DefaultImportQueue<Block>, sc_service::Error>
+pub(crate) struct BuildRelayToAuraImportQueue<RuntimeApi, AuraId>(
+ PhantomData<(RuntimeApi, AuraId)>,
+);
+
+impl<RuntimeApi, AuraId> BuildImportQueue<RuntimeApi>
+ for BuildRelayToAuraImportQueue<RuntimeApi, AuraId>
where
RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>,
AuraId: AuraIdT + Sync,
{
- let verifier_client = client.clone();
-
- let aura_verifier = cumulus_client_consensus_aura::build_verifier::<
- <AuraId as AppCrypto>::Pair,
- _,
- _,
- _,
- >(cumulus_client_consensus_aura::BuildVerifierParams {
- client: verifier_client.clone(),
- create_inherent_data_providers: move |parent_hash, _| {
- let cidp_client = verifier_client.clone();
- async move {
- let slot_duration =
- cumulus_client_consensus_aura::slot_duration_at(&*cidp_client, parent_hash)?;
- let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
-
- let slot =
- sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
- *timestamp,
- slot_duration,
- );
+ fn build_import_queue(
+ client: Arc<ParachainClient<RuntimeApi>>,
+ block_import: ParachainBlockImport<RuntimeApi>,
+ config: &Configuration,
+ telemetry_handle: Option<TelemetryHandle>,
+ task_manager: &TaskManager,
+ ) -> sc_service::error::Result<DefaultImportQueue<Block>> {
+ let verifier_client = client.clone();
+
+ let aura_verifier =
+ cumulus_client_consensus_aura::build_verifier::<<AuraId as AppCrypto>::Pair, _, _, _>(
+ cumulus_client_consensus_aura::BuildVerifierParams {
+ client: verifier_client.clone(),
+ create_inherent_data_providers: move |parent_hash, _| {
+ let cidp_client = verifier_client.clone();
+ async move {
+ let slot_duration = cumulus_client_consensus_aura::slot_duration_at(
+ &*cidp_client,
+ parent_hash,
+ )?;
+ let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
+
+ let slot =
+ sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
+ *timestamp,
+ slot_duration,
+ );
+
+ Ok((slot, timestamp))
+ }
+ },
+ telemetry: telemetry_handle,
+ },
+ );
- Ok((slot, timestamp))
- }
- },
- telemetry: telemetry_handle,
- });
-
- let relay_chain_verifier =
- Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) })) as Box<_>;
-
- let verifier = Verifier {
- client,
- relay_chain_verifier,
- aura_verifier: Box::new(aura_verifier),
- _phantom: PhantomData,
- };
+ let relay_chain_verifier =
+ Box::new(RelayChainVerifier::new(client.clone(), |_, _| async { Ok(()) }));
+
+ let verifier = Verifier {
+ client,
+ relay_chain_verifier,
+ aura_verifier: Box::new(aura_verifier),
+ _phantom: PhantomData,
+ };
- let registry = config.prometheus_registry();
- let spawner = task_manager.spawn_essential_handle();
+ let registry = config.prometheus_registry();
+ let spawner = task_manager.spawn_essential_handle();
- Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry))
+ Ok(BasicQueue::new(verifier, Box::new(block_import), None, &spawner, registry))
+ }
}
/// Uses the lookahead collator to support async backing.
///
/// Start an aura powered parachain node. Some system chains use this.
-pub async fn start_generic_aura_async_backing_node<Net: NetworkBackend<Block, Hash>>(
- parachain_config: Configuration,
- polkadot_config: Configuration,
- collator_options: CollatorOptions,
- para_id: ParaId,
- use_experimental_slot_based: bool,
- hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<FakeRuntimeApi>>)> {
- let consensus_starter = if use_experimental_slot_based {
- start_slot_based_aura_consensus::<_, AuraId>
- } else {
- start_lookahead_aura_consensus::<_, AuraId>
- };
- start_node_impl::<FakeRuntimeApi, _, _, _, Net>(
- parachain_config,
- polkadot_config,
- collator_options,
- CollatorSybilResistance::Resistant, // Aura
- para_id,
- build_parachain_rpc_extensions::<FakeRuntimeApi>,
- build_relay_to_aura_import_queue::<_, AuraId>,
- consensus_starter,
- hwbench,
- )
- .await
+pub(crate) struct AuraNode<RuntimeApi, AuraId, StartConsensus>(
+ pub PhantomData<(RuntimeApi, AuraId, StartConsensus)>,
+);
+
+impl<RuntimeApi, AuraId, StartConsensus> Default for AuraNode<RuntimeApi, AuraId, StartConsensus> {
+ fn default() -> Self {
+ Self(Default::default())
+ }
}
-/// Start a shell node which should later transition into an Aura powered parachain node. Asset Hub
-/// uses this because at genesis, Asset Hub was on the `shell` runtime which didn't have Aura and
-/// needs to sync and upgrade before it can run `AuraApi` functions.
-///
-/// Uses the lookahead collator to support async backing.
-#[sc_tracing::logging::prefix_logs_with("Parachain")]
-pub async fn start_asset_hub_async_backing_node<RuntimeApi, AuraId, Net>(
- parachain_config: Configuration,
- polkadot_config: Configuration,
- collator_options: CollatorOptions,
- para_id: ParaId,
- use_experimental_slot_based: bool,
- hwbench: Option<sc_sysinfo::HwBench>,
-) -> sc_service::error::Result<(TaskManager, Arc<ParachainClient<RuntimeApi>>)>
+impl<RuntimeApi, AuraId, StartConsensus> NodeSpec for AuraNode<RuntimeApi, AuraId, StartConsensus>
where
RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>
+ pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
+ substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
AuraId: AuraIdT + Sync,
- Net: NetworkBackend<Block, Hash>,
+ StartConsensus: self::StartConsensus<RuntimeApi> + 'static,
{
- let consensus_starter = if use_experimental_slot_based {
- start_slot_based_aura_consensus::<_, AuraId>
- } else {
- start_lookahead_aura_consensus::<_, AuraId>
- };
-
- start_node_impl::<RuntimeApi, _, _, _, Net>(
- parachain_config,
- polkadot_config,
- collator_options,
- CollatorSybilResistance::Resistant, // Aura
- para_id,
- build_parachain_rpc_extensions,
- build_relay_to_aura_import_queue::<_, AuraId>,
- consensus_starter,
- hwbench,
- )
- .await
+ type RuntimeApi = RuntimeApi;
+ type BuildImportQueue = BuildRelayToAuraImportQueue<RuntimeApi, AuraId>;
+ type BuildRpcExtensions = BuildParachainRpcExtensions<RuntimeApi>;
+ type StartConsensus = StartConsensus;
+ const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Resistant;
}
-/// Wait for the Aura runtime API to appear on chain.
-/// This is useful for chains that started out without Aura. Components that
-/// are depending on Aura functionality will wait until Aura appears in the runtime.
-async fn wait_for_aura<RuntimeApi, AuraId>(client: Arc<ParachainClient<RuntimeApi>>)
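+/// Builds an Aura node spec, choosing between the experimental slot-based consensus and the
+/// lookahead collator based on the extra CLI args.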
+pub fn new_aura_node_spec<RuntimeApi, AuraId>(extra_args: NodeExtraArgs) -> Box<dyn DynNodeSpec>
where
RuntimeApi: ConstructNodeRuntimeApi<Block, ParachainClient<RuntimeApi>>,
- RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>,
+ RuntimeApi::RuntimeApi: AuraRuntimeApi<Block, AuraId>
+ + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, Balance>
+ + substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>,
AuraId: AuraIdT + Sync,
{
- let finalized_hash = client.chain_info().finalized_hash;
- if client
- .runtime_api()
- .has_api::<AuraApi<Block, AuraId>>(finalized_hash)
- .unwrap_or(false)
- {
- return;
- };
-
- let mut stream = client.finality_notification_stream();
- while let Some(notification) = stream.next().await {
- let has_aura_api = client
- .runtime_api()
- .has_api::<AuraApi<Block, AuraId>>(notification.hash)
- .unwrap_or(false);
- if has_aura_api {
- return;
- }
+ if extra_args.use_slot_based_consensus {
+ Box::new(AuraNode::<
+ RuntimeApi,
+ AuraId,
+ StartSlotBasedAuraConsensus<RuntimeApi, AuraId>,
+ >::default())
+ } else {
+ Box::new(AuraNode::<
+ RuntimeApi,
+ AuraId,
+ StartLookaheadAuraConsensus<RuntimeApi, AuraId>,
+ >::default())
}
}
/// Start relay-chain consensus that is free for all. Everyone can submit a block, the relay-chain
/// decides what is backed and included.
-fn start_relay_chain_consensus(
- client: Arc<ParachainClient<FakeRuntimeApi>>,
- block_import: ParachainBlockImport<FakeRuntimeApi>,
- prometheus_registry: Option<&Registry>,
- telemetry: Option<TelemetryHandle>,
- task_manager: &TaskManager,
- relay_chain_interface: Arc<dyn RelayChainInterface>,
- transaction_pool: Arc<sc_transaction_pool::FullPool<Block, ParachainClient<FakeRuntimeApi>>>,
- _keystore: KeystorePtr,
- _relay_chain_slot_duration: Duration,
- para_id: ParaId,
- collator_key: CollatorPair,
- overseer_handle: OverseerHandle,
- announce_block: Arc<dyn Fn(Hash, Option<Vec<u8>>) + Send + Sync>,