From e6d37b094c90a9205830df8076105dcda40203f9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 11 Feb 2022 18:43:49 -0600 Subject: [PATCH 01/87] docs and skeleton --- node/core/prospective-parachains/Cargo.toml | 22 +++++++++ node/core/prospective-parachains/src/lib.rs | 50 +++++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 node/core/prospective-parachains/Cargo.toml create mode 100644 node/core/prospective-parachains/src/lib.rs diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml new file mode 100644 index 000000000000..010560bdcc81 --- /dev/null +++ b/node/core/prospective-parachains/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "polkadot-node-core-fragment-manager" +version = "0.9.16" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.19" +tracing = "0.1.29" +parity-scale-codec = "2" +thiserror = "1.0.30" + +polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } +polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-node-subsystem-util = { path = "../../subsystem-util" } + +[dev-dependencies] + +[features] +# If not enabled, the dispute coordinator will do nothing. +disputes = [] diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs new file mode 100644 index 000000000000..c2c503944aaa --- /dev/null +++ b/node/core/prospective-parachains/src/lib.rs @@ -0,0 +1,50 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implementation of the Prospective Parachains subsystem - this tracks and handles +//! prospective parachain fragments and informs other backing-stage subsystems +//! of work to be done. +//! +//! This is the main coordinator of work within the node for the collation and +//! backing phases of parachain consensus. +//! +//! This is primarily an implementation of "Fragment Trees", as described in +//! [`polkadot_node_subsystem_util::inclusion_emulator::staging`]. +//! +//! This also handles concerns such as the relay-chain being forkful, +//! session changes, predicting validator group assignments, and +//! the re-backing of parachain blocks as a result of these changes. +//! +//! ## Re-backing +//! +//! Since this subsystems deals in enabling the collation and extension +//! of parachains in advance of actually being recorded on the relay-chain, +//! it is possible for the validator-group that initially backed the parablock +//! to be no longer assigned at the point that the parablock is submitted +//! to the relay-chain. +//! +//! This presents an issue, because the relay-chain only accepts blocks +//! which are backed by the currently-assigned group of validators, not +//! by the group of validators previously assigned to the parachain. +//! +//! 
In order to avoid wasting work at group rotation boundaries, we must +//! allow validators to re-validate the work of the preceding group. +//! This process is known as re-backing. +//! +//! What happens in practice is that validators observe that they are +//! scheduled to be assigned to a specific para in the near future. +//! And as a result, they dig into the existing fragment-trees to +//! re-back what already existed. From 0c7784326ed92e0e2992fe365b8509a98749aab1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 15 Feb 2022 11:37:24 -0600 Subject: [PATCH 02/87] subsystem skeleton --- Cargo.lock | 14 ++++++++ Cargo.toml | 1 + node/core/prospective-parachains/Cargo.toml | 2 +- node/core/prospective-parachains/src/lib.rs | 40 +++++++++++++++++++++ 4 files changed, 56 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 2fdc862bc031..900959f2a256 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6720,6 +6720,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "polkadot-node-core-prospective-parachains" +version = "0.9.16" +dependencies = [ + "futures 0.3.19", + "parity-scale-codec", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "thiserror", + "tracing", +] + [[package]] name = "polkadot-node-core-provisioner" version = "0.9.17" diff --git a/Cargo.toml b/Cargo.toml index 190a3ca2f87a..357c47fd8d0a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,6 +68,7 @@ members = [ "node/core/chain-selection", "node/core/dispute-coordinator", "node/core/parachains-inherent", + "node/core/prospective-parachains", "node/core/provisioner", "node/core/pvf", "node/core/pvf-checker", diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml index 010560bdcc81..def2fd47f10e 100644 --- a/node/core/prospective-parachains/Cargo.toml +++ b/node/core/prospective-parachains/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "polkadot-node-core-fragment-manager" +name = "polkadot-node-core-prospective-parachains" version = "0.9.16" authors = ["Parity Technologies "] edition = "2018" diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index c2c503944aaa..cddc12e96e1e 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -48,3 +48,43 @@ //! scheduled to be assigned to a specific para in the near future. //! And as a result, they dig into the existing fragment-trees to //! re-back what already existed. + +use std::sync::Arc; + +use futures::prelude::*; + +use polkadot_node_subsystem_util::metrics::{self, prometheus}; +use polkadot_primitives::vstaging::{Block, BlockId}; +use polkadot_node_subsystem::{ + overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, + SubsystemContext, SubsystemError, SubsystemResult, +}; + +const LOG_TARGET: &str = "parachain::prospective-parachains"; + +/// The Prospective Parachains Subsystem. +pub struct ProspectiveParachainsSubsystems { + metrics: Metrics, +} + +// TODO [now]: add this enum to the broader subsystem types. +pub enum ProspectiveParachainsMessage { } + +async fn run( + mut ctx: Context, +) -> SubsystemResult<()> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext +{ + unimplemented!() +} + +#[derive(Clone)] +struct MetricsInner; + +/// Prospective parachain metrics. 
+#[derive(Default, Clone)] +pub struct Metrics(Option); + +// TODO [now]: impl metrics From 6c82e7cb8a3fa2b32be0060cfb52c436fb67d4fa Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 15 Feb 2022 11:41:55 -0600 Subject: [PATCH 03/87] main loop --- node/core/prospective-parachains/src/lib.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index cddc12e96e1e..021e8ad2cf02 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -49,6 +49,9 @@ //! And as a result, they dig into the existing fragment-trees to //! re-back what already existed. +// TODO [now]: remove +#![allow(unused)] + use std::sync::Arc; use futures::prelude::*; @@ -77,7 +80,18 @@ where Context: SubsystemContext, Context: overseer::SubsystemContext { - unimplemented!() + loop { + match ctx.recv().await? { + FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => { + // TODO [now]: handle active leaves and obsolete leaves. + } + FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {} + FromOverseer::Communication { msg } => match msg { + // TODO [now]: handle messages + } + } + } } #[derive(Clone)] From 4d7313875414ee668561ec2e9b6fb43569508cc6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 15 Feb 2022 11:56:25 -0600 Subject: [PATCH 04/87] fragment tree basics & fmt --- node/core/prospective-parachains/src/lib.rs | 44 +++++++++++++++------ 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 021e8ad2cf02..cc9f76776948 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -52,16 +52,19 @@ // TODO [now]: remove #![allow(unused)] -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; use futures::prelude::*; -use polkadot_node_subsystem_util::metrics::{self, prometheus}; -use polkadot_primitives::vstaging::{Block, BlockId}; use polkadot_node_subsystem::{ - overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, - SubsystemContext, SubsystemError, SubsystemResult, + overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError, + SubsystemResult, }; +use polkadot_node_subsystem_util::{ + inclusion_emulator::staging::{ConstraintModifications, Constraints, Fragment}, + metrics::{self, prometheus}, +}; +use polkadot_primitives::vstaging::{Block, BlockId, CandidateHash, Hash, Id as ParaId}; const LOG_TARGET: &str = "parachain::prospective-parachains"; @@ -71,29 +74,44 @@ pub struct ProspectiveParachainsSubsystems { } // TODO [now]: add this enum to the broader subsystem types. -pub enum ProspectiveParachainsMessage { } +pub enum ProspectiveParachainsMessage {} -async fn run( - mut ctx: Context, -) -> SubsystemResult<()> +async fn run(mut ctx: Context) -> SubsystemResult<()> where Context: SubsystemContext, - Context: overseer::SubsystemContext + Context: overseer::SubsystemContext, { loop { match ctx.recv().await? { FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => { // TODO [now]: handle active leaves and obsolete leaves. 
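				// A later commit in this series fills this arm in by introducing a
				// `View` of active relay-chain blocks and delegating to an
				// `update_view` helper, roughly:
				//
				//     FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => {
				//         update_view(&mut view, &mut ctx, update).await?;
				//     },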
- } - FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {} + }, + FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOverseer::Communication { msg } => match msg { // TODO [now]: handle messages - } + }, } } } +struct FragmentTree { + para: ParaId, + // Fragment nodes based on fragment head-data + nodes: HashMap, + // The root hash of this fragment-tree. + root: Hash, +} + +struct FragmentNode { + // Head-data of the parent node. + parent: Hash, + // Head-data of children. + // TODO [now]: make sure traversal detects loops. + children: Vec, + fragment: Fragment, +} + #[derive(Clone)] struct MetricsInner; From d7d5ff7633ee5132f90d9c30a55891a5105fdfb1 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 15 Feb 2022 12:17:14 -0600 Subject: [PATCH 05/87] begin fragment trees & view --- node/core/prospective-parachains/src/lib.rs | 94 ++++++++++++++++----- 1 file changed, 74 insertions(+), 20 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index cc9f76776948..b86b743e1143 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -52,16 +52,21 @@ // TODO [now]: remove #![allow(unused)] -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; use futures::prelude::*; use polkadot_node_subsystem::{ - overseer, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError, - SubsystemResult, + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, + SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ - inclusion_emulator::staging::{ConstraintModifications, Constraints, Fragment}, + inclusion_emulator::staging::{ + ConstraintModifications, Constraints, Fragment, RelayChainBlockInfo, + }, metrics::{self, prometheus}, }; use polkadot_primitives::vstaging::{Block, BlockId, CandidateHash, Hash, Id as ParaId}; @@ -73,19 +78,67 @@ pub struct ProspectiveParachainsSubsystems { metrics: Metrics, } +// TODO [now]: error types, fatal & non-fatal. + // TODO [now]: add this enum to the broader subsystem types. pub enum ProspectiveParachainsMessage {} +struct FragmentTrees { + para: ParaId, + // Fragment nodes based on fragment head-data + nodes: HashMap, + // The root hashes of this fragment-tree by head-data. + roots: HashSet, +} + +struct FragmentNode { + // Head-data of the parent node. + parent: Hash, + // Head-data of children. + // TODO [now]: make sure traversal detects loops. + children: Vec, + fragment: Fragment, +} + +// TODO [now] rename maybe +struct RelevantParaFragments { + para: ParaId, + relay_parent: Hash, + constraints: Constraints, + relevant: HashSet, +} + +struct RelayBlockViewData { + // Relevant fragments for each parachain that is scheduled. + relevant_fragments: HashMap, + block_info: RelayChainBlockInfo, + base_constraints: Constraints, + // TODO [now]: other stuff +} + +struct View { + // Active or recent relay-chain blocks by block hash. + active_or_recent: HashMap, + fragment_trees: HashMap, +} + +impl View { + fn new() -> Self { + View { active_or_recent: HashMap::new(), fragment_trees: HashMap::new() } + } +} + async fn run(mut ctx: Context) -> SubsystemResult<()> where Context: SubsystemContext, Context: overseer::SubsystemContext, { + let mut view = View::new(); loop { match ctx.recv().await? 
{ FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), - FromOverseer::Signal(OverseerSignal::ActiveLeaves(_)) => { - // TODO [now]: handle active leaves and obsolete leaves. + FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { + update_view(&mut view, &mut ctx, update).await?; }, FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOverseer::Communication { msg } => match msg { @@ -95,21 +148,22 @@ where } } -struct FragmentTree { - para: ParaId, - // Fragment nodes based on fragment head-data - nodes: HashMap, - // The root hash of this fragment-tree. - root: Hash, -} +// TODO [now]; non-fatal error type. +async fn update_view( + view: &mut View, + ctx: &mut Context, + update: ActiveLeavesUpdate, +) -> SubsystemResult<()> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + // TODO [now]: get block info for all new blocks. + // update ref counts for anything still relevant + // clean up outgoing blocks + // clean up unreferenced fragments -struct FragmentNode { - // Head-data of the parent node. - parent: Hash, - // Head-data of children. - // TODO [now]: make sure traversal detects loops. - children: Vec, - fragment: Fragment, + unimplemented!() } #[derive(Clone)] From 30ba334b56b4af78cb502c074f146e6d6253094f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 15 Feb 2022 12:42:06 -0600 Subject: [PATCH 06/87] flesh out more of view update logic --- node/core/prospective-parachains/src/lib.rs | 95 +++++++++++++++++++-- 1 file changed, 86 insertions(+), 9 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index b86b743e1143..55baa1f1f68c 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -69,7 +69,7 @@ use polkadot_node_subsystem_util::{ }, metrics::{self, prometheus}, }; -use polkadot_primitives::vstaging::{Block, BlockId, CandidateHash, Hash, Id as ParaId}; +use polkadot_primitives::vstaging::{Block, BlockId, CandidateHash, Hash, Header, Id as ParaId}; const LOG_TARGET: &str = "parachain::prospective-parachains"; @@ -103,8 +103,7 @@ struct FragmentNode { // TODO [now] rename maybe struct RelevantParaFragments { para: ParaId, - relay_parent: Hash, - constraints: Constraints, + base_constraints: Constraints, relevant: HashSet, } @@ -112,19 +111,23 @@ struct RelayBlockViewData { // Relevant fragments for each parachain that is scheduled. relevant_fragments: HashMap, block_info: RelayChainBlockInfo, - base_constraints: Constraints, // TODO [now]: other stuff } struct View { // Active or recent relay-chain blocks by block hash. + active_leaves: HashSet, active_or_recent: HashMap, fragment_trees: HashMap, } impl View { fn new() -> Self { - View { active_or_recent: HashMap::new(), fragment_trees: HashMap::new() } + View { + active_leaves: HashSet::new(), + active_or_recent: HashMap::new(), + fragment_trees: HashMap::new(), + } } } @@ -158,11 +161,85 @@ where Context: SubsystemContext, Context: overseer::SubsystemContext, { - // TODO [now]: get block info for all new blocks. - // update ref counts for anything still relevant - // clean up outgoing blocks - // clean up unreferenced fragments + // Update active_leaves + { + for activated in update.activated.into_iter() { + view.active_leaves.insert(activated.hash); + } + + for deactivated in update.deactivated.into_iter() { + view.active_leaves.remove(&deactivated); + } + } + + // Find the set of blocks we care about. 
+ let relevant_blocks = find_all_relevant_blocks(ctx, &view.active_leaves).await?; + + // Prune everything that was relevant but isn't anymore. + { + let all_removed: Vec<_> = view + .active_or_recent + .keys() + .cloned() + .filter(|h| !relevant_blocks.contains_key(&h)) + .collect(); + + for removed in all_removed { + let view_data = view.active_or_recent.remove(&removed).expect( + "key was gathered from iterating over all present keys; therefore is present; qed", + ); + + // TODO [now]: update fragment trees accordingly + } + } + + // Add new blocks and get data if necessary. + { + let all_new: Vec<_> = relevant_blocks + .iter() + .filter(|(h, _hdr)| !view.active_or_recent.contains_key(h)) + .collect(); + + for (new_hash, new_header) in all_new { + let block_info = RelayChainBlockInfo { + hash: *new_hash, + number: new_header.number, + storage_root: new_header.state_root, + }; + + // TODO [now]: determine parachains to hold fragments for. + // TODO [now]: determine relevant fragments according to constraints. + // TODO [now]: update ref counts in fragment trees + // TODO [now]: insert into `active_or_recent` + } + } + + unimplemented!() +} + +// TODO [now]; non-fatal error type. +async fn get_base_constraints( + ctx: &mut Context, + relay_block: Hash, + para_id: ParaId, +) -> SubsystemResult +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + unimplemented!() +} +// TODO [now]; non-fatal error type. +async fn find_all_relevant_blocks( + ctx: &mut Context, + active_leaves: &HashSet, +) -> SubsystemResult> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + const LOOKBACK: usize = 2; unimplemented!() } From 6a052e0c70ea5dcb272e1a8a14f9e99c59485500 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 15 Feb 2022 13:00:11 -0600 Subject: [PATCH 07/87] further flesh out update logic --- node/core/prospective-parachains/src/lib.rs | 38 ++++++++++++++++++--- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 55baa1f1f68c..e72df2833970 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -91,6 +91,12 @@ struct FragmentTrees { roots: HashSet, } +impl FragmentTrees { + fn is_empty(&self) -> bool { + self.nodes.is_empty() + } +} + struct FragmentNode { // Head-data of the parent node. parent: Hash, @@ -118,6 +124,9 @@ struct View { // Active or recent relay-chain blocks by block hash. active_leaves: HashSet, active_or_recent: HashMap, + + // Fragment trees, one for each parachain. + // TODO [now]: handle cleanup when these go obsolete. fragment_trees: HashMap, } @@ -190,6 +199,7 @@ where ); // TODO [now]: update fragment trees accordingly + // TODO [now]: prune empty fragment trees } } @@ -207,10 +217,18 @@ where storage_root: new_header.state_root, }; - // TODO [now]: determine parachains to hold fragments for. - // TODO [now]: determine relevant fragments according to constraints. - // TODO [now]: update ref counts in fragment trees - // TODO [now]: insert into `active_or_recent` + let all_parachains = get_all_parachains(ctx, *new_hash).await?; + + let mut relevant_fragments = HashMap::new(); + for p in all_parachains { + let constraints = get_base_constraints(ctx, *new_hash, p).await?; + + // TODO [now]: determine relevant fragments according to constraints. 
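				// A sketch of how this step could look (not part of this commit's
				// changes), assuming the `determine_relevant_fragments` helper that
				// the next commit in this series adds to `FragmentTrees`:
				let relevant = view
					.fragment_trees
					.get(&p)
					.map(|trees| trees.determine_relevant_fragments(&constraints))
					.unwrap_or_default();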
+ // TODO [now]: update ref counts in fragment trees + } + + view.active_or_recent + .insert(*new_hash, RelayBlockViewData { relevant_fragments, block_info }); } } @@ -230,6 +248,18 @@ where unimplemented!() } +// TODO [now]; non-fatal error type. +async fn get_all_parachains( + ctx: &mut Context, + relay_block: Hash, +) -> SubsystemResult> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + unimplemented!() +} + // TODO [now]; non-fatal error type. async fn find_all_relevant_blocks( ctx: &mut Context, From 65b68fc9e0635b0ada97e42d0efac1260173f75e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 15 Feb 2022 13:11:16 -0600 Subject: [PATCH 08/87] some refcount functions for fragment trees --- node/core/prospective-parachains/src/lib.rs | 37 +++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index e72df2833970..66510d7082e7 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -54,6 +54,7 @@ use std::{ collections::{HashMap, HashSet}, + collections::hash_map::Entry as HEntry, sync::Arc, }; @@ -95,6 +96,40 @@ impl FragmentTrees { fn is_empty(&self) -> bool { self.nodes.is_empty() } + + fn determine_relevant_fragments( + &self, + constraints: &Constraints, + ) -> Vec { + unimplemented!() + } + + fn add_refcount(&mut self, fragment_hash: &Hash) { + if let Some(entry) = self.nodes.get_mut(fragment_hash) { + entry.1 += 1; + } + } + + fn remove_refcount(&mut self, fragment_hash: Hash) { + let node = match self.nodes.entry(fragment_hash) { + HEntry::Vacant(_) => return, + HEntry::Occupied(mut entry) => { + if entry.get().1 == 1 { + entry.remove().0 + } else { + entry.get_mut().1 -= 1; + return; + } + } + }; + + // TODO [now]: verify that this means 'it was present' + if self.roots.remove(&fragment_hash) { + for child in node.children { + self.roots.insert(child); + } + } + } } struct FragmentNode { @@ -170,6 +205,8 @@ where Context: SubsystemContext, Context: overseer::SubsystemContext, { + // TODO [now]: separate determining updates from updates themselves. + // Update active_leaves { for activated in update.activated.into_iter() { From 4041460f9b45f6d04d218de8a0b2d2c34d1d4826 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Feb 2022 15:49:12 -0600 Subject: [PATCH 09/87] add fatal/non-fatal errors --- node/core/prospective-parachains/src/error.rs | 115 ++++++++++++++++++ node/core/prospective-parachains/src/lib.rs | 6 +- 2 files changed, 119 insertions(+), 2 deletions(-) create mode 100644 node/core/prospective-parachains/src/error.rs diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs new file mode 100644 index 000000000000..f892d64a5408 --- /dev/null +++ b/node/core/prospective-parachains/src/error.rs @@ -0,0 +1,115 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Error types. + +use futures::channel::oneshot; +use thiserror::Error; + +use polkadot_node_subsystem::{ + errors::{ChainApiError, RuntimeApiError}, + SubsystemError, +}; +use polkadot_node_subsystem_util::{rolling_session_window::SessionsUnavailable, runtime}; + +use crate::LOG_TARGET; +use parity_scale_codec::Error as CodecError; + +/// Errors for this subsystem. +#[derive(Debug, Error)] +#[error(transparent)] +pub enum Error { + /// All fatal errors. + Fatal(#[from] Fatal), + /// All nonfatal/potentially recoverable errors. + NonFatal(#[from] NonFatal), +} + +/// General `Result` type for dispute coordinator. +pub type Result = std::result::Result; +/// Result type with only fatal errors. +pub type FatalResult = std::result::Result; +/// Result type with only non fatal errors. +pub type NonFatalResult = std::result::Result; + +impl From for Error { + fn from(o: SubsystemError) -> Self { + match o { + SubsystemError::Context(msg) => Self::Fatal(Fatal::SubsystemContext(msg)), + _ => Self::NonFatal(NonFatal::Subsystem(o)), + } + } +} + +/// Fatal errors of this subsystem. +#[derive(Debug, Error)] +pub enum Fatal { + /// We received a legacy `SubystemError::Context` error which is considered fatal. + #[error("SubsystemError::Context error: {0}")] + SubsystemContext(String), + + /// `ctx.spawn` failed with an error. + #[error("Spawning a task failed: {0}")] + SpawnFailed(SubsystemError), + + #[error("Participation worker receiver exhausted.")] + ParticipationWorkerReceiverExhausted, + + /// Receiving subsystem message from overseer failed. + #[error("Receiving message from overseer failed: {0}")] + SubsystemReceive(#[source] SubsystemError), +} + +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum NonFatal { + #[error(transparent)] + RuntimeApi(#[from] RuntimeApiError), + + #[error(transparent)] + ChainApi(#[from] ChainApiError), + + #[error(transparent)] + Subsystem(SubsystemError), +} + +/// Utility for eating top level errors and log them. +/// +/// We basically always want to try and continue on error. This utility function is meant to +/// consume top-level errors by simply logging them +pub fn log_error(result: Result<()>) -> std::result::Result<(), Fatal> { + match result { + Err(Error::Fatal(f)) => Err(f), + Err(Error::NonFatal(error)) => { + error.log(); + Ok(()) + }, + Ok(()) => Ok(()), + } +} + +impl NonFatal { + /// Log a `NonFatal`. + pub fn log(self) { + match self { + // don't spam the log with spurious errors + Self::RuntimeApi(_) => + tracing::debug!(target: LOG_TARGET, error = ?self), + // it's worth reporting otherwise + _ => tracing::warn!(target: LOG_TARGET, error = ?self), + } + } +} diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 66510d7082e7..75de69ac0072 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -72,6 +72,10 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::vstaging::{Block, BlockId, CandidateHash, Hash, Header, Id as ParaId}; +use crate::error::{Error, FatalResult, NonFatal, Result}; + +mod error; + const LOG_TARGET: &str = "parachain::prospective-parachains"; /// The Prospective Parachains Subsystem. @@ -79,8 +83,6 @@ pub struct ProspectiveParachainsSubsystems { metrics: Metrics, } -// TODO [now]: error types, fatal & non-fatal. 
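// The TODO above is resolved by the `error` module added in this commit:
// `Fatal` errors abort the subsystem, while `NonFatal` errors are meant to be
// swallowed and logged via `log_error`. A sketch of the intended call pattern
// (the `run_iteration` name here is illustrative only, not part of the patch):
//
//     // inside a loop whose enclosing function returns `FatalResult<()>`:
//     if let Err(fatal) = log_error(run_iteration(&mut ctx, &mut view).await) {
//         return Err(fatal)
//     }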
- // TODO [now]: add this enum to the broader subsystem types. pub enum ProspectiveParachainsMessage {} From e7010ed4ef9b18278650da07c6a7fb6bcdc01d72 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Feb 2022 15:51:58 -0600 Subject: [PATCH 10/87] use non-fatal results --- node/core/prospective-parachains/src/lib.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 75de69ac0072..6cbf2ed7012e 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -72,7 +72,7 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::vstaging::{Block, BlockId, CandidateHash, Hash, Header, Id as ParaId}; -use crate::error::{Error, FatalResult, NonFatal, Result}; +use crate::error::{Error, FatalResult, NonFatal, Result, NonFatalResult}; mod error; @@ -177,7 +177,7 @@ impl View { } } -async fn run(mut ctx: Context) -> SubsystemResult<()> +async fn run(mut ctx: Context) -> Result<()> where Context: SubsystemContext, Context: overseer::SubsystemContext, @@ -202,7 +202,7 @@ async fn update_view( view: &mut View, ctx: &mut Context, update: ActiveLeavesUpdate, -) -> SubsystemResult<()> +) -> NonFatalResult<()> where Context: SubsystemContext, Context: overseer::SubsystemContext, @@ -279,7 +279,7 @@ async fn get_base_constraints( ctx: &mut Context, relay_block: Hash, para_id: ParaId, -) -> SubsystemResult +) -> NonFatalResult where Context: SubsystemContext, Context: overseer::SubsystemContext, @@ -291,7 +291,7 @@ where async fn get_all_parachains( ctx: &mut Context, relay_block: Hash, -) -> SubsystemResult> +) -> NonFatalResult> where Context: SubsystemContext, Context: overseer::SubsystemContext, @@ -303,7 +303,7 @@ where async fn find_all_relevant_blocks( ctx: &mut Context, active_leaves: &HashSet, -) -> SubsystemResult> +) -> NonFatalResult> where Context: SubsystemContext, Context: overseer::SubsystemContext, From 53dd19671260bc3bcc165e5d57eb44e3858d6e0a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Feb 2022 16:32:22 -0600 Subject: [PATCH 11/87] clear up some TODOs --- node/core/prospective-parachains/src/lib.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 6cbf2ed7012e..419504644776 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -125,7 +125,6 @@ impl FragmentTrees { } }; - // TODO [now]: verify that this means 'it was present' if self.roots.remove(&fragment_hash) { for child in node.children { self.roots.insert(child); @@ -274,7 +273,6 @@ where unimplemented!() } -// TODO [now]; non-fatal error type. async fn get_base_constraints( ctx: &mut Context, relay_block: Hash, @@ -287,7 +285,6 @@ where unimplemented!() } -// TODO [now]; non-fatal error type. async fn get_all_parachains( ctx: &mut Context, relay_block: Hash, @@ -299,7 +296,6 @@ where unimplemented!() } -// TODO [now]; non-fatal error type. 
async fn find_all_relevant_blocks( ctx: &mut Context, active_leaves: &HashSet, From f419c66eb6a38ac0905d66eef6d42a9f3b740bb9 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 16 Feb 2022 23:30:20 -0600 Subject: [PATCH 12/87] ideal format for scheduling info --- node/core/prospective-parachains/src/error.rs | 3 +- node/core/prospective-parachains/src/lib.rs | 69 +++++++++++++------ 2 files changed, 48 insertions(+), 24 deletions(-) diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index f892d64a5408..1627860a95db 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -106,8 +106,7 @@ impl NonFatal { pub fn log(self) { match self { // don't spam the log with spurious errors - Self::RuntimeApi(_) => - tracing::debug!(target: LOG_TARGET, error = ?self), + Self::RuntimeApi(_) => tracing::debug!(target: LOG_TARGET, error = ?self), // it's worth reporting otherwise _ => tracing::warn!(target: LOG_TARGET, error = ?self), } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 419504644776..da8c06fd8689 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -53,8 +53,7 @@ #![allow(unused)] use std::{ - collections::{HashMap, HashSet}, - collections::hash_map::Entry as HEntry, + collections::{hash_map::Entry as HEntry, HashMap, HashSet}, sync::Arc, }; @@ -70,9 +69,12 @@ use polkadot_node_subsystem_util::{ }, metrics::{self, prometheus}, }; -use polkadot_primitives::vstaging::{Block, BlockId, CandidateHash, Hash, Header, Id as ParaId}; +use polkadot_primitives::vstaging::{ + Block, BlockId, BlockNumber, CandidateHash, GroupIndex, GroupRotationInfo, Hash, Header, + Id as ParaId, SessionIndex, ValidatorIndex, +}; -use crate::error::{Error, FatalResult, NonFatal, Result, NonFatalResult}; +use crate::error::{Error, FatalResult, NonFatal, NonFatalResult, Result}; mod error; @@ -99,10 +101,7 @@ impl FragmentTrees { self.nodes.is_empty() } - fn determine_relevant_fragments( - &self, - constraints: &Constraints, - ) -> Vec { + fn determine_relevant_fragments(&self, constraints: &Constraints) -> Vec { unimplemented!() } @@ -115,14 +114,13 @@ impl FragmentTrees { fn remove_refcount(&mut self, fragment_hash: Hash) { let node = match self.nodes.entry(fragment_hash) { HEntry::Vacant(_) => return, - HEntry::Occupied(mut entry) => { + HEntry::Occupied(mut entry) => if entry.get().1 == 1 { entry.remove().0 } else { entry.get_mut().1 -= 1; - return; - } - } + return + }, }; if self.roots.remove(&fragment_hash) { @@ -142,16 +140,20 @@ struct FragmentNode { fragment: Fragment, } -// TODO [now] rename maybe +impl FragmentNode { + fn relay_parent(&self) -> Hash { + self.fragment.relay_parent().hash + } +} + struct RelevantParaFragments { para: ParaId, base_constraints: Constraints, - relevant: HashSet, } struct RelayBlockViewData { // Relevant fragments for each parachain that is scheduled. 
- relevant_fragments: HashMap, + scheduling: HashMap, block_info: RelayChainBlockInfo, // TODO [now]: other stuff } @@ -255,24 +257,28 @@ where storage_root: new_header.state_root, }; - let all_parachains = get_all_parachains(ctx, *new_hash).await?; + let scheduling_info = get_scheduling_info(ctx, *new_hash).await?; let mut relevant_fragments = HashMap::new(); - for p in all_parachains { - let constraints = get_base_constraints(ctx, *new_hash, p).await?; + for core_info in scheduling_info.cores { + let constraints = get_base_constraints(ctx, *new_hash, core_info.para_id).await?; // TODO [now]: determine relevant fragments according to constraints. // TODO [now]: update ref counts in fragment trees } - view.active_or_recent - .insert(*new_hash, RelayBlockViewData { relevant_fragments, block_info }); + view.active_or_recent.insert( + *new_hash, + RelayBlockViewData { scheduling: relevant_fragments, block_info }, + ); } } unimplemented!() } +// TODO [now]: don't accept too many fragments per para per relay-parent + async fn get_base_constraints( ctx: &mut Context, relay_block: Hash, @@ -285,10 +291,29 @@ where unimplemented!() } -async fn get_all_parachains( +// Scheduling info. +// - group rotation info: validator groups, group rotation info +// - information about parachains that are predictably going to be assigned +// to each core. For now that's just parachains, but it's worth noting that +// parathread claims are anchored to a specific core. +struct SchedulingInfo { + validator_groups: Vec>, + group_rotation_info: GroupRotationInfo, + // One core per parachain. this should have same length as 'validator-groups' + cores: Vec, +} + +struct CoreInfo { + para_id: ParaId, + + // (candidate hash, hash, timeout_at) if any + pending_availability: Option<(CandidateHash, Hash, BlockNumber)>, +} + +async fn get_scheduling_info( ctx: &mut Context, relay_block: Hash, -) -> NonFatalResult> +) -> NonFatalResult where Context: SubsystemContext, Context: overseer::SubsystemContext, From da1bac8d6631c4284bd1159dee99fe833c0975b5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Feb 2022 00:51:05 -0600 Subject: [PATCH 13/87] add a bunch of TODOs --- node/core/prospective-parachains/src/lib.rs | 78 +++++++++------------ 1 file changed, 32 insertions(+), 46 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index da8c06fd8689..cc20e495f812 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -88,12 +88,12 @@ pub struct ProspectiveParachainsSubsystems { // TODO [now]: add this enum to the broader subsystem types. pub enum ProspectiveParachainsMessage {} +// TODO [now]: rename. more of a pile than a tree really. +// We only use it as a tree when traversing to select what to build upon. struct FragmentTrees { para: ParaId, - // Fragment nodes based on fragment head-data - nodes: HashMap, - // The root hashes of this fragment-tree by head-data. - roots: HashSet, + // Fragment nodes based on fragment head-data. 
+ nodes: HashMap, } impl FragmentTrees { @@ -105,29 +105,9 @@ impl FragmentTrees { unimplemented!() } - fn add_refcount(&mut self, fragment_hash: &Hash) { - if let Some(entry) = self.nodes.get_mut(fragment_hash) { - entry.1 += 1; - } - } - - fn remove_refcount(&mut self, fragment_hash: Hash) { - let node = match self.nodes.entry(fragment_hash) { - HEntry::Vacant(_) => return, - HEntry::Occupied(mut entry) => - if entry.get().1 == 1 { - entry.remove().0 - } else { - entry.get_mut().1 -= 1; - return - }, - }; - - if self.roots.remove(&fragment_hash) { - for child in node.children { - self.roots.insert(child); - } - } + // Retain fragments whose relay-parent passes the predicate. + fn retain(&mut self, pred: impl Fn(&Hash) -> bool) { + self.nodes.retain(|_, v| pred(&v.relay_parent())); } } @@ -193,6 +173,9 @@ where FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOverseer::Communication { msg } => match msg { // TODO [now]: handle messages + // 1. Notification of new fragment (orphaned?) + // 2. Notification of new fragment being backed + // 3. Request for backable candidates }, } } @@ -224,8 +207,13 @@ where // Find the set of blocks we care about. let relevant_blocks = find_all_relevant_blocks(ctx, &view.active_leaves).await?; - // Prune everything that was relevant but isn't anymore. + let all_new: Vec<_> = relevant_blocks + .iter() + .filter(|(h, _hdr)| !view.active_or_recent.contains_key(h)) + .collect(); + { + // Prune everything that was relevant but isn't anymore. let all_removed: Vec<_> = view .active_or_recent .keys() @@ -234,22 +222,10 @@ where .collect(); for removed in all_removed { - let view_data = view.active_or_recent.remove(&removed).expect( - "key was gathered from iterating over all present keys; therefore is present; qed", - ); - - // TODO [now]: update fragment trees accordingly - // TODO [now]: prune empty fragment trees + let _ = view.active_or_recent.remove(&removed); } - } - - // Add new blocks and get data if necessary. - { - let all_new: Vec<_> = relevant_blocks - .iter() - .filter(|(h, _hdr)| !view.active_or_recent.contains_key(h)) - .collect(); + // Add new blocks and get data if necessary. Dispatch work to backing subsystems. for (new_hash, new_header) in all_new { let block_info = RelayChainBlockInfo { hash: *new_hash, @@ -260,11 +236,9 @@ where let scheduling_info = get_scheduling_info(ctx, *new_hash).await?; let mut relevant_fragments = HashMap::new(); - for core_info in scheduling_info.cores { - let constraints = get_base_constraints(ctx, *new_hash, core_info.para_id).await?; - // TODO [now]: determine relevant fragments according to constraints. - // TODO [now]: update ref counts in fragment trees + for core_info in scheduling_info.cores { + // TODO [now]: construct RelayBlockViewData appropriately } view.active_or_recent.insert( @@ -272,12 +246,24 @@ where RelayBlockViewData { scheduling: relevant_fragments, block_info }, ); } + + // TODO [now]: GC fragment trees: + // 1. Keep only fragment trees for paras that are scheduled at any of our blocks. + // 2. Keep only fragments that are built on any of our blocks. + + + // TODO [now]: give all backing subsystems messages or signals. + // There are, annoyingly, going to be race conditions with networking. + // Move networking into a backing 'super-subsystem'? + // + // Which ones need to care about 'orphaned' fragments? 
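		// A sketch of the second GC point above (not part of this commit's changes),
		// using the `retain` helper this commit adds to `FragmentTrees`: keep only
		// fragments anchored to a still-relevant relay-chain block, then drop any
		// per-para tree that has become empty.
		let still_relevant = &view.active_or_recent;
		for trees in view.fragment_trees.values_mut() {
			trees.retain(|relay_parent| still_relevant.contains_key(relay_parent));
		}
		view.fragment_trees.retain(|_, trees| !trees.is_empty());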
} unimplemented!() } // TODO [now]: don't accept too many fragments per para per relay-parent +// Well I guess we're bounded/protected here by backing (Seconded messages) async fn get_base_constraints( ctx: &mut Context, From 2c51312ba4dbf449eef4fc85b5d7943e102f8a76 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 24 Feb 2022 16:25:51 -0600 Subject: [PATCH 14/87] some more fluff --- node/core/prospective-parachains/src/lib.rs | 107 +++++++++++--------- 1 file changed, 58 insertions(+), 49 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index cc20e495f812..37cbb517ec7c 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -27,27 +27,6 @@ //! This also handles concerns such as the relay-chain being forkful, //! session changes, predicting validator group assignments, and //! the re-backing of parachain blocks as a result of these changes. -//! -//! ## Re-backing -//! -//! Since this subsystems deals in enabling the collation and extension -//! of parachains in advance of actually being recorded on the relay-chain, -//! it is possible for the validator-group that initially backed the parablock -//! to be no longer assigned at the point that the parablock is submitted -//! to the relay-chain. -//! -//! This presents an issue, because the relay-chain only accepts blocks -//! which are backed by the currently-assigned group of validators, not -//! by the group of validators previously assigned to the parachain. -//! -//! In order to avoid wasting work at group rotation boundaries, we must -//! allow validators to re-validate the work of the preceding group. -//! This process is known as re-backing. -//! -//! What happens in practice is that validators observe that they are -//! scheduled to be assigned to a specific para in the near future. -//! And as a result, they dig into the existing fragment-trees to -//! re-back what already existed. // TODO [now]: remove #![allow(unused)] @@ -70,8 +49,8 @@ use polkadot_node_subsystem_util::{ metrics::{self, prometheus}, }; use polkadot_primitives::vstaging::{ - Block, BlockId, BlockNumber, CandidateHash, GroupIndex, GroupRotationInfo, Hash, Header, - Id as ParaId, SessionIndex, ValidatorIndex, + Block, BlockId, BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, + GroupIndex, GroupRotationInfo, Hash, Header, Id as ParaId, SessionIndex, ValidatorIndex, }; use crate::error::{Error, FatalResult, NonFatal, NonFatalResult, Result}; @@ -80,6 +59,15 @@ mod error; const LOG_TARGET: &str = "parachain::prospective-parachains"; +// The maximum depth the subsystem will allow. 'depth' is defined as the +// amount of blocks between the para head in a relay-chain block's state +// and a candidate with a particular relay-parent. +// +// This value is chosen mostly for reasons of resource-limitation. +// Without it, a malicious validator group could create arbitrarily long, +// useless prospective parachains and DoS honest nodes. +const MAX_DEPTH: usize = 4; + /// The Prospective Parachains Subsystem. pub struct ProspectiveParachainsSubsystems { metrics: Metrics, @@ -88,52 +76,77 @@ pub struct ProspectiveParachainsSubsystems { // TODO [now]: add this enum to the broader subsystem types. pub enum ProspectiveParachainsMessage {} -// TODO [now]: rename. more of a pile than a tree really. -// We only use it as a tree when traversing to select what to build upon. 
-struct FragmentTrees { +struct Fragments { para: ParaId, // Fragment nodes based on fragment head-data. nodes: HashMap, } -impl FragmentTrees { +impl Fragments { fn is_empty(&self) -> bool { self.nodes.is_empty() } - fn determine_relevant_fragments(&self, constraints: &Constraints) -> Vec { - unimplemented!() - } + // TODO [now]: pruning +} - // Retain fragments whose relay-parent passes the predicate. - fn retain(&mut self, pred: impl Fn(&Hash) -> bool) { - self.nodes.retain(|_, v| pred(&v.relay_parent())); - } +enum FragmentState { + // The fragment has been seconded. + Seconded, + // The fragment has been completely backed by the group. + Backed, } struct FragmentNode { // Head-data of the parent node. - parent: Hash, - // Head-data of children. - // TODO [now]: make sure traversal detects loops. - children: Vec, + parent_fragment: CandidateHash, + // Candidate hashes of children. + children: Vec, fragment: Fragment, + erasure_root: Hash, + state: FragmentState, + depth: usize, } impl FragmentNode { fn relay_parent(&self) -> Hash { self.fragment.relay_parent().hash } + + fn depth(&self) -> usize { + self.depth + } + + /// Produce a candidate receipt from this fragment node. + fn produce_candidate_receipt(&self, para_id: ParaId) -> CommittedCandidateReceipt { + let candidate = self.fragment.candidate(); + + CommittedCandidateReceipt { + commitments: candidate.commitments.clone(), + descriptor: CandidateDescriptor { + para_id, + relay_parent: self.relay_parent(), + collator: candidate.collator.clone(), + signature: candidate.collator_signature.clone(), + persisted_validation_data_hash: candidate.persisted_validation_data.hash(), + pov_hash: candidate.pov_hash, + erasure_root: self.erasure_root, + para_head: candidate.commitments.head_data.hash(), + validation_code_hash: candidate.validation_code_hash.clone(), + }, + } + } } -struct RelevantParaFragments { +struct ScheduledPara { para: ParaId, base_constraints: Constraints, + validator_group: GroupIndex, } struct RelayBlockViewData { - // Relevant fragments for each parachain that is scheduled. - scheduling: HashMap, + // Scheduling info for paras and upcoming paras. + scheduling: HashMap, block_info: RelayChainBlockInfo, // TODO [now]: other stuff } @@ -145,7 +158,7 @@ struct View { // Fragment trees, one for each parachain. // TODO [now]: handle cleanup when these go obsolete. - fragment_trees: HashMap, + fragments: HashMap, } impl View { @@ -153,7 +166,7 @@ impl View { View { active_leaves: HashSet::new(), active_or_recent: HashMap::new(), - fragment_trees: HashMap::new(), + fragments: HashMap::new(), } } } @@ -251,7 +264,6 @@ where // 1. Keep only fragment trees for paras that are scheduled at any of our blocks. // 2. Keep only fragments that are built on any of our blocks. - // TODO [now]: give all backing subsystems messages or signals. // There are, annoyingly, going to be race conditions with networking. // Move networking into a backing 'super-subsystem'? @@ -290,10 +302,8 @@ struct SchedulingInfo { } struct CoreInfo { - para_id: ParaId, - - // (candidate hash, hash, timeout_at) if any - pending_availability: Option<(CandidateHash, Hash, BlockNumber)>, + // all para-ids that the core could accept blocks for in the near future. 
+ near_future: Vec, } async fn get_scheduling_info( @@ -315,7 +325,6 @@ where Context: SubsystemContext, Context: overseer::SubsystemContext, { - const LOOKBACK: usize = 2; unimplemented!() } From efcba0a8168800b423322d7b77de4820253437cd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 24 Feb 2022 17:04:27 -0600 Subject: [PATCH 15/87] extract fragment graph to submodule --- node/core/prospective-parachains/src/error.rs | 2 + .../src/fragment_graph.rs | 117 ++++++++++++++++++ node/core/prospective-parachains/src/lib.rs | 71 +---------- 3 files changed, 124 insertions(+), 66 deletions(-) create mode 100644 node/core/prospective-parachains/src/fragment_graph.rs diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index 1627860a95db..53c4afcab7f8 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -28,6 +28,8 @@ use polkadot_node_subsystem_util::{rolling_session_window::SessionsUnavailable, use crate::LOG_TARGET; use parity_scale_codec::Error as CodecError; +// TODO [now]: update to use fatality (thanks Bernhard) + /// Errors for this subsystem. #[derive(Debug, Error)] #[error(transparent)] diff --git a/node/core/prospective-parachains/src/fragment_graph.rs b/node/core/prospective-parachains/src/fragment_graph.rs new file mode 100644 index 000000000000..a88fc673c6a8 --- /dev/null +++ b/node/core/prospective-parachains/src/fragment_graph.rs @@ -0,0 +1,117 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A graph utility for managing fragments +//! +//! Each node in the graph represents a candidate. Nodes do not uniquely refer to a parachain +//! block for two reasons. +//! 1. There's no requirement that head-data is unique +//! for a parachain. Furthermore, a parachain is under no obligation to be acyclic, and this is mostly +//! just because it's totally inefficient to enforce it. Practical use-cases are acyclic, but there is +//! still more than one way to reach the same head-data. +//! 2. and candidates only refer to their parent by its head-data. +//! +//! The implication is that when we receive a candidate receipt, there are actually multiple +//! possibilities for any candidates between the para-head recorded in the relay parent's state +//! and the candidate we're examining. +//! +//! This means that our nodes need to handle multiple parents and that depth is an +//! attribute of a path, not a candidate. +//! +//! We also need to handle cycles, including nodes for candidates which produce a header +//! which is the same as the parent's. 
+ +use std::{ + collections::{hash_map::Entry as HEntry, HashMap, HashSet}, + sync::Arc, +}; + +use polkadot_node_subsystem::{ + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, + SubsystemError, SubsystemResult, +}; +use polkadot_node_subsystem_util::{ + inclusion_emulator::staging::{ + ConstraintModifications, Constraints, Fragment, RelayChainBlockInfo, + }, + metrics::{self, prometheus}, +}; +use polkadot_primitives::vstaging::{ + Block, BlockId, BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, + GroupIndex, GroupRotationInfo, Hash, Header, Id as ParaId, SessionIndex, ValidatorIndex, +}; + +pub(crate) struct FragmentGraph { + para: ParaId, + // Fragment nodes based on fragment head-data. + nodes: HashMap, +} + +impl FragmentGraph { + fn is_empty(&self) -> bool { + self.nodes.is_empty() + } + + // TODO [now]: pruning +} + +enum FragmentState { + // The fragment has been seconded. + Seconded, + // The fragment has been completely backed by the group. + Backed, +} + +struct FragmentNode { + // Head-data of the parent node. + parent_fragment: CandidateHash, + // Candidate hashes of children. + children: Vec, + fragment: Fragment, + erasure_root: Hash, + state: FragmentState, + depth: usize, +} + +impl FragmentNode { + fn relay_parent(&self) -> Hash { + self.fragment.relay_parent().hash + } + + fn depth(&self) -> usize { + self.depth + } + + /// Produce a candidate receipt from this fragment node. + fn produce_candidate_receipt(&self, para_id: ParaId) -> CommittedCandidateReceipt { + let candidate = self.fragment.candidate(); + + CommittedCandidateReceipt { + commitments: candidate.commitments.clone(), + descriptor: CandidateDescriptor { + para_id, + relay_parent: self.relay_parent(), + collator: candidate.collator.clone(), + signature: candidate.collator_signature.clone(), + persisted_validation_data_hash: candidate.persisted_validation_data.hash(), + pov_hash: candidate.pov_hash, + erasure_root: self.erasure_root, + para_head: candidate.commitments.head_data.hash(), + validation_code_hash: candidate.validation_code_hash.clone(), + }, + } + } +} diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 37cbb517ec7c..90a092678215 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -25,8 +25,7 @@ //! [`polkadot_node_subsystem_util::inclusion_emulator::staging`]. //! //! This also handles concerns such as the relay-chain being forkful, -//! session changes, predicting validator group assignments, and -//! the re-backing of parachain blocks as a result of these changes. +//! session changes, predicting validator group assignments. // TODO [now]: remove #![allow(unused)] @@ -54,8 +53,10 @@ use polkadot_primitives::vstaging::{ }; use crate::error::{Error, FatalResult, NonFatal, NonFatalResult, Result}; +use crate::fragment_graph::FragmentGraph; mod error; +mod fragment_graph; const LOG_TARGET: &str = "parachain::prospective-parachains"; @@ -76,68 +77,6 @@ pub struct ProspectiveParachainsSubsystems { // TODO [now]: add this enum to the broader subsystem types. pub enum ProspectiveParachainsMessage {} -struct Fragments { - para: ParaId, - // Fragment nodes based on fragment head-data. - nodes: HashMap, -} - -impl Fragments { - fn is_empty(&self) -> bool { - self.nodes.is_empty() - } - - // TODO [now]: pruning -} - -enum FragmentState { - // The fragment has been seconded. 
- Seconded, - // The fragment has been completely backed by the group. - Backed, -} - -struct FragmentNode { - // Head-data of the parent node. - parent_fragment: CandidateHash, - // Candidate hashes of children. - children: Vec, - fragment: Fragment, - erasure_root: Hash, - state: FragmentState, - depth: usize, -} - -impl FragmentNode { - fn relay_parent(&self) -> Hash { - self.fragment.relay_parent().hash - } - - fn depth(&self) -> usize { - self.depth - } - - /// Produce a candidate receipt from this fragment node. - fn produce_candidate_receipt(&self, para_id: ParaId) -> CommittedCandidateReceipt { - let candidate = self.fragment.candidate(); - - CommittedCandidateReceipt { - commitments: candidate.commitments.clone(), - descriptor: CandidateDescriptor { - para_id, - relay_parent: self.relay_parent(), - collator: candidate.collator.clone(), - signature: candidate.collator_signature.clone(), - persisted_validation_data_hash: candidate.persisted_validation_data.hash(), - pov_hash: candidate.pov_hash, - erasure_root: self.erasure_root, - para_head: candidate.commitments.head_data.hash(), - validation_code_hash: candidate.validation_code_hash.clone(), - }, - } - } -} - struct ScheduledPara { para: ParaId, base_constraints: Constraints, @@ -156,9 +95,9 @@ struct View { active_leaves: HashSet, active_or_recent: HashMap, - // Fragment trees, one for each parachain. + // Fragment graphs, one for each parachain. // TODO [now]: handle cleanup when these go obsolete. - fragments: HashMap, + fragments: HashMap, } impl View { From 435042d9b8be07fff4649d107a31fa983ae0cd29 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 25 Feb 2022 12:51:02 -0600 Subject: [PATCH 16/87] begin fragment graph API --- .../src/fragment_graph.rs | 33 ++++++++++--------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_graph.rs b/node/core/prospective-parachains/src/fragment_graph.rs index a88fc673c6a8..28856e3d3d17 100644 --- a/node/core/prospective-parachains/src/fragment_graph.rs +++ b/node/core/prospective-parachains/src/fragment_graph.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! A graph utility for managing fragments +//! A graph utility for managing unbacked parachain fragments. //! //! Each node in the graph represents a candidate. Nodes do not uniquely refer to a parachain //! block for two reasons. @@ -32,7 +32,10 @@ //! attribute of a path, not a candidate. //! //! We also need to handle cycles, including nodes for candidates which produce a header -//! which is the same as the parent's. +//! which is the same as its parent's. +//! +//! The graph exposes a 'frontier' of nodes which appear to be the best to build upon +//! and is the primary means for higher-level code to select candidates to build upon. use std::{ collections::{hash_map::Entry as HEntry, HashMap, HashSet}, @@ -54,18 +57,19 @@ use polkadot_primitives::vstaging::{ GroupIndex, GroupRotationInfo, Hash, Header, Id as ParaId, SessionIndex, ValidatorIndex, }; +// TODO [now]: separate graph per relay-parent (constraints)? +// TODO [now]: keep nodes and graphs separate? recompute / prune graphs +// on every new relay parent? +// TODO [now]: API for selecting backed candidates pub(crate) struct FragmentGraph { para: ParaId, - // Fragment nodes based on fragment head-data. 
- nodes: HashMap, + relay_parent: RelayChainBlockInfo, + base_constraints: Constraints, } -impl FragmentGraph { - fn is_empty(&self) -> bool { - self.nodes.is_empty() - } - - // TODO [now]: pruning +struct CandidateGraph { + // TODO [now]: semi-ordered pile of candidates. + // we'll need to support some kinds of traversal and insertions } enum FragmentState { @@ -76,14 +80,13 @@ enum FragmentState { } struct FragmentNode { - // Head-data of the parent node. - parent_fragment: CandidateHash, + // The hash of the head-data of the parent node + parent: Hash, // Candidate hashes of children. children: Vec, fragment: Fragment, erasure_root: Hash, state: FragmentState, - depth: usize, } impl FragmentNode { @@ -91,9 +94,7 @@ impl FragmentNode { self.fragment.relay_parent().hash } - fn depth(&self) -> usize { - self.depth - } + /// Produce a candidate receipt from this fragment node. fn produce_candidate_receipt(&self, para_id: ParaId) -> CommittedCandidateReceipt { From fff081791ff2e42c3c081b35a7f473eeda9574d3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 25 Feb 2022 15:57:09 -0600 Subject: [PATCH 17/87] trees, not graphs --- Cargo.lock | 441 ++++++++++-------- .../src/fragment_graph.rs | 118 ----- .../src/fragment_tree.rs | 202 ++++++++ node/core/prospective-parachains/src/lib.rs | 13 +- 4 files changed, 452 insertions(+), 322 deletions(-) delete mode 100644 node/core/prospective-parachains/src/fragment_graph.rs create mode 100644 node/core/prospective-parachains/src/fragment_tree.rs diff --git a/Cargo.lock b/Cargo.lock index 900959f2a256..9f57ec29a5d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -456,7 +456,7 @@ dependencies = [ "fnv", "futures 0.3.21", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "sc-chain-spec", "sc-client-api", @@ -489,7 +489,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "sc-rpc", "sc-utils", @@ -509,7 +509,7 @@ name = "beefy-primitives" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-api", "sp-application-crypto", @@ -558,16 +558,28 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty 1.1.0", + "radium 0.6.2", + "tap", + "wyz 0.2.0", +] + [[package]] name = "bitvec" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" dependencies = [ - "funty", - "radium", + "funty 2.0.0", + "radium 0.7.0", "tap", - "wyz", + "wyz 0.5.0", ] [[package]] @@ -728,7 +740,7 @@ dependencies = [ "bp-test-utils", "finality-grandpa", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-core", @@ -743,7 +755,7 @@ version = "0.1.0" dependencies = [ "bp-runtime", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-std", ] @@ -752,12 +764,12 @@ dependencies = [ name = "bp-messages" version = "0.1.0" dependencies = [ - "bitvec", + "bitvec 1.0.0", "bp-runtime", 
"frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-std", @@ -772,7 +784,7 @@ dependencies = [ "frame-support", "frame-system", "hex", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-api", "sp-core", @@ -789,7 +801,7 @@ dependencies = [ "bp-polkadot-core", "bp-runtime", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "smallvec", "sp-api", "sp-runtime", @@ -805,7 +817,7 @@ dependencies = [ "hash-db", "hex-literal", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -822,7 +834,7 @@ dependencies = [ "bp-header-chain", "ed25519-dalek", "finality-grandpa", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-application-crypto", "sp-finality-grandpa", "sp-runtime", @@ -837,7 +849,7 @@ dependencies = [ "bp-polkadot-core", "bp-rococo", "bp-runtime", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-api", "sp-runtime", "sp-std", @@ -857,7 +869,7 @@ dependencies = [ "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-runtime", @@ -1977,7 +1989,7 @@ dependencies = [ "futures-timer", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.11.2", "scale-info", ] @@ -2051,7 +2063,7 @@ name = "fork-tree" version = "3.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", ] [[package]] @@ -2073,7 +2085,7 @@ dependencies = [ "frame-system", "linregress", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "paste", "scale-info", "serde", @@ -2099,7 +2111,7 @@ dependencies = [ "handlebars", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-cli", "sc-client-db", "sc-executor", @@ -2120,7 +2132,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-arithmetic", "sp-npos-elections", @@ -2134,7 +2146,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -2150,7 +2162,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ "cfg-if 1.0.0", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", ] @@ -2166,7 +2178,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "paste", "scale-info", "serde", @@ -2226,7 +2238,7 @@ dependencies = [ "frame-support", "frame-support-test-pallet", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "pretty_assertions", "rustversion", "scale-info", @@ -2248,7 +2260,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", ] @@ -2259,7 +2271,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "frame-support", "log", - 
"parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-core", @@ -2277,7 +2289,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-runtime", @@ -2289,7 +2301,7 @@ name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-api", ] @@ -2354,6 +2366,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "funty" version = "2.0.0" @@ -2953,7 +2971,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", ] [[package]] @@ -3421,7 +3439,7 @@ name = "kusama-runtime" version = "0.9.17" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -3471,7 +3489,7 @@ dependencies = [ "pallet-utility", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -4943,7 +4961,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-runtime", "sp-std", @@ -4957,7 +4975,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-application-crypto", "sp-authority-discovery", @@ -4973,7 +4991,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-authorship", "sp-runtime", @@ -4992,7 +5010,7 @@ dependencies = [ "pallet-authorship", "pallet-session", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-application-crypto", "sp-consensus-babe", @@ -5015,7 +5033,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5053,7 +5071,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-runtime", "sp-std", @@ -5068,7 +5086,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-runtime", @@ -5091,7 +5109,7 @@ dependencies = [ "pallet-mmr", "pallet-mmr-primitives", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-core", @@ -5110,7 +5128,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5127,7 +5145,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5148,7 +5166,7 @@ 
dependencies = [ "frame-system", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-core", @@ -5163,7 +5181,7 @@ dependencies = [ name = "pallet-bridge-messages" version = "0.1.0" dependencies = [ - "bitvec", + "bitvec 1.0.0", "bp-message-dispatch", "bp-messages", "bp-runtime", @@ -5175,7 +5193,7 @@ dependencies = [ "log", "num-traits", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-core", @@ -5193,7 +5211,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5209,7 +5227,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-io", @@ -5227,7 +5245,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "rand 0.7.3", "scale-info", "sp-arithmetic", @@ -5249,7 +5267,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5266,7 +5284,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-arithmetic", "sp-runtime", @@ -5284,7 +5302,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-application-crypto", "sp-core", @@ -5305,7 +5323,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-io", "sp-runtime", @@ -5322,7 +5340,7 @@ dependencies = [ "frame-system", "log", "pallet-authorship", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-application-crypto", "sp-core", @@ -5340,7 +5358,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5358,7 +5376,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5376,7 +5394,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-mmr-primitives", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5392,7 +5410,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "serde", "sp-api", "sp-core", @@ -5409,7 +5427,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "pallet-mmr-primitives", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "serde", "sp-api", "sp-blockchain", @@ -5425,7 +5443,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-io", "sp-runtime", @@ -5439,7 +5457,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-io", "sp-runtime", @@ -5455,7 +5473,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-runtime", @@ -5479,7 +5497,7 @@ dependencies = [ "pallet-offences", "pallet-session", "pallet-staking", - 
"parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-runtime", "sp-staking", @@ -5494,7 +5512,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5510,7 +5528,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-io", "sp-runtime", @@ -5524,7 +5542,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-io", "sp-runtime", @@ -5540,7 +5558,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-io", "sp-runtime", @@ -5557,7 +5575,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5591,7 +5609,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "rand_chacha 0.2.2", "scale-info", "sp-runtime", @@ -5610,7 +5628,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "rand_chacha 0.2.2", "scale-info", "serde", @@ -5648,7 +5666,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-io", "sp-runtime", @@ -5664,7 +5682,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-inherents", "sp-io", @@ -5683,7 +5701,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-core", @@ -5699,7 +5717,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "smallvec", @@ -5718,7 +5736,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-api", "sp-blockchain", "sp-core", @@ -5732,7 +5750,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-api", "sp-runtime", ] @@ -5747,7 +5765,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-runtime", @@ -5762,7 +5780,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-io", @@ -5779,7 +5797,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-runtime", "sp-std", @@ -5793,7 +5811,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-parachain", 
"polkadot-runtime-parachains", "scale-info", @@ -5818,7 +5836,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-primitives", "polkadot-runtime-common", "scale-info", @@ -5851,6 +5869,19 @@ dependencies = [ "snap", ] +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 0.20.4", + "byte-slice-cast", + "impl-trait-for-tuples", + "serde", +] + [[package]] name = "parity-scale-codec" version = "3.0.0" @@ -5858,7 +5889,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a7f3fcf5e45fc28b84dcdab6b983e77f197ec01f325a33f404ba6855afd1070" dependencies = [ "arrayvec 0.7.2", - "bitvec", + "bitvec 1.0.0", "byte-slice-cast", "impl-trait-for-tuples", "parity-scale-codec-derive", @@ -6229,7 +6260,7 @@ name = "polkadot-availability-bitfield-distribution" version = "0.9.17" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "env_logger 0.9.0", "futures 0.3.21", "log", @@ -6255,7 +6286,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "lru 0.7.2", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6285,7 +6316,7 @@ dependencies = [ "futures-timer", "log", "lru 0.7.2", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6369,7 +6400,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6390,7 +6421,7 @@ dependencies = [ name = "polkadot-core-primitives" version = "0.9.17" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parity-util-mem", "scale-info", "sp-core", @@ -6410,7 +6441,7 @@ dependencies = [ "futures-timer", "lazy_static", "lru 0.7.2", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6433,7 +6464,7 @@ dependencies = [ name = "polkadot-erasure-coding" version = "0.9.17" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-node-primitives", "polkadot-primitives", "reed-solomon-novelpoly", @@ -6476,7 +6507,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "futures-timer", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "polkadot-node-network-protocol", "polkadot-node-subsystem", @@ -6497,7 +6528,7 @@ name = "polkadot-node-collation-generation" version = "0.9.17" dependencies = [ "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6516,7 +6547,7 @@ name = "polkadot-node-core-approval-voting" version = "0.9.17" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "derive_more", "futures 0.3.21", "futures-timer", @@ -6524,7 +6555,7 @@ dependencies = [ "kvdb-memorydb", "lru 0.7.2", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -6553,14 +6584,14 @@ name = "polkadot-node-core-av-store" version = "0.9.17" dependencies = [ "assert_matches", - 
"bitvec", + "bitvec 1.0.0", "env_logger 0.9.0", "futures 0.3.21", "futures-timer", "kvdb", "kvdb-memorydb", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -6581,7 +6612,7 @@ name = "polkadot-node-core-backing" version = "0.9.17" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "futures 0.3.21", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -6624,7 +6655,7 @@ dependencies = [ "assert_matches", "async-trait", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6645,7 +6676,7 @@ version = "0.9.17" dependencies = [ "futures 0.3.21", "maplit", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6667,7 +6698,7 @@ dependencies = [ "futures-timer", "kvdb", "kvdb-memorydb", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6689,7 +6720,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "lru 0.7.2", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6724,8 +6755,8 @@ dependencies = [ name = "polkadot-node-core-prospective-parachains" version = "0.9.16" dependencies = [ - "futures 0.3.19", - "parity-scale-codec", + "futures 0.3.21", + "parity-scale-codec 2.3.1", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-util", @@ -6738,7 +6769,7 @@ dependencies = [ name = "polkadot-node-core-provisioner" version = "0.9.17" dependencies = [ - "bitvec", + "bitvec 1.0.0", "futures 0.3.21", "futures-timer", "polkadot-node-primitives", @@ -6765,7 +6796,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "pin-project 1.0.10", "polkadot-core-primitives", "polkadot-node-subsystem-util", @@ -6839,7 +6870,7 @@ dependencies = [ "lazy_static", "log", "mick-jaeger", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "polkadot-node-primitives", "polkadot-primitives", @@ -6860,7 +6891,7 @@ dependencies = [ "log", "metered-channel", "nix", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-primitives", "polkadot-test-service", "prometheus-parse", @@ -6883,7 +6914,7 @@ dependencies = [ "async-trait", "fatality", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", @@ -6899,7 +6930,7 @@ version = "0.9.17" dependencies = [ "bounded-vec", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-erasure-coding", "polkadot-parachain", "polkadot-primitives", @@ -6975,7 +7006,7 @@ dependencies = [ "log", "lru 0.7.2", "metered-channel", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "pin-project 1.0.10", "polkadot-node-jaeger", "polkadot-node-metrics", @@ -7054,7 +7085,7 @@ version = "0.9.17" dependencies = [ "derive_more", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parity-util-mem", "polkadot-core-primitives", "scale-info", @@ -7082,10 +7113,10 @@ dependencies = [ name = "polkadot-primitives" version = "0.9.17" dependencies = [ - "bitvec", + "bitvec 1.0.0", "frame-system", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.0.0", 
"parity-util-mem", "polkadot-core-primitives", "polkadot-parachain", @@ -7152,7 +7183,7 @@ name = "polkadot-runtime" version = "0.9.17" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -7198,7 +7229,7 @@ dependencies = [ "pallet-utility", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-constants", @@ -7241,7 +7272,7 @@ name = "polkadot-runtime-common" version = "0.9.17" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -7263,7 +7294,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-parachains", @@ -7304,7 +7335,7 @@ name = "polkadot-runtime-metrics" version = "0.9.17" dependencies = [ "bs58", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-primitives", "sp-std", "sp-tracing", @@ -7316,7 +7347,7 @@ version = "0.9.17" dependencies = [ "assert_matches", "bitflags", - "bitvec", + "bitvec 1.0.0", "derive_more", "frame-benchmarking", "frame-support", @@ -7333,7 +7364,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-metrics", @@ -7477,7 +7508,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "indexmap", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7501,7 +7532,7 @@ dependencies = [ name = "polkadot-statement-table" version = "0.9.17" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-primitives", "sp-core", ] @@ -7511,7 +7542,7 @@ name = "polkadot-test-client" version = "0.9.17" dependencies = [ "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-node-subsystem", "polkadot-primitives", "polkadot-test-runtime", @@ -7564,7 +7595,7 @@ name = "polkadot-test-runtime" version = "0.9.17" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-election-provider-support", "frame-executive", "frame-support", @@ -7590,7 +7621,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -7990,6 +8021,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + [[package]] name = "radium" version = "0.7.0" @@ -8246,7 +8283,7 @@ dependencies = [ "env_logger 0.9.0", "jsonrpsee 0.8.0", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "serde", "serde_json", "sp-core", @@ -8385,7 +8422,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -8607,7 +8644,7 @@ dependencies = [ "ip_network", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "prost", 
"prost-build", "rand 0.7.3", @@ -8631,7 +8668,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -8651,7 +8688,7 @@ name = "sc-block-builder" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-client-api", "sp-api", "sp-block-builder", @@ -8669,7 +8706,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "impl-trait-for-tuples", "memmap2 0.5.0", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-chain-spec-derive", "sc-network", "sc-telemetry", @@ -8703,7 +8740,7 @@ dependencies = [ "libp2p", "log", "names", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "rand 0.7.3", "regex", "rpassword", @@ -8737,7 +8774,7 @@ dependencies = [ "futures 0.3.21", "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "sc-executor", "sc-transaction-pool-api", @@ -8768,7 +8805,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "sc-client-api", "sc-state-db", @@ -8818,7 +8855,7 @@ dependencies = [ "num-bigint", "num-rational 0.2.4", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "rand 0.7.3", "retain_mut", @@ -8878,7 +8915,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "fork-tree", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-client-api", "sc-consensus", "sp-blockchain", @@ -8894,7 +8931,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-client-api", "sc-consensus", "sc-telemetry", @@ -8929,7 +8966,7 @@ dependencies = [ "lazy_static", "libsecp256k1", "lru 0.6.6", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "sc-executor-common", "sc-executor-wasmi", @@ -8955,7 +8992,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-allocator", "sp-core", "sp-maybe-compressed-blob", @@ -8972,7 +9009,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-allocator", "sc-executor-common", "scoped-tls", @@ -8990,7 +9027,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parity-wasm 0.42.2", "sc-allocator", "sc-executor-common", @@ -9013,7 +9050,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "rand 0.8.5", "sc-block-builder", @@ -9051,7 +9088,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-client-api", "sc-finality-grandpa", "sc-rpc", @@ -9117,7 +9154,7 @@ dependencies = [ "linked_hash_set", "log", "lru 0.7.2", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "pin-project 1.0.10", "prost", @@ -9175,7 
+9212,7 @@ dependencies = [ "hyper-rustls", "num_cpus", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "rand 0.7.3", "sc-client-api", @@ -9221,7 +9258,7 @@ dependencies = [ "jsonrpc-core", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "sc-block-builder", "sc-chain-spec", @@ -9253,7 +9290,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "sc-chain-spec", "sc-transaction-pool-api", @@ -9298,7 +9335,7 @@ dependencies = [ "jsonrpc-core", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parity-util-mem", "parking_lot 0.12.0", "pin-project 1.0.10", @@ -9354,7 +9391,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.12.0", @@ -9370,7 +9407,7 @@ dependencies = [ "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", @@ -9452,7 +9489,7 @@ dependencies = [ "futures-timer", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parity-util-mem", "parking_lot 0.12.0", "retain_mut", @@ -9501,10 +9538,10 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0563970d79bcbf3c537ce3ad36d859b30d36fc5b190efd227f1f7a84d7cf0d42" dependencies = [ - "bitvec", + "bitvec 1.0.0", "cfg-if 1.0.0", "derive_more", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info-derive", "serde", ] @@ -9877,7 +9914,7 @@ name = "slot-range-helper" version = "0.9.17" dependencies = [ "enumn", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "paste", "sp-runtime", "sp-std", @@ -9966,7 +10003,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -9993,7 +10030,7 @@ name = "sp-application-crypto" version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-core", @@ -10008,7 +10045,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "integer-sqrt", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-debug-derive", @@ -10021,7 +10058,7 @@ name = "sp-authority-discovery" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-api", "sp-application-crypto", @@ -10035,7 +10072,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "async-trait", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-inherents", "sp-runtime", "sp-std", @@ -10046,7 +10083,7 @@ name = "sp-block-builder" version = "4.0.0-dev" source = 
"git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-api", "sp-inherents", "sp-runtime", @@ -10061,7 +10098,7 @@ dependencies = [ "futures 0.3.21", "log", "lru 0.7.2", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "sp-api", "sp-consensus", @@ -10080,7 +10117,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-core", "sp-inherents", "sp-runtime", @@ -10097,7 +10134,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "async-trait", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-api", @@ -10118,7 +10155,7 @@ name = "sp-consensus-slots" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-arithmetic", @@ -10132,7 +10169,7 @@ name = "sp-consensus-vrf" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "schnorrkel", "sp-core", "sp-runtime", @@ -10160,7 +10197,7 @@ dependencies = [ "log", "merlin", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parity-util-mem", "parking_lot 0.12.0", "primitive-types", @@ -10234,7 +10271,7 @@ version = "0.11.0" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-std", "sp-storage", ] @@ -10246,7 +10283,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "finality-grandpa", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-api", @@ -10264,7 +10301,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "async-trait", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-core", "sp-runtime", "sp-std", @@ -10280,7 +10317,7 @@ dependencies = [ "hash-db", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "sp-core", "sp-externalities", @@ -10314,7 +10351,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "schnorrkel", "serde", @@ -10337,7 +10374,7 @@ name = "sp-npos-elections" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "serde", "sp-arithmetic", @@ -10397,7 +10434,7 @@ dependencies = [ "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parity-util-mem", "paste", "rand 0.7.3", @@ -10416,7 +10453,7 @@ version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", @@ -10453,7 +10490,7 
@@ name = "sp-session" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-api", "sp-core", @@ -10467,7 +10504,7 @@ name = "sp-staking" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-runtime", "sp-std", @@ -10481,7 +10518,7 @@ dependencies = [ "hash-db", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parking_lot 0.12.0", "rand 0.7.3", "smallvec", @@ -10507,7 +10544,7 @@ version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "ref-cast", "serde", "sp-debug-derive", @@ -10535,7 +10572,7 @@ dependencies = [ "async-trait", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-api", "sp-inherents", "sp-runtime", @@ -10548,7 +10585,7 @@ name = "sp-tracing" version = "4.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-std", "tracing", "tracing-core", @@ -10571,7 +10608,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "async-trait", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-inherents", @@ -10587,7 +10624,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "hash-db", "memory-db", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "sp-core", "sp-std", @@ -10601,7 +10638,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "parity-wasm 0.42.2", "scale-info", "serde", @@ -10617,7 +10654,7 @@ name = "sp-version-proc-macro" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311beede13479c9a00cca85d823b6b00" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "proc-macro2", "quote", "syn", @@ -10630,7 +10667,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#c22fce5a311b dependencies = [ "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-std", "wasmi", "wasmtime", @@ -10687,7 +10724,7 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-staking", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "paste", "polkadot-core-primitives", "polkadot-runtime", @@ -10842,7 +10879,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-client-api", "sc-rpc-api", "sc-transaction-pool-api", @@ -10874,7 +10911,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "hex", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sc-client-api", "sc-client-db", "sc-consensus", @@ -11027,7 +11064,7 @@ name = "test-parachain-adder" version = "0.9.17" dependencies = [ "dlmalloc", - "parity-scale-codec", + "parity-scale-codec 3.0.0", 
"polkadot-parachain", "sp-io", "sp-std", @@ -11043,7 +11080,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -11072,7 +11109,7 @@ dependencies = [ name = "test-parachains" version = "0.9.17" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-core", "test-parachain-adder", "test-parachain-halt", @@ -11540,7 +11577,7 @@ dependencies = [ "clap", "jsonrpsee 0.4.1", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "remote-externalities", "sc-chain-spec", "sc-cli", @@ -12179,7 +12216,7 @@ name = "westend-runtime" version = "0.9.17" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -12227,7 +12264,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -12405,6 +12442,12 @@ dependencies = [ "winapi-build", ] +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "wyz" version = "0.5.0" @@ -12432,7 +12475,7 @@ dependencies = [ "derivative", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "scale-info", "xcm-procedural", ] @@ -12447,7 +12490,7 @@ dependencies = [ "pallet-balances", "pallet-transaction-payment", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -12468,7 +12511,7 @@ dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "sp-arithmetic", "sp-core", "sp-io", @@ -12512,7 +12555,7 @@ name = "xcm-simulator" version = "0.9.17" dependencies = [ "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "paste", "polkadot-core-primitives", "polkadot-parachain", @@ -12531,7 +12574,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12555,7 +12598,7 @@ dependencies = [ "honggfuzz", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12611,7 +12654,7 @@ version = "0.9.17" dependencies = [ "futures-util", "lazy_static", - "parity-scale-codec", + "parity-scale-codec 3.0.0", "reqwest", "serde", "serde_json", diff --git a/node/core/prospective-parachains/src/fragment_graph.rs b/node/core/prospective-parachains/src/fragment_graph.rs deleted file mode 100644 index 28856e3d3d17..000000000000 --- a/node/core/prospective-parachains/src/fragment_graph.rs +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! A graph utility for managing unbacked parachain fragments. -//! -//! Each node in the graph represents a candidate. Nodes do not uniquely refer to a parachain -//! block for two reasons. -//! 1. There's no requirement that head-data is unique -//! for a parachain. Furthermore, a parachain is under no obligation to be acyclic, and this is mostly -//! just because it's totally inefficient to enforce it. Practical use-cases are acyclic, but there is -//! still more than one way to reach the same head-data. -//! 2. and candidates only refer to their parent by its head-data. -//! -//! The implication is that when we receive a candidate receipt, there are actually multiple -//! possibilities for any candidates between the para-head recorded in the relay parent's state -//! and the candidate we're examining. -//! -//! This means that our nodes need to handle multiple parents and that depth is an -//! attribute of a path, not a candidate. -//! -//! We also need to handle cycles, including nodes for candidates which produce a header -//! which is the same as its parent's. -//! -//! The graph exposes a 'frontier' of nodes which appear to be the best to build upon -//! and is the primary means for higher-level code to select candidates to build upon. - -use std::{ - collections::{hash_map::Entry as HEntry, HashMap, HashSet}, - sync::Arc, -}; - -use polkadot_node_subsystem::{ - overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, - SubsystemError, SubsystemResult, -}; -use polkadot_node_subsystem_util::{ - inclusion_emulator::staging::{ - ConstraintModifications, Constraints, Fragment, RelayChainBlockInfo, - }, - metrics::{self, prometheus}, -}; -use polkadot_primitives::vstaging::{ - Block, BlockId, BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, - GroupIndex, GroupRotationInfo, Hash, Header, Id as ParaId, SessionIndex, ValidatorIndex, -}; - -// TODO [now]: separate graph per relay-parent (constraints)? -// TODO [now]: keep nodes and graphs separate? recompute / prune graphs -// on every new relay parent? -// TODO [now]: API for selecting backed candidates -pub(crate) struct FragmentGraph { - para: ParaId, - relay_parent: RelayChainBlockInfo, - base_constraints: Constraints, -} - -struct CandidateGraph { - // TODO [now]: semi-ordered pile of candidates. - // we'll need to support some kinds of traversal and insertions -} - -enum FragmentState { - // The fragment has been seconded. - Seconded, - // The fragment has been completely backed by the group. - Backed, -} - -struct FragmentNode { - // The hash of the head-data of the parent node - parent: Hash, - // Candidate hashes of children. - children: Vec, - fragment: Fragment, - erasure_root: Hash, - state: FragmentState, -} - -impl FragmentNode { - fn relay_parent(&self) -> Hash { - self.fragment.relay_parent().hash - } - - - - /// Produce a candidate receipt from this fragment node. 
- fn produce_candidate_receipt(&self, para_id: ParaId) -> CommittedCandidateReceipt { - let candidate = self.fragment.candidate(); - - CommittedCandidateReceipt { - commitments: candidate.commitments.clone(), - descriptor: CandidateDescriptor { - para_id, - relay_parent: self.relay_parent(), - collator: candidate.collator.clone(), - signature: candidate.collator_signature.clone(), - persisted_validation_data_hash: candidate.persisted_validation_data.hash(), - pov_hash: candidate.pov_hash, - erasure_root: self.erasure_root, - para_head: candidate.commitments.head_data.hash(), - validation_code_hash: candidate.validation_code_hash.clone(), - }, - } - } -} diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs new file mode 100644 index 000000000000..adcb17a3b11e --- /dev/null +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -0,0 +1,202 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A tree utility for managing unbacked parachain fragments. +//! +//! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`] +//! which are meant to be used in close conjunction. Each tree is associated with a particular +//! relay-parent, and it's expected that higher-level code will have a tree for each +//! relay-chain block which might reasonably have blocks built upon it. +//! +//! Trees only store references into the [`CandidateStorage`] and the storage is meant to +//! be pruned when trees are dropped by higher-level code. +//! +//! Each node in the tree represents a candidate. Nodes do not uniquely refer to a parachain +//! block for two reasons. +//! 1. There's no requirement that head-data is unique +//! for a parachain. Furthermore, a parachain is under no obligation to be acyclic, and this is mostly +//! just because it's totally inefficient to enforce it. Practical use-cases are acyclic, but there is +//! still more than one way to reach the same head-data. +//! 2. and candidates only refer to their parent by its head-data. +//! +//! The implication is that when we receive a candidate receipt, there are actually multiple +//! possibilities for any candidates between the para-head recorded in the relay parent's state +//! and the candidate we're examining. +//! +//! This means that our nodes need to handle multiple parents and that depth is an +//! attribute of a node in a tree, not a candidate. Put another way, the same candidate might +//! have different depths in different parts of the tree. +//! +//! We also need to handle cycles, including nodes for candidates which produce a header +//! which is the same as its parent's. Within a [`Fragment] +// TODO [now]: review & update. 
+ +use std::{ + collections::{hash_map::Entry as HEntry, HashMap, HashSet}, + sync::Arc, +}; + +use polkadot_node_subsystem::{ + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, + SubsystemError, SubsystemResult, +}; +use polkadot_node_subsystem_util::{ + inclusion_emulator::staging::{ + ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, + }, + metrics::{self, prometheus}, +}; +use polkadot_primitives::vstaging::{ + Block, BlockId, BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, + GroupIndex, GroupRotationInfo, Hash, HeadData, Header, Id as ParaId, PersistedValidationData, + SessionIndex, ValidatorIndex, +}; + +/// An error indicating that a supplied candidate didn't match the persisted +/// validation data provided alongside it. +#[derive(Debug, Clone, PartialEq)] +pub(crate) struct PersistedValidationDataMismatch; + +pub(crate) struct CandidateStorage { + // Index from parent head hash to candidate hashes. + by_parent_head: HashMap>, + + // Index from candidate hash to fragment node. + by_candidate_hash: HashMap, +} + +impl CandidateStorage { + /// Create a new `CandidateStorage`. + pub fn new() -> Self { + CandidateStorage { by_parent_head: HashMap::new(), by_candidate_hash: HashMap::new() } + } + + /// Introduce a new candidate. The candidate passed to this function + /// should have been seconded before introduction. + pub fn add_candidate( + &mut self, + candidate: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + ) -> Result<(), PersistedValidationDataMismatch> { + let candidate_hash = candidate.hash(); + + if self.by_candidate_hash.contains_key(&candidate_hash) { + return Ok(()) + } + + if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash { + return Err(PersistedValidationDataMismatch) + } + + let parent_head_hash = persisted_validation_data.parent_head.hash(); + + let entry = CandidateEntry { + candidate_hash, + relay_parent: candidate.descriptor.relay_parent, + erasure_root: candidate.descriptor.erasure_root, + state: CandidateState::Seconded, + candidate: ProspectiveCandidate { + commitments: candidate.commitments, + collator: candidate.descriptor.collator, + collator_signature: candidate.descriptor.signature, + persisted_validation_data, + pov_hash: candidate.descriptor.pov_hash, + validation_code_hash: candidate.descriptor.validation_code_hash, + }, + }; + + self.by_parent_head.entry(parent_head_hash).or_default().insert(candidate_hash); + // sanity-checked already. + self.by_candidate_hash.insert(candidate_hash, entry); + + Ok(()) + } + + // TODO [now]: fn restrict_to(&graphs) which will be our main pruning function +} + +/// The state of a candidate. +/// +/// Candidates aren't even considered until they've at least been seconded. +pub(crate) enum CandidateState { + /// The candidate has been seconded. + Seconded, + /// The candidate has been completely backed by the group. + Backed, +} + +struct CandidateEntry { + candidate_hash: CandidateHash, + relay_parent: Hash, + candidate: ProspectiveCandidate, + state: CandidateState, + erasure_root: Hash, +} + +/// This is a graph of candidates associated +// TODO [now]: API for selecting backed candidates +pub(crate) struct FragmentTree { + para: ParaId, + relay_parent: RelayChainBlockInfo, + ancestors: Vec, + base_constraints: Constraints, + max_depth: usize, +} + +impl FragmentTree { + // TODO [now]: populate from existing candidate storage. 
+ + // TODO [now]: add new candidate and recursively populate as necessary. + + // TODO [now]: alternatively, always compute paths ad-hoc. +} + +struct FragmentNode { + // The hash of the head-data of the parent node + parent: Hash, + fragment: Fragment, + erasure_root: Hash, +} + +impl FragmentNode { + fn relay_parent(&self) -> Hash { + self.fragment.relay_parent().hash + } + + fn parent_head_data(&self) -> &HeadData { + &self.fragment.candidate().persisted_validation_data.parent_head + } + + /// Produce a candidate receipt from this fragment node. + fn produce_candidate_receipt(&self, para_id: ParaId) -> CommittedCandidateReceipt { + let candidate = self.fragment.candidate(); + + CommittedCandidateReceipt { + commitments: candidate.commitments.clone(), + descriptor: CandidateDescriptor { + para_id, + relay_parent: self.relay_parent(), + collator: candidate.collator.clone(), + signature: candidate.collator_signature.clone(), + persisted_validation_data_hash: candidate.persisted_validation_data.hash(), + pov_hash: candidate.pov_hash, + erasure_root: self.erasure_root, + para_head: candidate.commitments.head_data.hash(), + validation_code_hash: candidate.validation_code_hash.clone(), + }, + } + } +} diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 90a092678215..7ed9f6e00896 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -52,11 +52,13 @@ use polkadot_primitives::vstaging::{ GroupIndex, GroupRotationInfo, Hash, Header, Id as ParaId, SessionIndex, ValidatorIndex, }; -use crate::error::{Error, FatalResult, NonFatal, NonFatalResult, Result}; -use crate::fragment_graph::FragmentGraph; +use crate::{ + error::{Error, FatalResult, NonFatal, NonFatalResult, Result}, + fragment_tree::FragmentTree, +}; mod error; -mod fragment_graph; +mod fragment_tree; const LOG_TARGET: &str = "parachain::prospective-parachains"; @@ -96,8 +98,9 @@ struct View { active_or_recent: HashMap, // Fragment graphs, one for each parachain. - // TODO [now]: handle cleanup when these go obsolete. - fragments: HashMap, + // TODO [now]: make this per-para per active-leaf + // TODO [now]: have global candidate storage per para id + fragments: HashMap, } impl View { From f49482c80f1516e47d61858f0cd79015934bd8c5 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 25 Feb 2022 16:05:20 -0600 Subject: [PATCH 18/87] improve docs --- node/core/prospective-parachains/src/fragment_tree.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index adcb17a3b11e..dd06f050bdf0 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -41,7 +41,12 @@ //! have different depths in different parts of the tree. //! //! We also need to handle cycles, including nodes for candidates which produce a header -//! which is the same as its parent's. Within a [`Fragment] +//! which is the same as its parent's. Within a [`FragmentTree`], cycles are bounded by the +//! maximum depth allowed by the tree. +//! +//! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied, +//! [`FragmentTree`] complexity is bounded. This means that higher-level code needs to be selective +//! about limiting the amount of candidates that are considered. // TODO [now]: review & update. 
use std::{ From 73a208f1b1747728d61c54c5a1b5574002df0e45 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 25 Feb 2022 17:10:39 -0600 Subject: [PATCH 19/87] scope and constructor for trees --- .../src/fragment_tree.rs | 94 ++++++++++++++++--- 1 file changed, 81 insertions(+), 13 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index dd06f050bdf0..e368abd2d3ce 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! A tree utility for managing unbacked parachain fragments. +//! A tree utility for managing parachain fragments unreferenced by the relay-chain. //! //! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`] //! which are meant to be used in close conjunction. Each tree is associated with a particular //! relay-parent, and it's expected that higher-level code will have a tree for each //! relay-chain block which might reasonably have blocks built upon it. //! -//! Trees only store references into the [`CandidateStorage`] and the storage is meant to +//! Trees only store indices into the [`CandidateStorage`] and the storage is meant to //! be pruned when trees are dropped by higher-level code. //! //! Each node in the tree represents a candidate. Nodes do not uniquely refer to a parachain @@ -34,15 +34,15 @@ //! //! The implication is that when we receive a candidate receipt, there are actually multiple //! possibilities for any candidates between the para-head recorded in the relay parent's state -//! and the candidate we're examining. +//! and the candidate in question. //! //! This means that our nodes need to handle multiple parents and that depth is an //! attribute of a node in a tree, not a candidate. Put another way, the same candidate might //! have different depths in different parts of the tree. //! -//! We also need to handle cycles, including nodes for candidates which produce a header -//! which is the same as its parent's. Within a [`FragmentTree`], cycles are bounded by the -//! maximum depth allowed by the tree. +//! As an extreme example, a candidate which produces head-data which is the same as its parent +//! can correspond to multiple nodes within the same [`FragmentTree`]. Such cycles are bounded +//! by the maximum depth allowed by the tree. //! //! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied, //! [`FragmentTree`] complexity is bounded. This means that higher-level code needs to be selective @@ -50,7 +50,7 @@ // TODO [now]: review & update. use std::{ - collections::{hash_map::Entry as HEntry, HashMap, HashSet}, + collections::{hash_map::Entry as HEntry, HashMap, HashSet, BTreeMap}, sync::Arc, }; @@ -151,22 +151,90 @@ struct CandidateEntry { erasure_root: Hash, } -/// This is a graph of candidates associated -// TODO [now]: API for selecting backed candidates -pub(crate) struct FragmentTree { +/// The scope of a [`FragmentTree`]. +pub(crate) struct Scope { para: ParaId, relay_parent: RelayChainBlockInfo, - ancestors: Vec, + ancestors: BTreeMap, + ancestors_by_hash: HashSet, base_constraints: Constraints, max_depth: usize, } +/// An error variant indicating that ancestors provided to a scope +/// had unexpected order. 
+#[derive(Debug)] +pub struct UnexpectedAncestor; + +impl Scope { + /// Define a new [`Scope`]. + /// + /// All arguments are straightforward except the ancestors. + /// + /// Ancestors should be in reverse order, starting with the parent + /// of the `relay_parent`, and proceeding backwards in block number + /// increments of 1. Ancestors not following these conditions will be + /// rejected. + /// + /// Only ancestors whose children have the same session as the relay-parent's + /// children should be provided. + /// + /// It is allowed to provide zero ancestors. + pub fn with_ancestors( + para: ParaId, + relay_parent: RelayChainBlockInfo, + base_constraints: Constraints, + max_depth: usize, + ancestors: impl IntoIterator, + ) -> Result { + let mut ancestors_map = BTreeMap::new(); + let mut ancestors_by_hash = HashSet::new(); + { + let mut prev = relay_parent.number; + for ancestor in ancestors { + if prev == 0 { + return Err(UnexpectedAncestor); + } else if ancestor.number != prev - 1 { + return Err(UnexpectedAncestor); + } else { + prev = ancestor.number; + ancestors_by_hash.insert(ancestor.hash); + ancestors_map.insert(ancestor.number, ancestor); + } + } + } + + Ok(Scope { + para, + relay_parent, + base_constraints, + max_depth, + ancestors: ancestors_map, + ancestors_by_hash, + }) + } +} + +/// This is a tree of candidates based on some underlying storage of candidates +/// and a scope. +pub(crate) struct FragmentTree { + scope: Scope, + // TODO [now]: actual tree +} + impl FragmentTree { - // TODO [now]: populate from existing candidate storage. + /// Create a new [`FragmentTree`] with given scope, and populated from + /// the provided node storage. + pub fn new(scope: Scope, storage: &CandidateStorage) -> Self { + FragmentTree { + scope, + // TODO [now]: populate + } + } // TODO [now]: add new candidate and recursively populate as necessary. - // TODO [now]: alternatively, always compute paths ad-hoc. + // TODO [now]: API for selecting backed candidates } struct FragmentNode { From 1fc33ec42290477466fb7f3bc9145193e6fd626f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 28 Feb 2022 21:30:10 -0600 Subject: [PATCH 20/87] add some test TODOs --- node/core/prospective-parachains/src/fragment_tree.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index e368abd2d3ce..8f9134167ff9 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -273,3 +273,14 @@ impl FragmentNode { } } } + +#[cfg(test)] +mod tests { + // TODO [now]: scope rejects ancestors that skip blocks + + // TODO [now]: scope rejects ancestor of 0 + + // TODO [now]: storage sets up links correctly. + + // TODO [now]: storage pruning. 
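
// A rough sketch of the first two test cases listed above, assuming a hypothetical
// `make_constraints()` helper for building a baseline `Constraints` value:

fn block_info(number: BlockNumber) -> RelayChainBlockInfo {
    RelayChainBlockInfo {
        hash: Hash::repeat_byte(number as u8),
        number,
        storage_root: Hash::zero(),
    }
}

#[test]
fn scope_rejects_ancestors_that_skip_blocks() {
    // Relay-parent is block 10, but the first "ancestor" supplied is block 8.
    let res = Scope::with_ancestors(
        ParaId::from(5u32),
        block_info(10),
        make_constraints(),
        4,
        vec![block_info(8)],
    );
    assert!(matches!(res, Err(UnexpectedAncestor)));
}

#[test]
fn scope_rejects_ancestor_of_zero() {
    // A relay-parent at the genesis block cannot have any ancestors at all.
    let res = Scope::with_ancestors(
        ParaId::from(5u32),
        block_info(0),
        make_constraints(),
        4,
        vec![block_info(99)],
    );
    assert!(matches!(res, Err(UnexpectedAncestor)));
}
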
+} From 5a400a37e502448542e70e94efa132d3b1ca8eb0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 28 Feb 2022 23:00:00 -0600 Subject: [PATCH 21/87] limit max ancestors and store constraints --- .../prospective-parachains/src/fragment_tree.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 8f9134167ff9..276b2f25675a 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -155,7 +155,7 @@ struct CandidateEntry { pub(crate) struct Scope { para: ParaId, relay_parent: RelayChainBlockInfo, - ancestors: BTreeMap, + ancestors: BTreeMap, ancestors_by_hash: HashSet, base_constraints: Constraints, max_depth: usize, @@ -176,6 +176,9 @@ impl Scope { /// increments of 1. Ancestors not following these conditions will be /// rejected. /// + /// This function will only consume ancestors up to the `min_relay_parent_number` of + /// the `base_constraints`. + /// /// Only ancestors whose children have the same session as the relay-parent's /// children should be provided. /// @@ -185,21 +188,23 @@ impl Scope { relay_parent: RelayChainBlockInfo, base_constraints: Constraints, max_depth: usize, - ancestors: impl IntoIterator, + ancestors: impl IntoIterator, ) -> Result { let mut ancestors_map = BTreeMap::new(); let mut ancestors_by_hash = HashSet::new(); { let mut prev = relay_parent.number; - for ancestor in ancestors { + for (ancestor, constraints) in ancestors { if prev == 0 { return Err(UnexpectedAncestor); } else if ancestor.number != prev - 1 { return Err(UnexpectedAncestor); + } else if prev == base_constraints.min_relay_parent_number { + break } else { prev = ancestor.number; ancestors_by_hash.insert(ancestor.hash); - ancestors_map.insert(ancestor.number, ancestor); + ancestors_map.insert(ancestor.number, (ancestor, constraints)); } } } From 6ff3e310fd067982abd2b8b5e960af49e6ff9481 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 28 Feb 2022 23:36:32 -0600 Subject: [PATCH 22/87] constructor --- .../src/fragment_tree.rs | 30 +++++++++++++++---- 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 276b2f25675a..4082e8a794a5 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -69,6 +69,7 @@ use polkadot_primitives::vstaging::{ GroupIndex, GroupRotationInfo, Hash, HeadData, Header, Id as ParaId, PersistedValidationData, SessionIndex, ValidatorIndex, }; +use super::LOG_TARGET; /// An error indicating that a supplied candidate didn't match the persisted /// validation data provided alongside it. @@ -220,21 +221,37 @@ impl Scope { } } +// We use indices into a flat vector to refer to nodes in the tree. +type NodePointer = usize; + /// This is a tree of candidates based on some underlying storage of candidates /// and a scope. pub(crate) struct FragmentTree { scope: Scope, - // TODO [now]: actual tree + nodes: Vec, } impl FragmentTree { /// Create a new [`FragmentTree`] with given scope, and populated from /// the provided node storage. 
pub fn new(scope: Scope, storage: &CandidateStorage) -> Self { - FragmentTree { + let mut tree = FragmentTree { scope, - // TODO [now]: populate - } + nodes: Vec::new(), + }; + + tracing::trace!( + target: LOG_TARGET, + relay_parent = ?tree.scope.relay_parent.hash, + relay_parent_num = tree.scope.relay_parent.number, + para_id = ?tree.scope.para, + ancestors = tree.scope.ancestors.len(), + "Instantiating Fragment Tree", + ); + + // populate. + + tree } // TODO [now]: add new candidate and recursively populate as necessary. @@ -243,8 +260,9 @@ impl FragmentTree { } struct FragmentNode { - // The hash of the head-data of the parent node - parent: Hash, + // A pointer to the parent node. `None` indicates that this is a root + // node. + parent: Option, fragment: Fragment, erasure_root: Hash, } From 109e0f4af6bb9c5412f6e5a521db36778689e3b2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 1 Mar 2022 17:32:56 -0600 Subject: [PATCH 23/87] constraints: fix bug in HRMP watermarks --- .../src/inclusion_emulator/staging.rs | 67 ++++++++++++++++--- 1 file changed, 58 insertions(+), 9 deletions(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index e886a9a0ff22..2c5b8ee243b0 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -225,7 +225,8 @@ impl Constraints { &self, modifications: &ConstraintModifications, ) -> Result<(), ModificationError> { - if let Some(hrmp_watermark) = modifications.hrmp_watermark { + if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark { + // head updates are always valid. if self .hrmp_inbound .valid_watermarks @@ -300,12 +301,22 @@ impl Constraints { new.required_parent = required_parent.clone(); } - if let Some(hrmp_watermark) = modifications.hrmp_watermark { - match new.hrmp_inbound.valid_watermarks.iter().position(|w| w == &hrmp_watermark) { - Some(pos) => { + if let Some(ref hrmp_watermark) = modifications.hrmp_watermark { + match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) { + Ok(pos) => { + // Exact match, so this is OK in all cases. let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); + } + Err(pos) => match hrmp_watermark { + HrmpWatermarkUpdate::Head(_) => { + // Updates to Head are always OK. + let _ = new.hrmp_inbound.valid_watermarks.drain(..pos); + } + HrmpWatermarkUpdate::Trunk(n) => { + // Trunk update landing on disallowed watermark is not OK. + return Err(ModificationError::DisallowedHrmpWatermark(*n)) + } }, - None => return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)), } } @@ -388,13 +399,33 @@ pub struct OutboundHrmpChannelModification { pub messages_submitted: usize, } +/// An update to the HRMP Watermark. +#[derive(Debug, Clone, PartialEq)] +pub enum HrmpWatermarkUpdate { + /// This is an update placing the watermark at the head of the chain, + /// which is always legal. + Head(BlockNumber), + /// This is an update placing the watermark behind the head of the + /// chain, which is only legal if it lands on a block where messages + /// were queued. + Trunk(BlockNumber), +} + +impl HrmpWatermarkUpdate { + fn watermark(&self) -> BlockNumber { + match *self { + HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n, + } + } +} + /// Modifications to constraints as a result of prospective candidates. 
#[derive(Debug, Clone, PartialEq)] pub struct ConstraintModifications { /// The required parent head to build upon. pub required_parent: Option, /// The new HRMP watermark - pub hrmp_watermark: Option, + pub hrmp_watermark: Option, /// Outbound HRMP channel modifications. pub outbound_hrmp: HashMap, /// The amount of UMP messages sent. @@ -546,7 +577,13 @@ impl Fragment { let commitments = &candidate.commitments; ConstraintModifications { required_parent: Some(commitments.head_data.clone()), - hrmp_watermark: Some(commitments.hrmp_watermark), + hrmp_watermark: Some({ + if commitments.hrmp_watermark == relay_parent.number { + HrmpWatermarkUpdate::Head(commitments.hrmp_watermark) + } else { + HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark) + } + }), outbound_hrmp: { let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new(); @@ -843,10 +880,10 @@ mod tests { } #[test] - fn constraints_disallowed_watermark() { + fn constraints_disallowed_trunk_watermark() { let constraints = make_constraints(); let mut modifications = ConstraintModifications::identity(); - modifications.hrmp_watermark = Some(7); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7)); assert_eq!( constraints.check_modifications(&modifications), @@ -859,6 +896,18 @@ mod tests { ); } + #[test] + fn constraints_always_allow_head_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7)); + + assert!(constraints.check_modifications(&modifications).is_ok()); + + let new_constraints = constraints.apply_modifications(&modifications).unwrap(); + assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]); + } + #[test] fn constraints_no_such_hrmp_channel() { let constraints = make_constraints(); From 3b88edb6c5b81d64dc1ea5936e19f7698af9b973 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 1 Mar 2022 23:46:50 -0600 Subject: [PATCH 24/87] fragment tree population logic --- .../src/fragment_tree.rs | 199 +++++++++++++++--- 1 file changed, 174 insertions(+), 25 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 4082e8a794a5..d53aa477ee00 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -47,13 +47,19 @@ //! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied, //! [`FragmentTree`] complexity is bounded. This means that higher-level code needs to be selective //! about limiting the amount of candidates that are considered. +//! +//! The code in this module is not designed for speed or efficiency, but conceptual simplicity. +//! Our assumption is that the amount of candidates and parachains we consider will be reasonably +//! bounded and in practice will not exceed a few thousand at any time. This naïve implementation +//! will still perform fairly well under these conditions, despite being somewhat wasteful of memory. // TODO [now]: review & update. 
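
// Stepping back to the HRMP watermark rule introduced a few hunks above: a worked
// sketch, assuming a `constraints` value whose `hrmp_inbound.valid_watermarks` is
// `[6, 8]` and whose relay-parent sits at block 10.

fn hrmp_watermark_sketch(constraints: &Constraints) {
    let mut m = ConstraintModifications::identity();

    // A trunk update must land exactly on a valid inbound watermark; 7 is not one.
    m.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7));
    assert!(constraints.check_modifications(&m).is_err());

    // A head update is always accepted; applying it prunes every watermark <= 10.
    m.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(10));
    assert!(constraints.check_modifications(&m).is_ok());
    let applied = constraints.apply_modifications(&m).unwrap();
    assert!(applied.hrmp_inbound.valid_watermarks.is_empty());
}
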
use std::{ - collections::{hash_map::Entry as HEntry, HashMap, HashSet, BTreeMap}, + collections::{hash_map::Entry as HEntry, BTreeMap, HashMap, HashSet}, sync::Arc, }; +use super::LOG_TARGET; use polkadot_node_subsystem::{ overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError, SubsystemResult, @@ -69,7 +75,6 @@ use polkadot_primitives::vstaging::{ GroupIndex, GroupRotationInfo, Hash, HeadData, Header, Id as ParaId, PersistedValidationData, SessionIndex, ValidatorIndex, }; -use super::LOG_TARGET; /// An error indicating that a supplied candidate didn't match the persisted /// validation data provided alongside it. @@ -131,6 +136,18 @@ impl CandidateStorage { Ok(()) } + fn iter_para_children<'a>( + &'a self, + parent_head_hash: &Hash, + ) -> impl Iterator + 'a { + let by_candidate_hash = &self.by_candidate_hash; + self.by_parent_head + .get(parent_head_hash) + .into_iter() + .flat_map(|hashes| hashes.iter()) + .filter_map(move |h| by_candidate_hash.get(h)) + } + // TODO [now]: fn restrict_to(&graphs) which will be our main pruning function } @@ -157,7 +174,7 @@ pub(crate) struct Scope { para: ParaId, relay_parent: RelayChainBlockInfo, ancestors: BTreeMap, - ancestors_by_hash: HashSet, + ancestors_by_hash: HashMap, base_constraints: Constraints, max_depth: usize, } @@ -189,22 +206,22 @@ impl Scope { relay_parent: RelayChainBlockInfo, base_constraints: Constraints, max_depth: usize, - ancestors: impl IntoIterator, + ancestors: impl IntoIterator, ) -> Result { let mut ancestors_map = BTreeMap::new(); - let mut ancestors_by_hash = HashSet::new(); + let mut ancestors_by_hash = HashMap::new(); { let mut prev = relay_parent.number; for (ancestor, constraints) in ancestors { if prev == 0 { - return Err(UnexpectedAncestor); + return Err(UnexpectedAncestor) } else if ancestor.number != prev - 1 { - return Err(UnexpectedAncestor); + return Err(UnexpectedAncestor) } else if prev == base_constraints.min_relay_parent_number { break } else { prev = ancestor.number; - ancestors_by_hash.insert(ancestor.hash); + ancestors_by_hash.insert(ancestor.hash, ancestor.clone()); ancestors_map.insert(ancestor.number, (ancestor, constraints)); } } @@ -219,52 +236,184 @@ impl Scope { ancestors_by_hash, }) } + + fn earliest_relay_parent(&self) -> RelayChainBlockInfo { + self.ancestors.iter().next().map(|(_, v)| v.0.clone()) + .unwrap_or_else(|| self.relay_parent.clone()) + } + + fn ancestor_by_hash(&self, hash: &Hash) -> Option { + if hash == &self.relay_parent.hash { + return Some(self.relay_parent.clone()) + } + + self.ancestors_by_hash.get(hash) + .map(|info| info.clone()) + } } // We use indices into a flat vector to refer to nodes in the tree. -type NodePointer = usize; +// Every tree also has an implicit root. +#[derive(Debug, Clone, Copy, PartialEq)] +enum NodePointer { + Root, + Storage(usize), +} /// This is a tree of candidates based on some underlying storage of candidates /// and a scope. pub(crate) struct FragmentTree { scope: Scope, + + // Invariant: a contiguous prefix of the 'nodes' storage will contain + // the top-level children. nodes: Vec, } impl FragmentTree { - /// Create a new [`FragmentTree`] with given scope, and populated from - /// the provided node storage. - pub fn new(scope: Scope, storage: &CandidateStorage) -> Self { - let mut tree = FragmentTree { - scope, - nodes: Vec::new(), - }; - + /// Create a new [`FragmentTree`] with given scope and populated from the + /// storage. 
+ pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self { tracing::trace!( target: LOG_TARGET, - relay_parent = ?tree.scope.relay_parent.hash, - relay_parent_num = tree.scope.relay_parent.number, - para_id = ?tree.scope.para, - ancestors = tree.scope.ancestors.len(), + relay_parent = ?scope.relay_parent.hash, + relay_parent_num = scope.relay_parent.number, + para_id = ?scope.para, + ancestors = scope.ancestors.len(), "Instantiating Fragment Tree", ); - // populate. + let mut tree = FragmentTree { scope, nodes: Vec::new() }; + + // Populate the tree breadth-first. + let mut last_sweep_start = None; + loop { + let sweep_start = tree.nodes.len(); + + if Some(sweep_start) == last_sweep_start { + break + } + + let parents: Vec = + if let Some(last_start) = last_sweep_start { + (last_start..tree.nodes.len()).map(NodePointer::Storage).collect() + } else { + // This indicates depth = 0, as we're on the first + // iteration. + vec![NodePointer::Root] + }; + + // 1. get parent head and find constraints + // 2. iterate all candidates building on the right head and viable relay parent + // 3. add new node + for parent_pointer in parents { + let (modifications, child_depth, earliest_rp) = match parent_pointer { + NodePointer::Root => { + (ConstraintModifications::identity(), 0, tree.scope.earliest_relay_parent()) + } + NodePointer::Storage(ptr) => { + let node = &tree.nodes[ptr]; + let parent_rp = tree.scope.ancestor_by_hash(&node.relay_parent()) + .expect("nodes in tree can only contain ancestors within scope; qed"); + + (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) + } + }; + + if child_depth >= tree.scope.max_depth { continue } + + let child_constraints = match tree.scope.base_constraints.apply_modifications(&modifications) { + Err(e) => { + tracing::debug!( + target: LOG_TARGET, + new_parent_head = ?modifications.required_parent, + err = ?e, + "Failed to apply modifications", + ); + + continue + } + Ok(c) => c, + }; + + // Add nodes to tree wherever + // 1. parent hash is correct + // 2. relay-parent does not move backwards + // 3. candidate outputs fulfill constraints + let required_head_hash = child_constraints.required_parent.hash(); + for candidate in storage.iter_para_children(&required_head_hash) { + let relay_parent = match tree.scope.ancestor_by_hash(&candidate.relay_parent) { + None => continue, // not in chain + Some(info) => { + if info.number < earliest_rp.number { + // moved backwards + continue + } + + info + } + }; + + let fragment = { + let f = Fragment::new( + relay_parent, + child_constraints.clone(), + candidate.candidate.clone(), + ); + + match f { + Ok(f) => f, + Err(_) => continue, + } + }; + + let mut cumulative_modifications = modifications.clone(); + cumulative_modifications.stack(fragment.constraint_modifications()); + let node = FragmentNode { + parent: parent_pointer, + fragment, + erasure_root: candidate.erasure_root.clone(), + candidate_hash: candidate.candidate_hash.clone(), + depth: child_depth, + cumulative_modifications, + children: Vec::new(), + }; + + tree.insert_node(node); + } + } + + last_sweep_start = Some(sweep_start); + } tree } + // Inserts a node and updates child references in a non-root parent. 
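
// A sketch of how the breadth-first sweeps above bound cycles. `seconded(parent, output)`
// is a hypothetical helper producing a candidate receipt plus matching persisted
// validation data, anchored at the scope's relay-parent; `scope` is assumed to require
// parent head-data `hd_a` with a `max_depth` of 4. The `insert_node` helper used by the
// loop is added just below.

fn cycle_bounded_sketch(scope: Scope, storage: &mut CandidateStorage) {
    let (a, a_pvd) = seconded(hd_a(), hd_b()); // hd_a -> hd_b
    let (b, b_pvd) = seconded(hd_b(), hd_a()); // hd_b -> hd_a: a two-candidate cycle
    storage.add_candidate(a, a_pvd).unwrap();
    storage.add_candidate(b, b_pvd).unwrap();

    let _tree = FragmentTree::populate(scope, &*storage);

    // Only two candidates in storage, but the sweeps pin one node per depth:
    // `a` at depths 0 and 2, `b` at depths 1 and 3, and then `max_depth` stops the
    // population. The candidate set of the tree would still be just the two hashes.
}
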
+ fn insert_node(&mut self, node: FragmentNode) { + let pointer = NodePointer::Storage(self.nodes.len()); + let parent_pointer = node.parent; + self.nodes.push(node); + + if let NodePointer::Storage(ptr) = parent_pointer { + self.nodes[ptr].children.push(pointer); + } + } + // TODO [now]: add new candidate and recursively populate as necessary. // TODO [now]: API for selecting backed candidates } struct FragmentNode { - // A pointer to the parent node. `None` indicates that this is a root - // node. - parent: Option, + // A pointer to the parent node. + parent: NodePointer, fragment: Fragment, erasure_root: Hash, + candidate_hash: CandidateHash, + depth: usize, + cumulative_modifications: ConstraintModifications, + children: Vec, } impl FragmentNode { From eb3c0cc8dc2a029255d5d12972c660efccaf49d2 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 5 Mar 2022 18:17:32 -0600 Subject: [PATCH 25/87] set::retain --- .../src/fragment_tree.rs | 30 +++++++++++++++++-- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index d53aa477ee00..0f560c2b8394 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -136,6 +136,15 @@ impl CandidateStorage { Ok(()) } + /// Retain only candidates which pass the predicate. + pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) { + self.by_candidate_hash.retain(|h, _v| pred(h)); + self.by_parent_head.retain(|parent, children| { + children.retain(|h| pred(h)); + !children.is_empty() + }) + } + fn iter_para_children<'a>( &'a self, parent_head_hash: &Hash, @@ -147,8 +156,6 @@ impl CandidateStorage { .flat_map(|hashes| hashes.iter()) .filter_map(move |h| by_candidate_hash.get(h)) } - - // TODO [now]: fn restrict_to(&graphs) which will be our main pruning function } /// The state of a candidate. @@ -283,7 +290,10 @@ impl FragmentTree { "Instantiating Fragment Tree", ); - let mut tree = FragmentTree { scope, nodes: Vec::new() }; + let mut tree = FragmentTree { + scope, + nodes: Vec::new(), + }; // Populate the tree breadth-first. let mut last_sweep_start = None; @@ -400,6 +410,20 @@ impl FragmentTree { } } + /// Returns a set of candidate hashes contained in nodes. + /// + /// This runs in O(n) time in the number of nodes + /// and allocates memory. + pub(crate) fn candidates(&self) -> HashSet { + let mut set = HashSet::with_capacity(self.nodes.len()); + + for f in &self.nodes { + set.insert(f.candidate_hash); + } + + set + } + // TODO [now]: add new candidate and recursively populate as necessary. // TODO [now]: API for selecting backed candidates From a6a5c5fcb1372285ac621c2ad3d3a615c5af69a0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 13:26:12 -0600 Subject: [PATCH 26/87] extract population logic --- .../src/fragment_tree.rs | 126 ++++++++++++------ 1 file changed, 86 insertions(+), 40 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 0f560c2b8394..0f400d5455a2 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -295,10 +295,76 @@ impl FragmentTree { nodes: Vec::new(), }; + tree.populate_from_bases(storage, vec![NodePointer::Root]); + + tree + } + + // Inserts a node and updates child references in a non-root parent. 
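
// A sketch of how `retain` and `candidates` above are expected to compose for pruning,
// assuming `trees` were all populated against this same per-para storage:

fn prune_to_live_trees(storage: &mut CandidateStorage, trees: &[FragmentTree]) {
    // Everything still referenced by some tree stays; orphaned candidates are dropped
    // from both the by-parent-head and by-candidate-hash indices.
    let live: HashSet<CandidateHash> =
        trees.iter().flat_map(|tree| tree.candidates()).collect();

    storage.retain(|candidate_hash| live.contains(candidate_hash));
}
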
+ fn insert_node(&mut self, node: FragmentNode) { + let pointer = NodePointer::Storage(self.nodes.len()); + let parent_pointer = node.parent; + let candidate_hash = node.candidate_hash; + + match parent_pointer { + NodePointer::Storage(ptr) => { + self.nodes.push(node); + self.nodes[ptr].children.push((pointer, candidate_hash)) + } + NodePointer::Root => { + // Maintain the invariant of node storage beginning with depth-0. + if self.nodes.last().map_or(true, |last| last.parent == NodePointer::Root) { + self.nodes.push(node); + } else { + let pos = self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).count(); + self.nodes.insert(pos, node); + } + } + } + } + + fn node_has_candidate_children(&self, pointer: NodePointer, candidate_hash: &CandidateHash) -> bool { + match pointer { + NodePointer::Root => + self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).find(|n| &n.candidate_hash == candidate_hash).is_some(), + NodePointer::Storage(ptr) => + self.nodes.get(ptr).map_or(false, |n| n.has_candidate_child(candidate_hash)), + } + } + + /// Returns a set of candidate hashes contained in nodes. + /// + /// This runs in O(n) time in the number of nodes + /// and allocates memory. + pub(crate) fn candidates(&self) -> HashSet { + let mut set = HashSet::with_capacity(self.nodes.len()); + + for f in &self.nodes { + set.insert(f.candidate_hash); + } + + set + } + + /// Add a candidate and recursively populate from storage. + pub(crate) fn add_and_populate( + &mut self, + hash: CandidateHash, + storage: &CandidateStorage, + ) { + unimplemented!() + } + + fn populate_from_bases( + &mut self, + storage: &CandidateStorage, + initial_bases: Vec, + ) { // Populate the tree breadth-first. let mut last_sweep_start = None; + loop { - let sweep_start = tree.nodes.len(); + let sweep_start = self.nodes.len(); if Some(sweep_start) == last_sweep_start { break @@ -306,11 +372,9 @@ impl FragmentTree { let parents: Vec = if let Some(last_start) = last_sweep_start { - (last_start..tree.nodes.len()).map(NodePointer::Storage).collect() + (last_start..self.nodes.len()).map(NodePointer::Storage).collect() } else { - // This indicates depth = 0, as we're on the first - // iteration. - vec![NodePointer::Root] + initial_bases.clone() }; // 1. get parent head and find constraints @@ -319,20 +383,20 @@ impl FragmentTree { for parent_pointer in parents { let (modifications, child_depth, earliest_rp) = match parent_pointer { NodePointer::Root => { - (ConstraintModifications::identity(), 0, tree.scope.earliest_relay_parent()) + (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()) } NodePointer::Storage(ptr) => { - let node = &tree.nodes[ptr]; - let parent_rp = tree.scope.ancestor_by_hash(&node.relay_parent()) + let node = &self.nodes[ptr]; + let parent_rp = self.scope.ancestor_by_hash(&node.relay_parent()) .expect("nodes in tree can only contain ancestors within scope; qed"); (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) } }; - if child_depth >= tree.scope.max_depth { continue } + if child_depth >= self.scope.max_depth { continue } - let child_constraints = match tree.scope.base_constraints.apply_modifications(&modifications) { + let child_constraints = match self.scope.base_constraints.apply_modifications(&modifications) { Err(e) => { tracing::debug!( target: LOG_TARGET, @@ -352,7 +416,7 @@ impl FragmentTree { // 3. 
candidate outputs fulfill constraints let required_head_hash = child_constraints.required_parent.hash(); for candidate in storage.iter_para_children(&required_head_hash) { - let relay_parent = match tree.scope.ancestor_by_hash(&candidate.relay_parent) { + let relay_parent = match self.scope.ancestor_by_hash(&candidate.relay_parent) { None => continue, // not in chain Some(info) => { if info.number < earliest_rp.number { @@ -364,6 +428,11 @@ impl FragmentTree { } }; + // don't add candidates where the parent already has it as a child. + if self.node_has_candidate_children(parent_pointer, &candidate.candidate_hash) { + continue; + } + let fragment = { let f = Fragment::new( relay_parent, @@ -389,39 +458,12 @@ impl FragmentTree { children: Vec::new(), }; - tree.insert_node(node); + self.insert_node(node); } } last_sweep_start = Some(sweep_start); } - - tree - } - - // Inserts a node and updates child references in a non-root parent. - fn insert_node(&mut self, node: FragmentNode) { - let pointer = NodePointer::Storage(self.nodes.len()); - let parent_pointer = node.parent; - self.nodes.push(node); - - if let NodePointer::Storage(ptr) = parent_pointer { - self.nodes[ptr].children.push(pointer); - } - } - - /// Returns a set of candidate hashes contained in nodes. - /// - /// This runs in O(n) time in the number of nodes - /// and allocates memory. - pub(crate) fn candidates(&self) -> HashSet { - let mut set = HashSet::with_capacity(self.nodes.len()); - - for f in &self.nodes { - set.insert(f.candidate_hash); - } - - set } // TODO [now]: add new candidate and recursively populate as necessary. @@ -437,7 +479,7 @@ struct FragmentNode { candidate_hash: CandidateHash, depth: usize, cumulative_modifications: ConstraintModifications, - children: Vec, + children: Vec<(NodePointer, CandidateHash)>, } impl FragmentNode { @@ -468,6 +510,10 @@ impl FragmentNode { }, } } + + fn has_candidate_child(&self, candidate_hash: &CandidateHash) -> bool { + self.children.iter().find(|(_, c)| c == candidate_hash).is_some() + } } #[cfg(test)] From 67dc653b1d59e6461743feefafed0b9b9174c75b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 13:39:41 -0600 Subject: [PATCH 27/87] implement add_and_populate --- .../src/fragment_tree.rs | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 0f400d5455a2..62963a72755f 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -156,6 +156,10 @@ impl CandidateStorage { .flat_map(|hashes| hashes.iter()) .filter_map(move |h| by_candidate_hash.get(h)) } + + fn get(&'_ self, candidate_hash: &CandidateHash) -> Option<&'_ CandidateEntry> { + self.by_candidate_hash.get(candidate_hash) + } } /// The state of a candidate. @@ -352,7 +356,32 @@ impl FragmentTree { hash: CandidateHash, storage: &CandidateStorage, ) { - unimplemented!() + let candidate_entry = match storage.get(&hash) { + None => return, + Some(e) => e, + }; + + let candidate_parent = &candidate_entry.candidate.persisted_validation_data.parent_head; + + // Select an initial set of bases, whose required relay-parent matches that of the candidate. 
+ let root_base = if &self.scope.base_constraints.required_parent == candidate_parent { + Some(NodePointer::Root) + } else { + None + }; + + let non_root_bases = self.nodes.iter() + .enumerate() + .filter(|(_, n)| + n.cumulative_modifications.required_parent.as_ref() == Some(candidate_parent) + ) + .map(|(i, _)| NodePointer::Storage(i)); + + let bases = root_base.into_iter().chain(non_root_bases).collect(); + + // Pass this into the population function, which will sanity-check stuff like depth, fragments, + // etc. and then recursively populate. + self.populate_from_bases(storage, bases); } fn populate_from_bases( From 592cbbf2b337d81d0eb6550406178a738aec7a25 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 13:40:40 -0600 Subject: [PATCH 28/87] fmt --- .../src/fragment_tree.rs | 111 +++++++++--------- .../src/inclusion_emulator/staging.rs | 6 +- 2 files changed, 61 insertions(+), 56 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 62963a72755f..606509f15449 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -249,7 +249,10 @@ impl Scope { } fn earliest_relay_parent(&self) -> RelayChainBlockInfo { - self.ancestors.iter().next().map(|(_, v)| v.0.clone()) + self.ancestors + .iter() + .next() + .map(|(_, v)| v.0.clone()) .unwrap_or_else(|| self.relay_parent.clone()) } @@ -258,8 +261,7 @@ impl Scope { return Some(self.relay_parent.clone()) } - self.ancestors_by_hash.get(hash) - .map(|info| info.clone()) + self.ancestors_by_hash.get(hash).map(|info| info.clone()) } } @@ -294,10 +296,7 @@ impl FragmentTree { "Instantiating Fragment Tree", ); - let mut tree = FragmentTree { - scope, - nodes: Vec::new(), - }; + let mut tree = FragmentTree { scope, nodes: Vec::new() }; tree.populate_from_bases(storage, vec![NodePointer::Root]); @@ -314,23 +313,32 @@ impl FragmentTree { NodePointer::Storage(ptr) => { self.nodes.push(node); self.nodes[ptr].children.push((pointer, candidate_hash)) - } + }, NodePointer::Root => { // Maintain the invariant of node storage beginning with depth-0. if self.nodes.last().map_or(true, |last| last.parent == NodePointer::Root) { self.nodes.push(node); } else { - let pos = self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).count(); + let pos = + self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).count(); self.nodes.insert(pos, node); } - } + }, } } - fn node_has_candidate_children(&self, pointer: NodePointer, candidate_hash: &CandidateHash) -> bool { + fn node_has_candidate_children( + &self, + pointer: NodePointer, + candidate_hash: &CandidateHash, + ) -> bool { match pointer { - NodePointer::Root => - self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).find(|n| &n.candidate_hash == candidate_hash).is_some(), + NodePointer::Root => self + .nodes + .iter() + .take_while(|n| n.parent == NodePointer::Root) + .find(|n| &n.candidate_hash == candidate_hash) + .is_some(), NodePointer::Storage(ptr) => self.nodes.get(ptr).map_or(false, |n| n.has_candidate_child(candidate_hash)), } @@ -351,11 +359,7 @@ impl FragmentTree { } /// Add a candidate and recursively populate from storage. 
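
// A sketch of when the `add_and_populate` entry point documented above is expected to
// run, assuming the subsystem keeps one storage per para and one tree per active leaf;
// the helper name and shape are placeholders only.

fn on_candidate_seconded(
    storage: &mut CandidateStorage,
    trees: &mut [FragmentTree],
    receipt: CommittedCandidateReceipt,
    pvd: PersistedValidationData,
) {
    let candidate_hash = receipt.hash();
    if storage.add_candidate(receipt, pvd).is_err() {
        return // PVD did not match the descriptor; ignore the candidate.
    }

    // Each tree re-checks scope, depth and constraints itself, so the candidate can be
    // offered to all of them.
    for tree in trees {
        tree.add_and_populate(candidate_hash, &*storage);
    }
}
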
- pub(crate) fn add_and_populate( - &mut self, - hash: CandidateHash, - storage: &CandidateStorage, - ) { + pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) { let candidate_entry = match storage.get(&hash) { None => return, Some(e) => e, @@ -370,11 +374,13 @@ impl FragmentTree { None }; - let non_root_bases = self.nodes.iter() + let non_root_bases = self + .nodes + .iter() .enumerate() - .filter(|(_, n)| + .filter(|(_, n)| { n.cumulative_modifications.required_parent.as_ref() == Some(candidate_parent) - ) + }) .map(|(i, _)| NodePointer::Storage(i)); let bases = root_base.into_iter().chain(non_root_bases).collect(); @@ -384,11 +390,7 @@ impl FragmentTree { self.populate_from_bases(storage, bases); } - fn populate_from_bases( - &mut self, - storage: &CandidateStorage, - initial_bases: Vec, - ) { + fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec) { // Populate the tree breadth-first. let mut last_sweep_start = None; @@ -399,45 +401,48 @@ impl FragmentTree { break } - let parents: Vec = - if let Some(last_start) = last_sweep_start { - (last_start..self.nodes.len()).map(NodePointer::Storage).collect() - } else { - initial_bases.clone() - }; + let parents: Vec = if let Some(last_start) = last_sweep_start { + (last_start..self.nodes.len()).map(NodePointer::Storage).collect() + } else { + initial_bases.clone() + }; // 1. get parent head and find constraints // 2. iterate all candidates building on the right head and viable relay parent // 3. add new node for parent_pointer in parents { let (modifications, child_depth, earliest_rp) = match parent_pointer { - NodePointer::Root => { - (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()) - } + NodePointer::Root => + (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()), NodePointer::Storage(ptr) => { let node = &self.nodes[ptr]; - let parent_rp = self.scope.ancestor_by_hash(&node.relay_parent()) + let parent_rp = self + .scope + .ancestor_by_hash(&node.relay_parent()) .expect("nodes in tree can only contain ancestors within scope; qed"); (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) - } + }, }; - if child_depth >= self.scope.max_depth { continue } - - let child_constraints = match self.scope.base_constraints.apply_modifications(&modifications) { - Err(e) => { - tracing::debug!( - target: LOG_TARGET, - new_parent_head = ?modifications.required_parent, - err = ?e, - "Failed to apply modifications", - ); + if child_depth >= self.scope.max_depth { + continue + } - continue - } - Ok(c) => c, - }; + let child_constraints = + match self.scope.base_constraints.apply_modifications(&modifications) { + Err(e) => { + tracing::debug!( + target: LOG_TARGET, + new_parent_head = ?modifications.required_parent, + err = ?e, + "Failed to apply modifications", + ); + + continue + }, + Ok(c) => c, + }; // Add nodes to tree wherever // 1. parent hash is correct @@ -454,12 +459,12 @@ impl FragmentTree { } info - } + }, }; // don't add candidates where the parent already has it as a child. 
if self.node_has_candidate_children(parent_pointer, &candidate.candidate_hash) { - continue; + continue } let fragment = { diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 2c5b8ee243b0..5e1ff83e46f3 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -306,16 +306,16 @@ impl Constraints { Ok(pos) => { // Exact match, so this is OK in all cases. let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1); - } + }, Err(pos) => match hrmp_watermark { HrmpWatermarkUpdate::Head(_) => { // Updates to Head are always OK. let _ = new.hrmp_inbound.valid_watermarks.drain(..pos); - } + }, HrmpWatermarkUpdate::Trunk(n) => { // Trunk update landing on disallowed watermark is not OK. return Err(ModificationError::DisallowedHrmpWatermark(*n)) - } + }, }, } } From 2112ba52ba10735b24aa3913cea65b3ec32a2070 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 14:01:48 -0600 Subject: [PATCH 29/87] add some TODOs in tests --- .../core/prospective-parachains/src/fragment_tree.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 606509f15449..9c434b402baa 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -500,8 +500,6 @@ impl FragmentTree { } } - // TODO [now]: add new candidate and recursively populate as necessary. - // TODO [now]: API for selecting backed candidates } @@ -558,5 +556,13 @@ mod tests { // TODO [now]: storage sets up links correctly. - // TODO [now]: storage pruning. 
+ // TODO [now]: retain + + // TODO [now]: recursive populate + + // TODO [now]: enforce root-child nodes contiguous + + // TODO [now]: add candidate child of root + + // TODO [now]: add candidate child of non-root } From ddcfa78b5da65eb8620d57d327bd1b80d47815be Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 14:16:34 -0600 Subject: [PATCH 30/87] implement child-selection --- .../src/fragment_tree.rs | 73 ++++++++++++++++--- 1 file changed, 64 insertions(+), 9 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 9c434b402baa..edcdb849010d 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -327,20 +327,29 @@ impl FragmentTree { } } - fn node_has_candidate_children( + fn node_has_candidate_child( &self, pointer: NodePointer, candidate_hash: &CandidateHash, ) -> bool { + self.node_candidate_child(pointer, candidate_hash).is_some() + } + + fn node_candidate_child( + &self, + pointer: NodePointer, + candidate_hash: &CandidateHash, + ) -> Option { match pointer { NodePointer::Root => self .nodes .iter() .take_while(|n| n.parent == NodePointer::Root) - .find(|n| &n.candidate_hash == candidate_hash) - .is_some(), + .enumerate() + .find(|(_, n)| &n.candidate_hash == candidate_hash) + .map(|(i, _)| NodePointer::Storage(i)), NodePointer::Storage(ptr) => - self.nodes.get(ptr).map_or(false, |n| n.has_candidate_child(candidate_hash)), + self.nodes.get(ptr).and_then(|n| n.candidate_child(candidate_hash)), } } @@ -390,6 +399,54 @@ impl FragmentTree { self.populate_from_bases(storage, bases); } + /// Select a candidate after the given `required_path` which pass + /// the predicate. + /// + /// If there are multiple possibilities, this will select the first one. + /// + /// This returns `None` if there is no candidate meeting those criteria. + /// + /// The intention of the `required_path` is to allow queries on the basis of + /// one or more candidates which were previously pending availability becoming + /// available and opening up more room on the core. + pub(crate) fn select_child( + &self, + required_path: &[CandidateHash], + pred: impl Fn(&CandidateHash) -> bool, + ) -> Option { + let base_node = { + // traverse the required path. + let mut node = NodePointer::Root; + for required_step in required_path { + node = self.node_candidate_child(node, &required_step)?; + } + + node + }; + + // TODO [now]: taking the first selection might introduce bias + // or become gameable. + // + // For plausibly unique parachains, this shouldn't matter much. + // figure out alternative selection criteria? + match base_node { + NodePointer::Root => { + self.nodes.iter() + .take_while(|n| n.parent == NodePointer::Root) + .filter(|n| pred(&n.candidate_hash)) + .map(|n| n.candidate_hash) + .next() + } + NodePointer::Storage(ptr) => { + self.nodes[ptr].children + .iter() + .filter(|n| pred(&n.1)) + .map(|n| n.1) + .next() + } + } + } + fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec) { // Populate the tree breadth-first. let mut last_sweep_start = None; @@ -463,7 +520,7 @@ impl FragmentTree { }; // don't add candidates where the parent already has it as a child. 
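
// A usage sketch for the `select_child` API introduced above, assuming `pending` is a
// candidate already occupying the core and `is_backed` is a stand-in for real
// per-candidate backing state.

fn pick_next_backed(tree: &FragmentTree, pending: CandidateHash) -> Option<CandidateHash> {
    let is_backed = |_hash: &CandidateHash| -> bool {
        true // assumed lookup into backing state; placeholder only.
    };

    // With nothing pending on the core, ask for a backed child of the root.
    let fresh = tree.select_child(&[], &is_backed);

    // If `pending` frees the core once included, ask what could follow it instead.
    let after_pending = tree.select_child(&[pending], &is_backed);

    after_pending.or(fresh)
}
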
- if self.node_has_candidate_children(parent_pointer, &candidate.candidate_hash) { + if self.node_has_candidate_child(parent_pointer, &candidate.candidate_hash) { continue } @@ -499,8 +556,6 @@ impl FragmentTree { last_sweep_start = Some(sweep_start); } } - - // TODO [now]: API for selecting backed candidates } struct FragmentNode { @@ -543,8 +598,8 @@ impl FragmentNode { } } - fn has_candidate_child(&self, candidate_hash: &CandidateHash) -> bool { - self.children.iter().find(|(_, c)| c == candidate_hash).is_some() + fn candidate_child(&self, candidate_hash: &CandidateHash) -> Option { + self.children.iter().find(|(_, c)| c == candidate_hash).map(|(p, _)| *p) } } From 2d2e8418b9deffef0cc82f1b5da82226c882daa0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 17:54:17 -0600 Subject: [PATCH 31/87] strip out old stuff based on wrong assumptions --- node/core/prospective-parachains/src/lib.rs | 141 +------------------- 1 file changed, 7 insertions(+), 134 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 7ed9f6e00896..dee37d2da64f 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -54,7 +54,7 @@ use polkadot_primitives::vstaging::{ use crate::{ error::{Error, FatalResult, NonFatal, NonFatalResult, Result}, - fragment_tree::FragmentTree, + fragment_tree::{FragmentTree, CandidateStorage}, }; mod error; @@ -87,28 +87,23 @@ struct ScheduledPara { struct RelayBlockViewData { // Scheduling info for paras and upcoming paras. - scheduling: HashMap, + fragment_trees: HashMap, block_info: RelayChainBlockInfo, // TODO [now]: other stuff + // e.g. ancestors in same session } struct View { // Active or recent relay-chain blocks by block hash. - active_leaves: HashSet, - active_or_recent: HashMap, - - // Fragment graphs, one for each parachain. - // TODO [now]: make this per-para per active-leaf - // TODO [now]: have global candidate storage per para id - fragments: HashMap, + active_leaves: HashMap, + candidate_storage: HashMap, } impl View { fn new() -> Self { View { - active_leaves: HashSet::new(), - active_or_recent: HashMap::new(), - fragments: HashMap::new(), + active_leaves: HashMap::new(), + candidate_storage: HashMap::new(), } } } @@ -136,133 +131,11 @@ where } } -// TODO [now]; non-fatal error type. async fn update_view( view: &mut View, ctx: &mut Context, update: ActiveLeavesUpdate, ) -> NonFatalResult<()> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, -{ - // TODO [now]: separate determining updates from updates themselves. - - // Update active_leaves - { - for activated in update.activated.into_iter() { - view.active_leaves.insert(activated.hash); - } - - for deactivated in update.deactivated.into_iter() { - view.active_leaves.remove(&deactivated); - } - } - - // Find the set of blocks we care about. - let relevant_blocks = find_all_relevant_blocks(ctx, &view.active_leaves).await?; - - let all_new: Vec<_> = relevant_blocks - .iter() - .filter(|(h, _hdr)| !view.active_or_recent.contains_key(h)) - .collect(); - - { - // Prune everything that was relevant but isn't anymore. - let all_removed: Vec<_> = view - .active_or_recent - .keys() - .cloned() - .filter(|h| !relevant_blocks.contains_key(&h)) - .collect(); - - for removed in all_removed { - let _ = view.active_or_recent.remove(&removed); - } - - // Add new blocks and get data if necessary. Dispatch work to backing subsystems. 
- for (new_hash, new_header) in all_new { - let block_info = RelayChainBlockInfo { - hash: *new_hash, - number: new_header.number, - storage_root: new_header.state_root, - }; - - let scheduling_info = get_scheduling_info(ctx, *new_hash).await?; - - let mut relevant_fragments = HashMap::new(); - - for core_info in scheduling_info.cores { - // TODO [now]: construct RelayBlockViewData appropriately - } - - view.active_or_recent.insert( - *new_hash, - RelayBlockViewData { scheduling: relevant_fragments, block_info }, - ); - } - - // TODO [now]: GC fragment trees: - // 1. Keep only fragment trees for paras that are scheduled at any of our blocks. - // 2. Keep only fragments that are built on any of our blocks. - - // TODO [now]: give all backing subsystems messages or signals. - // There are, annoyingly, going to be race conditions with networking. - // Move networking into a backing 'super-subsystem'? - // - // Which ones need to care about 'orphaned' fragments? - } - - unimplemented!() -} - -// TODO [now]: don't accept too many fragments per para per relay-parent -// Well I guess we're bounded/protected here by backing (Seconded messages) - -async fn get_base_constraints( - ctx: &mut Context, - relay_block: Hash, - para_id: ParaId, -) -> NonFatalResult -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, -{ - unimplemented!() -} - -// Scheduling info. -// - group rotation info: validator groups, group rotation info -// - information about parachains that are predictably going to be assigned -// to each core. For now that's just parachains, but it's worth noting that -// parathread claims are anchored to a specific core. -struct SchedulingInfo { - validator_groups: Vec>, - group_rotation_info: GroupRotationInfo, - // One core per parachain. this should have same length as 'validator-groups' - cores: Vec, -} - -struct CoreInfo { - // all para-ids that the core could accept blocks for in the near future. 
- near_future: Vec, -} - -async fn get_scheduling_info( - ctx: &mut Context, - relay_block: Hash, -) -> NonFatalResult -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, -{ - unimplemented!() -} - -async fn find_all_relevant_blocks( - ctx: &mut Context, - active_leaves: &HashSet, -) -> NonFatalResult> where Context: SubsystemContext, Context: overseer::SubsystemContext, From e18811855dffc74a0714b1d733ed547285882bbd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 18:08:51 -0600 Subject: [PATCH 32/87] use fatality --- Cargo.lock | 1 + node/core/prospective-parachains/Cargo.toml | 1 + node/core/prospective-parachains/src/error.rs | 74 +++++-------------- node/core/prospective-parachains/src/lib.rs | 6 +- 4 files changed, 24 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9f57ec29a5d7..6ab6eca1e4de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6755,6 +6755,7 @@ dependencies = [ name = "polkadot-node-core-prospective-parachains" version = "0.9.16" dependencies = [ + "fatality", "futures 0.3.21", "parity-scale-codec 2.3.1", "polkadot-node-primitives", diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml index def2fd47f10e..fa885208b3bd 100644 --- a/node/core/prospective-parachains/Cargo.toml +++ b/node/core/prospective-parachains/Cargo.toml @@ -9,6 +9,7 @@ futures = "0.3.19" tracing = "0.1.29" parity-scale-codec = "2" thiserror = "1.0.30" +fatality = "0.0.6" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index 53c4afcab7f8..d2a7c640fd3d 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -27,57 +27,27 @@ use polkadot_node_subsystem_util::{rolling_session_window::SessionsUnavailable, use crate::LOG_TARGET; use parity_scale_codec::Error as CodecError; +use fatality::Nested; -// TODO [now]: update to use fatality (thanks Bernhard) - -/// Errors for this subsystem. -#[derive(Debug, Error)] -#[error(transparent)] +#[allow(missing_docs)] +#[fatality::fatality(splitable)] pub enum Error { - /// All fatal errors. - Fatal(#[from] Fatal), - /// All nonfatal/potentially recoverable errors. - NonFatal(#[from] NonFatal), -} - -/// General `Result` type for dispute coordinator. -pub type Result = std::result::Result; -/// Result type with only fatal errors. -pub type FatalResult = std::result::Result; -/// Result type with only non fatal errors. -pub type NonFatalResult = std::result::Result; - -impl From for Error { - fn from(o: SubsystemError) -> Self { - match o { - SubsystemError::Context(msg) => Self::Fatal(Fatal::SubsystemContext(msg)), - _ => Self::NonFatal(NonFatal::Subsystem(o)), - } - } -} - -/// Fatal errors of this subsystem. -#[derive(Debug, Error)] -pub enum Fatal { - /// We received a legacy `SubystemError::Context` error which is considered fatal. + #[fatal] #[error("SubsystemError::Context error: {0}")] SubsystemContext(String), - /// `ctx.spawn` failed with an error. + #[fatal] #[error("Spawning a task failed: {0}")] SpawnFailed(SubsystemError), + #[fatal] #[error("Participation worker receiver exhausted.")] ParticipationWorkerReceiverExhausted, - /// Receiving subsystem message from overseer failed. 
+ #[fatal] #[error("Receiving message from overseer failed: {0}")] SubsystemReceive(#[source] SubsystemError), -} -#[derive(Debug, thiserror::Error)] -#[allow(missing_docs)] -pub enum NonFatal { #[error(transparent)] RuntimeApi(#[from] RuntimeApiError), @@ -88,29 +58,23 @@ pub enum NonFatal { Subsystem(SubsystemError), } +/// General `Result` type. +pub type Result = std::result::Result; +/// Result for non-fatal only failures. +pub type JfyiErrorResult = std::result::Result; +/// Result for fatal only failures. +pub type FatalResult = std::result::Result; + /// Utility for eating top level errors and log them. /// /// We basically always want to try and continue on error. This utility function is meant to /// consume top-level errors by simply logging them -pub fn log_error(result: Result<()>) -> std::result::Result<(), Fatal> { - match result { - Err(Error::Fatal(f)) => Err(f), - Err(Error::NonFatal(error)) => { - error.log(); +pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(), FatalError> { + match result.into_nested()? { + Ok(()) => Ok(()), + Err(jfyi) => { + tracing::debug!(target: LOG_TARGET, error = ?jfyi, ctx); Ok(()) }, - Ok(()) => Ok(()), - } -} - -impl NonFatal { - /// Log a `NonFatal`. - pub fn log(self) { - match self { - // don't spam the log with spurious errors - Self::RuntimeApi(_) => tracing::debug!(target: LOG_TARGET, error = ?self), - // it's worth reporting otherwise - _ => tracing::warn!(target: LOG_TARGET, error = ?self), - } } } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index dee37d2da64f..92b28159f719 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -53,7 +53,7 @@ use polkadot_primitives::vstaging::{ }; use crate::{ - error::{Error, FatalResult, NonFatal, NonFatalResult, Result}, + error::{Error, FatalError, JfyiError, Result, FatalResult, JfyiErrorResult}, fragment_tree::{FragmentTree, CandidateStorage}, }; @@ -115,7 +115,7 @@ where { let mut view = View::new(); loop { - match ctx.recv().await? { + match ctx.recv().await.map_err(FatalError::SubsystemReceive)? { FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { update_view(&mut view, &mut ctx, update).await?; @@ -135,7 +135,7 @@ async fn update_view( view: &mut View, ctx: &mut Context, update: ActiveLeavesUpdate, -) -> NonFatalResult<()> +) -> JfyiErrorResult<()> where Context: SubsystemContext, Context: overseer::SubsystemContext, From 7af95bb4910fd878ec4ec83553b57aab4f6e36ec Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 18:48:03 -0600 Subject: [PATCH 33/87] implement pruning --- node/core/prospective-parachains/src/lib.rs | 55 +++++++++++++++++++-- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 92b28159f719..7f7383bac624 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -77,6 +77,8 @@ pub struct ProspectiveParachainsSubsystems { } // TODO [now]: add this enum to the broader subsystem types. 
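
// A sketch of how the fatal / non-fatal split above is meant to be consumed by the main
// loop: JFYI errors are logged and swallowed, fatal ones tear the subsystem down.
// `handle_next` is a hypothetical message handler returning the crate-level `Result`.

async fn run_loop(view: &mut View) -> FatalResult<()> {
    loop {
        let res: Result<()> = handle_next(view).await;
        log_error(res, "handling incoming message")?;
    }
}
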
+// TODO [now]: notify about candidate seconded +// TODO [now]: notify about candidate backed pub enum ProspectiveParachainsMessage {} struct ScheduledPara { @@ -113,12 +115,13 @@ where Context: SubsystemContext, Context: overseer::SubsystemContext, { + // TODO [now]: run_until_error where view is preserved let mut view = View::new(); loop { match ctx.recv().await.map_err(FatalError::SubsystemReceive)? { FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { - update_view(&mut view, &mut ctx, update).await?; + view = update_view(&mut ctx, view, update).await?; }, FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOverseer::Communication { msg } => match msg { @@ -132,14 +135,60 @@ where } async fn update_view( - view: &mut View, ctx: &mut Context, + mut view: View, update: ActiveLeavesUpdate, -) -> JfyiErrorResult<()> +) -> JfyiErrorResult where Context: SubsystemContext, Context: overseer::SubsystemContext, { + // 1. clean up inactive leaves + // 2. determine all scheduled para at each block + // 3. construct new fragment tree for each para for each new leaf + // 4. prune candidate storage. + + for deactivated in update.deactivated { + view.active_leaves.remove(&deactivated); + } + + // TODO [now]: 2 + + // TODO [now]: 3 + + prune_view_candidate_storage(&mut view); + + Ok(view) +} + +fn prune_view_candidate_storage(view: &mut View) { + let active_leaves = &view.active_leaves; + view.candidate_storage.retain(|para_id, storage| { + let mut coverage = HashSet::new(); + let mut contained = false; + for head in active_leaves.values() { + if let Some(tree) = head.fragment_trees.get(¶_id) { + coverage.extend(tree.candidates()); + } + } + + if !contained { + return false; + } + + storage.retain(|h| coverage.contains(&h)); + + // Even if `storage` is now empty, we retain. + // This maintains a convenient invariant that para-id storage exists + // as long as there's an active head which schedules the para. 
+ true + }) +} + +async fn fetch_constraints( + ctx: &mut Context, + para_id: ParaId, +) -> JfyiErrorResult> { unimplemented!() } From 5c3fc42f8646c9e2522b998b68bf2326f7ceb124 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 19:16:19 -0600 Subject: [PATCH 34/87] remove unused ancestor constraints --- node/core/prospective-parachains/src/fragment_tree.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index edcdb849010d..0566b5585289 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -184,7 +184,7 @@ struct CandidateEntry { pub(crate) struct Scope { para: ParaId, relay_parent: RelayChainBlockInfo, - ancestors: BTreeMap, + ancestors: BTreeMap, ancestors_by_hash: HashMap, base_constraints: Constraints, max_depth: usize, @@ -217,13 +217,13 @@ impl Scope { relay_parent: RelayChainBlockInfo, base_constraints: Constraints, max_depth: usize, - ancestors: impl IntoIterator, + ancestors: impl IntoIterator, ) -> Result { let mut ancestors_map = BTreeMap::new(); let mut ancestors_by_hash = HashMap::new(); { let mut prev = relay_parent.number; - for (ancestor, constraints) in ancestors { + for ancestor in ancestors { if prev == 0 { return Err(UnexpectedAncestor) } else if ancestor.number != prev - 1 { @@ -233,7 +233,7 @@ impl Scope { } else { prev = ancestor.number; ancestors_by_hash.insert(ancestor.hash, ancestor.clone()); - ancestors_map.insert(ancestor.number, (ancestor, constraints)); + ancestors_map.insert(ancestor.number, ancestor); } } } @@ -252,7 +252,7 @@ impl Scope { self.ancestors .iter() .next() - .map(|(_, v)| v.0.clone()) + .map(|(_, v)| v.clone()) .unwrap_or_else(|| self.relay_parent.clone()) } From 8d7611ab7acf9143b437ce49ffd4bbff82b70861 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 19:24:35 -0600 Subject: [PATCH 35/87] fragment tree instantiation --- node/core/prospective-parachains/src/lib.rs | 97 +++++++++++++++++++-- 1 file changed, 91 insertions(+), 6 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 7f7383bac624..750a648ccb5d 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -54,7 +54,7 @@ use polkadot_primitives::vstaging::{ use crate::{ error::{Error, FatalError, JfyiError, Result, FatalResult, JfyiErrorResult}, - fragment_tree::{FragmentTree, CandidateStorage}, + fragment_tree::{FragmentTree, CandidateStorage, Scope as TreeScope}, }; mod error; @@ -71,6 +71,9 @@ const LOG_TARGET: &str = "parachain::prospective-parachains"; // useless prospective parachains and DoS honest nodes. const MAX_DEPTH: usize = 4; +// The maximum ancestry we support. +const MAX_ANCESTRY: usize = 5; + /// The Prospective Parachains Subsystem. pub struct ProspectiveParachainsSubsystems { metrics: Metrics, @@ -144,19 +147,84 @@ where Context: overseer::SubsystemContext, { // 1. clean up inactive leaves - // 2. determine all scheduled para at each block + // 2. determine all scheduled para at new block // 3. construct new fragment tree for each para for each new leaf // 4. prune candidate storage. 
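 	// Steps 2 and 3 are carried out once per newly-activated leaf below; step 4 runs
 	// a single time at the end, and only when leaves were actually deactivated.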
- for deactivated in update.deactivated { - view.active_leaves.remove(&deactivated); + for deactivated in &update.deactivated { + view.active_leaves.remove(deactivated); } - // TODO [now]: 2 + if let Some(activated) = update.activated { + let hash = activated.hash; + let scheduled_paras = fetch_upcoming_paras( + ctx, + &hash, + ).await?; + + let block_info: RelayChainBlockInfo = unimplemented!(); + + let ancestry = fetch_ancestry( + &mut ctx, + hash, + MAX_ANCESTRY, + ).await?; + + // Find constraints. + let mut fragment_trees = HashMap::new(); + for para in scheduled_paras { + let candidate_storage = view.candidate_storage + .entry(para) + .or_insert_with(CandidateStorage::new); + + let constraints = fetch_constraints( + &mut ctx, + &hash, + para, + ).await?; + + let constraints = match constraints { + Some(c) => c, + None => { + // This indicates a runtime conflict of some kind. + + tracing::debug!( + target: LOG_TARGET, + para_id = ?para, + relay_parent = ?hash, + "Failed to get inclusion constraints." + ); + + continue + } + }; + + let scope = TreeScope::with_ancestors( + para, + block_info.clone(), + constraints, + MAX_DEPTH, + ancestry.iter().cloned(), + ).expect("ancestors are provided in reverse order and correctly; qed"); + + let tree = FragmentTree::populate(scope, &*candidate_storage); + fragment_trees.insert(para, tree); + } + + // TODO [now]: notify subsystems of new trees. + + view.active_leaves.insert(hash, RelayBlockViewData { + block_info, + fragment_trees, + }); + } // TODO [now]: 3 - prune_view_candidate_storage(&mut view); + if !update.deactivated.is_empty() { + // This has potential to be a hotspot. + prune_view_candidate_storage(&mut view); + } Ok(view) } @@ -187,11 +255,28 @@ fn prune_view_candidate_storage(view: &mut View) { async fn fetch_constraints( ctx: &mut Context, + relay_parent: &Hash, para_id: ParaId, ) -> JfyiErrorResult> { unimplemented!() } +async fn fetch_upcoming_paras( + ctx: &mut Context, + relay_parent: &Hash +) -> JfyiErrorResult> { + unimplemented!() +} + +// Fetch ancestors in descending order, up to the amount requested. +async fn fetch_ancestry( + ctx: &mut Context, + relay_parent: Hash, + ancestors: usize, +) -> JfyiErrorResult> { + unimplemented!() +} + #[derive(Clone)] struct MetricsInner; From b2f3350caa99070ddbd87f56e4889e4ae0a76011 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 19:24:51 -0600 Subject: [PATCH 36/87] remove outdated comment --- node/core/prospective-parachains/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 750a648ccb5d..26a77bd03f21 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -219,8 +219,6 @@ where }); } - // TODO [now]: 3 - if !update.deactivated.is_empty() { // This has potential to be a hotspot. 
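+		// (It intersects every para's stored candidate set with the fragment trees
+		// of every active leaf.)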
prune_view_candidate_storage(&mut view); From 0a2fe277ec5d098a24a16d2fb89b2394bbcc3ff8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 21:24:53 -0600 Subject: [PATCH 37/87] add message/request types and skeleton for handling --- node/core/prospective-parachains/src/lib.rs | 87 ++++++++++++++++++--- 1 file changed, 78 insertions(+), 9 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 26a77bd03f21..5d060bcd15f0 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -36,6 +36,7 @@ use std::{ }; use futures::prelude::*; +use futures::channel::oneshot; use polkadot_node_subsystem::{ overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, @@ -50,6 +51,7 @@ use polkadot_node_subsystem_util::{ use polkadot_primitives::vstaging::{ Block, BlockId, BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, GroupIndex, GroupRotationInfo, Hash, Header, Id as ParaId, SessionIndex, ValidatorIndex, + PersistedValidationData, }; use crate::{ @@ -80,9 +82,23 @@ pub struct ProspectiveParachainsSubsystems { } // TODO [now]: add this enum to the broader subsystem types. -// TODO [now]: notify about candidate seconded -// TODO [now]: notify about candidate backed -pub enum ProspectiveParachainsMessage {} +pub enum ProspectiveParachainsMessage { + // TODO [now] : docs + CandidateSeconded( + ParaId, + CommittedCandidateReceipt, + PersistedValidationData, + ), + // TODO [now]: docs + CandidateBacked(ParaId, CandidateHash), + // TODO [now]: docs + GetBackableCandidate( + Hash, + ParaId, + Vec, + oneshot::Sender>, + ), +} struct ScheduledPara { para: ParaId, @@ -124,20 +140,31 @@ where match ctx.recv().await.map_err(FatalError::SubsystemReceive)? { FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { - view = update_view(&mut ctx, view, update).await?; + view = handle_active_leaves_update(&mut ctx, view, update).await?; }, FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOverseer::Communication { msg } => match msg { - // TODO [now]: handle messages - // 1. Notification of new fragment (orphaned?) - // 2. Notification of new fragment being backed - // 3. 
Request for backable candidates + ProspectiveParachainsMessage::CandidateSeconded( + para, + candidate, + pvd, + ) => handle_candidate_seconded(&mut ctx, &mut view, para, candidate, pvd).await?, + ProspectiveParachainsMessage::CandidateBacked( + para, + candidate_hash, + ) => handle_candidate_backed(&mut ctx, &mut view, para, candidate_hash).await?, + ProspectiveParachainsMessage::GetBackableCandidate( + relay_parent, + para, + required_path, + tx, + ) => answer_get_backable_candidate(&mut ctx, &view, relay_parent, para, required_path, tx).await?, }, } } } -async fn update_view( +async fn handle_active_leaves_update( ctx: &mut Context, mut view: View, update: ActiveLeavesUpdate, @@ -251,6 +278,48 @@ fn prune_view_candidate_storage(view: &mut View) { }) } +async fn handle_candidate_seconded( + ctx: &mut Context, + view: &mut View, + para: ParaId, + candidate: CommittedCandidateReceipt, + pvd: PersistedValidationData, +) -> JfyiErrorResult<()> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + unimplemented!() +} + +async fn handle_candidate_backed( + ctx: &mut Context, + view: &mut View, + para: ParaId, + candidate_hash: CandidateHash, +) -> JfyiErrorResult<()> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + unimplemented!() +} + +async fn answer_get_backable_candidate( + ctx: &mut Context, + view: &View, + relay_parent: Hash, + para: ParaId, + required_path: Vec, + tx: oneshot::Sender>, +) -> JfyiErrorResult<()> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + unimplemented!() +} + async fn fetch_constraints( ctx: &mut Context, relay_parent: &Hash, From 2c997519f0ec08c401703baf0d66fd0c5df6e3aa Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 21:25:03 -0600 Subject: [PATCH 38/87] fmt --- node/core/prospective-parachains/src/error.rs | 2 +- .../src/fragment_tree.rs | 23 ++--- node/core/prospective-parachains/src/lib.rs | 90 +++++++------------ 3 files changed, 44 insertions(+), 71 deletions(-) diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index d2a7c640fd3d..b5f4468e0393 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -26,8 +26,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{rolling_session_window::SessionsUnavailable, runtime}; use crate::LOG_TARGET; -use parity_scale_codec::Error as CodecError; use fatality::Nested; +use parity_scale_codec::Error as CodecError; #[allow(missing_docs)] #[fatality::fatality(splitable)] diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 0566b5585289..64b53ec19c9a 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -430,20 +430,15 @@ impl FragmentTree { // For plausibly unique parachains, this shouldn't matter much. // figure out alternative selection criteria? 
match base_node { - NodePointer::Root => { - self.nodes.iter() - .take_while(|n| n.parent == NodePointer::Root) - .filter(|n| pred(&n.candidate_hash)) - .map(|n| n.candidate_hash) - .next() - } - NodePointer::Storage(ptr) => { - self.nodes[ptr].children - .iter() - .filter(|n| pred(&n.1)) - .map(|n| n.1) - .next() - } + NodePointer::Root => self + .nodes + .iter() + .take_while(|n| n.parent == NodePointer::Root) + .filter(|n| pred(&n.candidate_hash)) + .map(|n| n.candidate_hash) + .next(), + NodePointer::Storage(ptr) => + self.nodes[ptr].children.iter().filter(|n| pred(&n.1)).map(|n| n.1).next(), } } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 5d060bcd15f0..e254e33d43e3 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -35,8 +35,7 @@ use std::{ sync::Arc, }; -use futures::prelude::*; -use futures::channel::oneshot; +use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, @@ -50,13 +49,13 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::vstaging::{ Block, BlockId, BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, - GroupIndex, GroupRotationInfo, Hash, Header, Id as ParaId, SessionIndex, ValidatorIndex, - PersistedValidationData, + GroupIndex, GroupRotationInfo, Hash, Header, Id as ParaId, PersistedValidationData, + SessionIndex, ValidatorIndex, }; use crate::{ - error::{Error, FatalError, JfyiError, Result, FatalResult, JfyiErrorResult}, - fragment_tree::{FragmentTree, CandidateStorage, Scope as TreeScope}, + error::{Error, FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, + fragment_tree::{CandidateStorage, FragmentTree, Scope as TreeScope}, }; mod error; @@ -84,20 +83,11 @@ pub struct ProspectiveParachainsSubsystems { // TODO [now]: add this enum to the broader subsystem types. 
pub enum ProspectiveParachainsMessage { // TODO [now] : docs - CandidateSeconded( - ParaId, - CommittedCandidateReceipt, - PersistedValidationData, - ), + CandidateSeconded(ParaId, CommittedCandidateReceipt, PersistedValidationData), // TODO [now]: docs CandidateBacked(ParaId, CandidateHash), // TODO [now]: docs - GetBackableCandidate( - Hash, - ParaId, - Vec, - oneshot::Sender>, - ), + GetBackableCandidate(Hash, ParaId, Vec, oneshot::Sender>), } struct ScheduledPara { @@ -122,10 +112,7 @@ struct View { impl View { fn new() -> Self { - View { - active_leaves: HashMap::new(), - candidate_storage: HashMap::new(), - } + View { active_leaves: HashMap::new(), candidate_storage: HashMap::new() } } } @@ -144,21 +131,25 @@ where }, FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOverseer::Communication { msg } => match msg { - ProspectiveParachainsMessage::CandidateSeconded( - para, - candidate, - pvd, - ) => handle_candidate_seconded(&mut ctx, &mut view, para, candidate, pvd).await?, - ProspectiveParachainsMessage::CandidateBacked( - para, - candidate_hash, - ) => handle_candidate_backed(&mut ctx, &mut view, para, candidate_hash).await?, + ProspectiveParachainsMessage::CandidateSeconded(para, candidate, pvd) => + handle_candidate_seconded(&mut ctx, &mut view, para, candidate, pvd).await?, + ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => + handle_candidate_backed(&mut ctx, &mut view, para, candidate_hash).await?, ProspectiveParachainsMessage::GetBackableCandidate( relay_parent, para, required_path, tx, - ) => answer_get_backable_candidate(&mut ctx, &view, relay_parent, para, required_path, tx).await?, + ) => + answer_get_backable_candidate( + &mut ctx, + &view, + relay_parent, + para, + required_path, + tx, + ) + .await?, }, } } @@ -184,31 +175,19 @@ where if let Some(activated) = update.activated { let hash = activated.hash; - let scheduled_paras = fetch_upcoming_paras( - ctx, - &hash, - ).await?; + let scheduled_paras = fetch_upcoming_paras(ctx, &hash).await?; let block_info: RelayChainBlockInfo = unimplemented!(); - let ancestry = fetch_ancestry( - &mut ctx, - hash, - MAX_ANCESTRY, - ).await?; + let ancestry = fetch_ancestry(&mut ctx, hash, MAX_ANCESTRY).await?; // Find constraints. let mut fragment_trees = HashMap::new(); for para in scheduled_paras { - let candidate_storage = view.candidate_storage - .entry(para) - .or_insert_with(CandidateStorage::new); + let candidate_storage = + view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new); - let constraints = fetch_constraints( - &mut ctx, - &hash, - para, - ).await?; + let constraints = fetch_constraints(&mut ctx, &hash, para).await?; let constraints = match constraints { Some(c) => c, @@ -223,7 +202,7 @@ where ); continue - } + }, }; let scope = TreeScope::with_ancestors( @@ -232,7 +211,8 @@ where constraints, MAX_DEPTH, ancestry.iter().cloned(), - ).expect("ancestors are provided in reverse order and correctly; qed"); + ) + .expect("ancestors are provided in reverse order and correctly; qed"); let tree = FragmentTree::populate(scope, &*candidate_storage); fragment_trees.insert(para, tree); @@ -240,10 +220,8 @@ where // TODO [now]: notify subsystems of new trees. 
- view.active_leaves.insert(hash, RelayBlockViewData { - block_info, - fragment_trees, - }); + view.active_leaves + .insert(hash, RelayBlockViewData { block_info, fragment_trees }); } if !update.deactivated.is_empty() { @@ -266,7 +244,7 @@ fn prune_view_candidate_storage(view: &mut View) { } if !contained { - return false; + return false } storage.retain(|h| coverage.contains(&h)); @@ -330,7 +308,7 @@ async fn fetch_constraints( async fn fetch_upcoming_paras( ctx: &mut Context, - relay_parent: &Hash + relay_parent: &Hash, ) -> JfyiErrorResult> { unimplemented!() } From 71b2853cf214baf7ff7ceccd3da980180c57632a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 21:33:41 -0600 Subject: [PATCH 39/87] implement handle_candidate_seconded --- .../src/fragment_tree.rs | 6 +-- node/core/prospective-parachains/src/lib.rs | 42 ++++++++++++++++++- 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 64b53ec19c9a..f318e8933235 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -101,11 +101,11 @@ impl CandidateStorage { &mut self, candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - ) -> Result<(), PersistedValidationDataMismatch> { + ) -> Result { let candidate_hash = candidate.hash(); if self.by_candidate_hash.contains_key(&candidate_hash) { - return Ok(()) + return Ok(candidate_hash) } if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash { @@ -133,7 +133,7 @@ impl CandidateStorage { // sanity-checked already. self.by_candidate_hash.insert(candidate_hash, entry); - Ok(()) + Ok(candidate_hash) } /// Retain only candidates which pass the predicate. diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index e254e33d43e3..867a0c932ecf 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -267,7 +267,47 @@ where Context: SubsystemContext, Context: overseer::SubsystemContext, { - unimplemented!() + // Add the candidate to storage. + // Then attempt to add it to all trees. + let storage = match view.candidate_storage.get_mut(¶) { + None => { + tracing::warn!( + target: LOG_TARGET, + para_id = ?para, + candidate_hash = ?candidate.hash(), + "Received seconded candidate for inactive para", + ); + + return Ok(()) + }, + Some(storage) => storage, + }; + + let candidate_hash = match storage.add_candidate(candidate, pvd) { + Ok(c) => c, + Err(crate::fragment_tree::PersistedValidationDataMismatch) => { + // We can't log the candidate hash without either doing more ~expensive + // hashing but this branch indicates something is seriously wrong elsewhere + // so it's doubtful that it would affect debugging. + + tracing::warn!( + target: LOG_TARGET, + para = ?para, + "Received seconded candidate had mismatching validation data", + ); + + return Ok(()) + }, + }; + + for (_, leaf_data) in &mut view.active_leaves { + if let Some(tree) = leaf_data.fragment_trees.get_mut(¶) { + tree.add_and_populate(candidate_hash, &*storage); + // TODO [now]: notify other subsystems of changes. 
+ } + } + + Ok(()) } async fn handle_candidate_backed( From a462fe7343bf65957dc3c0c93ea7fe4d7567ef00 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 21:36:15 -0600 Subject: [PATCH 40/87] candidate storage: handle backed --- .../prospective-parachains/src/fragment_tree.rs | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index f318e8933235..01469110dcbd 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -136,6 +136,19 @@ impl CandidateStorage { Ok(candidate_hash) } + /// Note that an existing candidate has been backed. + pub fn mark_backed(&mut self, candidate_hash: &CandidateHash) { + if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { + entry.state = CandidateState::Backed; + } + } + + /// Whether a candidate is recorded as being backed. + pub fn is_backed(&self, candidate_hash: &CandidateHash) -> bool { + self.by_candidate_hash.get(candidate_hash) + .map_or(false, |e| e.state == CandidateState::Backed) + } + /// Retain only candidates which pass the predicate. pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) { self.by_candidate_hash.retain(|h, _v| pred(h)); @@ -165,7 +178,8 @@ impl CandidateStorage { /// The state of a candidate. /// /// Candidates aren't even considered until they've at least been seconded. -pub(crate) enum CandidateState { +#[derive(Debug, PartialEq)] +enum CandidateState { /// The candidate has been seconded. Seconded, /// The candidate has been completely backed by the group. From b8e57a057b182ed613d2be89a9ae072c44adbf44 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 21:39:20 -0600 Subject: [PATCH 41/87] implement handle_candidate_backed --- .../src/fragment_tree.rs | 5 +++ node/core/prospective-parachains/src/lib.rs | 39 ++++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 01469110dcbd..e6c89fe11e88 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -149,6 +149,11 @@ impl CandidateStorage { .map_or(false, |e| e.state == CandidateState::Backed) } + /// Whether a candidate is contained within the storage already. + pub fn contains(&self, candidate_hash: &CandidateHash) -> bool { + self.by_candidate_hash.contains_key(candidate_hash) + } + /// Retain only candidates which pass the predicate. 
	pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) {
 		self.by_candidate_hash.retain(|h, _v| pred(h));
diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs
index 867a0c932ecf..d1bea54480f7 100644
--- a/node/core/prospective-parachains/src/lib.rs
+++ b/node/core/prospective-parachains/src/lib.rs
@@ -320,7 +320,44 @@ where
 	Context: SubsystemContext,
 	Context: overseer::SubsystemContext,
 {
-	unimplemented!()
+	let storage = match view.candidate_storage.get_mut(&para) {
+		None => {
+			tracing::warn!(
+				target: LOG_TARGET,
+				para_id = ?para,
+				?candidate_hash,
+				"Received instruction to back a candidate for inactive para",
+			);
+
+			return Ok(())
+		},
+		Some(storage) => storage,
+	};
+
+	if !storage.contains(&candidate_hash) {
+		tracing::warn!(
+			target: LOG_TARGET,
+			para_id = ?para,
+			?candidate_hash,
+			"Received instruction to mark unknown candidate as backed.",
+		);
+
+		return Ok(())
+	}
+
+	if storage.is_backed(&candidate_hash) {
+		tracing::debug!(
+			target: LOG_TARGET,
+			para_id = ?para,
+			?candidate_hash,
+			"Received redundant instruction to mark candidate as backed",
+		);
+
+		return Ok(())
+	}
+
+	storage.mark_backed(&candidate_hash);
+	Ok(())
 }
 
 async fn answer_get_backable_candidate(

From 55f5ba521da087a1c2d8787e8070422f4b4e7a1e Mon Sep 17 00:00:00 2001
From: Robert Habermeier 
Date: Sun, 6 Mar 2022 21:47:45 -0600
Subject: [PATCH 42/87] implement answer_get_backable_candidate

---
 .../src/fragment_tree.rs                      |  3 +-
 node/core/prospective-parachains/src/lib.rs   | 49 ++++++++++++++++++-
 2 files changed, 50 insertions(+), 2 deletions(-)

diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs
index e6c89fe11e88..2a266f0f97bf 100644
--- a/node/core/prospective-parachains/src/fragment_tree.rs
+++ b/node/core/prospective-parachains/src/fragment_tree.rs
@@ -145,7 +145,8 @@ impl CandidateStorage {
 
 	/// Whether a candidate is recorded as being backed.
 	pub fn is_backed(&self, candidate_hash: &CandidateHash) -> bool {
-		self.by_candidate_hash.get(candidate_hash)
+		self.by_candidate_hash
+			.get(candidate_hash)
 			.map_or(false, |e| e.state == CandidateState::Backed)
 	}
 
diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs
index d1bea54480f7..cfd00538b082 100644
--- a/node/core/prospective-parachains/src/lib.rs
+++ b/node/core/prospective-parachains/src/lib.rs
@@ -372,7 +372,54 @@ where
 	Context: SubsystemContext,
 	Context: overseer::SubsystemContext,
 {
-	unimplemented!()
+	let data = match view.active_leaves.get(&relay_parent) {
+		None => {
+			tracing::debug!(
+				target: LOG_TARGET,
+				?relay_parent,
+				para_id = ?para,
+				"Requested backable candidate for inactive relay-parent."
+			);
+
+			let _ = tx.send(None);
+			return Ok(())
+		},
+		Some(d) => d,
+	};
+
+	let tree = match data.fragment_trees.get(&para) {
+		None => {
+			tracing::debug!(
+				target: LOG_TARGET,
+				?relay_parent,
+				para_id = ?para,
+				"Requested backable candidate for inactive para."
+ ); + + let _ = tx.send(None); + return Ok(()) + }, + Some(tree) => tree, + }; + + let storage = match view.candidate_storage.get(¶) { + None => { + tracing::warn!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para, + "No candidate storage for active para", + ); + + let _ = tx.send(None); + return Ok(()) + }, + Some(s) => s, + }; + + let _ = tx.send(tree.select_child(&required_path, |candidate| storage.is_backed(candidate))); + + Ok(()) } async fn fetch_constraints( From 4cf2a447a9d893cc0889815e69e59a388d146bbb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 21:49:06 -0600 Subject: [PATCH 43/87] remove async where not needed --- node/core/prospective-parachains/src/lib.rs | 28 +++++---------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index cfd00538b082..51fbafe2043b 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -140,16 +140,7 @@ where para, required_path, tx, - ) => - answer_get_backable_candidate( - &mut ctx, - &view, - relay_parent, - para, - required_path, - tx, - ) - .await?, + ) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx), }, } } @@ -360,18 +351,13 @@ where Ok(()) } -async fn answer_get_backable_candidate( - ctx: &mut Context, +fn answer_get_backable_candidate( view: &View, relay_parent: Hash, para: ParaId, required_path: Vec, tx: oneshot::Sender>, -) -> JfyiErrorResult<()> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, -{ +) { let data = match view.active_leaves.get(&relay_parent) { None => { tracing::debug!( @@ -382,7 +368,7 @@ where ); let _ = tx.send(None); - return Ok(()) + return }, Some(d) => d, }; @@ -397,7 +383,7 @@ where ); let _ = tx.send(None); - return Ok(()) + return }, Some(tree) => tree, }; @@ -412,14 +398,12 @@ where ); let _ = tx.send(None); - return Ok(()) + return }, Some(s) => s, }; let _ = tx.send(tree.select_child(&required_path, |candidate| storage.is_backed(candidate))); - - Ok(()) } async fn fetch_constraints( From 5a108f2ba9c93f2f1644eced8057e7aea0f6b2bb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 22:05:22 -0600 Subject: [PATCH 44/87] implement fetch_ancestry --- node/core/prospective-parachains/src/error.rs | 3 + node/core/prospective-parachains/src/lib.rs | 87 +++++++++++++++++-- 2 files changed, 81 insertions(+), 9 deletions(-) diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index b5f4468e0393..18808edbe1af 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -56,6 +56,9 @@ pub enum Error { #[error(transparent)] Subsystem(SubsystemError), + + #[error("Request to chain API subsystem dropped")] + ChainApiRequestCanceled(oneshot::Canceled), } /// General `Result` type. 
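For orientation, here is a rough caller-side sketch of the `GetBackableCandidate` round-trip implemented by the patches above. It is not part of the patch series: `overseer_handle` and its `send_msg` method stand in for however a requesting subsystem (such as the provisioner) actually reaches the overseer, and `relay_parent`, `para`, and `required_path` are assumed to already be in scope.

	// Ask for one backed candidate extending `required_path` under `relay_parent`.
	let (tx, rx) = oneshot::channel();
	overseer_handle
		.send_msg(ProspectiveParachainsMessage::GetBackableCandidate(
			relay_parent,
			para,
			required_path,
			tx,
		))
		.await;

	// `None` if the answering side dropped the request or no backed candidate
	// extends the given path.
	let backable: Option<CandidateHash> = rx.await.ok().flatten();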
diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs
index 51fbafe2043b..baf38a2c84ec 100644
--- a/node/core/prospective-parachains/src/lib.rs
+++ b/node/core/prospective-parachains/src/lib.rs
@@ -40,6 +40,7 @@ use futures::{channel::oneshot, prelude::*};
 use polkadot_node_subsystem::{
 	overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext,
 	SubsystemError, SubsystemResult,
+	messages::ChainApiMessage,
 };
 use polkadot_node_subsystem_util::{
 	inclusion_emulator::staging::{
@@ -164,13 +165,30 @@ where
 		view.active_leaves.remove(deactivated);
 	}
 
-	if let Some(activated) = update.activated {
+	for activated in update.activated.into_iter() {
 		let hash = activated.hash;
-		let scheduled_paras = fetch_upcoming_paras(ctx, &hash).await?;
-
-		let block_info: RelayChainBlockInfo = unimplemented!();
+		let scheduled_paras = fetch_upcoming_paras(&mut *ctx, &hash).await?;
+
+		let block_info: RelayChainBlockInfo = match fetch_block_info(
+			&mut *ctx,
+			hash,
+		).await? {
+			None => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					block_hash = ?hash,
+					"Failed to get block info for newly activated leaf block."
+				);
+
+				// `update.activated` is an option, but we can use this
+				// to exit the 'loop' and skip this block without skipping
+				// pruning logic.
+				continue
+			}
+			Some(info) => info,
+		};
 
-		let ancestry = fetch_ancestry(&mut ctx, hash, MAX_ANCESTRY).await?;
+		let ancestry = fetch_ancestry(&mut *ctx, hash, MAX_ANCESTRY).await?;
 
 		// Find constraints.
 		let mut fragment_trees = HashMap::new();
@@ -178,7 +196,7 @@ where
 			let candidate_storage =
 				view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new);
 
-			let constraints = fetch_constraints(&mut ctx, &hash, para).await?;
+			let constraints = fetch_constraints(&mut *ctx, &hash, para).await?;
 
 			let constraints = match constraints {
 				Some(c) => c,
@@ -411,6 +429,7 @@ async fn fetch_constraints(
 	relay_parent: &Hash,
 	para_id: ParaId,
 ) -> JfyiErrorResult<Option<Constraints>> {
+	// TODO [now]: probably a new runtime API.
 	unimplemented!()
 }
 
@@ -418,16 +437,66 @@ async fn fetch_upcoming_paras(
 	ctx: &mut Context,
 	relay_parent: &Hash,
 ) -> JfyiErrorResult<Vec<ParaId>> {
+	// TODO [now]: use `availability_cores` or something like it.
 	unimplemented!()
 }
 
 // Fetch ancestors in descending order, up to the amount requested.
 async fn fetch_ancestry(
 	ctx: &mut Context,
-	relay_parent: Hash,
+	relay_hash: Hash,
 	ancestors: usize,
-) -> JfyiErrorResult<Vec<RelayChainBlockInfo>> {
-	unimplemented!()
+) -> JfyiErrorResult<Vec<RelayChainBlockInfo>>
+where
+	Context: SubsystemContext,
+	Context: overseer::SubsystemContext,
+{
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(ChainApiMessage::Ancestors {
+		hash: relay_hash,
+		k: ancestors,
+		response_channel: tx,
+	}).await;
+
+	let hashes = rx.map_err(JfyiError::ChainApiRequestCanceled).await??;
+	let mut block_info = Vec::with_capacity(hashes.len());
+	for hash in hashes {
+		match fetch_block_info(ctx, hash).await? {
+			None => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					relay_hash = ?hash,
+					"Failed to fetch info for hash returned from ancestry.",
+				);
+
+				// Return, however far we got.
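+				// A shorter ancestry is acceptable here; it merely narrows the scope
+				// of the fragment trees built from it.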
+ return Ok(block_info); + } + Some(info) => { + block_info.push(info); + } + } + } + + Ok(block_info) +} + +async fn fetch_block_info( + ctx: &mut Context, + relay_hash: Hash, +) -> JfyiErrorResult> where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + let (tx, rx) = oneshot::channel(); + + ctx.send_message(ChainApiMessage::BlockHeader(relay_hash, tx)).await; + let header = rx.map_err(JfyiError::ChainApiRequestCanceled).await??; + Ok(header.map(|header| RelayChainBlockInfo { + hash: relay_hash, + number: header.number, + storage_root: header.state_root, + })) } #[derive(Clone)] From 4ae34c3c7ed871817d3adb38301c7e5c0092ffb7 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 22:11:06 -0600 Subject: [PATCH 45/87] add logic for run_iteration --- node/core/prospective-parachains/src/error.rs | 2 +- node/core/prospective-parachains/src/lib.rs | 30 +++++++++++++------ 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index 18808edbe1af..a47230abc18f 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -72,7 +72,7 @@ pub type FatalResult = std::result::Result; /// /// We basically always want to try and continue on error. This utility function is meant to /// consume top-level errors by simply logging them -pub fn log_error(result: Result<()>, ctx: &'static str) -> std::result::Result<(), FatalError> { +pub fn log_error(result: Result<()>, ctx: &'static str) -> FatalResult<()> { match result.into_nested()? { Ok(()) => Ok(()), Err(jfyi) => { diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index baf38a2c84ec..4e658c551baa 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -117,25 +117,37 @@ impl View { } } -async fn run(mut ctx: Context) -> Result<()> +async fn run(mut ctx: Context) -> FatalResult<()> where Context: SubsystemContext, Context: overseer::SubsystemContext, { - // TODO [now]: run_until_error where view is preserved let mut view = View::new(); + loop { + crate::error::log_error( + run_iteration(&mut ctx, &mut view).await, + "Encountered issue during run iteration", + )?; + } +} + +async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<()> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ loop { match ctx.recv().await.map_err(FatalError::SubsystemReceive)? 
{ FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { - view = handle_active_leaves_update(&mut ctx, view, update).await?; + handle_active_leaves_update(&mut *ctx, view, update).await?; }, FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOverseer::Communication { msg } => match msg { ProspectiveParachainsMessage::CandidateSeconded(para, candidate, pvd) => - handle_candidate_seconded(&mut ctx, &mut view, para, candidate, pvd).await?, + handle_candidate_seconded(&mut *ctx, view, para, candidate, pvd).await?, ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => - handle_candidate_backed(&mut ctx, &mut view, para, candidate_hash).await?, + handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?, ProspectiveParachainsMessage::GetBackableCandidate( relay_parent, para, @@ -149,9 +161,9 @@ where async fn handle_active_leaves_update( ctx: &mut Context, - mut view: View, + view: &mut View, update: ActiveLeavesUpdate, -) -> JfyiErrorResult +) -> JfyiErrorResult<()> where Context: SubsystemContext, Context: overseer::SubsystemContext, @@ -235,10 +247,10 @@ where if !update.deactivated.is_empty() { // This has potential to be a hotspot. - prune_view_candidate_storage(&mut view); + prune_view_candidate_storage(view); } - Ok(view) + Ok(()) } fn prune_view_candidate_storage(view: &mut View) { From 843c79d517f720d4fb21c9c35474831f0de82bcd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 22:13:34 -0600 Subject: [PATCH 46/87] add some docs --- node/core/prospective-parachains/src/lib.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 4e658c551baa..0bbf730794f5 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -83,11 +83,15 @@ pub struct ProspectiveParachainsSubsystems { // TODO [now]: add this enum to the broader subsystem types. pub enum ProspectiveParachainsMessage { - // TODO [now] : docs + /// Inform the Prospective Parachains Subsystem of a new candidate. CandidateSeconded(ParaId, CommittedCandidateReceipt, PersistedValidationData), - // TODO [now]: docs + /// Inform the Prospective Parachains Subsystem that a previously seconded candidate + /// has been backed. This requires that `CandidateSeconded` was sent for the candidate + /// some time in the past. CandidateBacked(ParaId, CandidateHash), - // TODO [now]: docs + /// Get a backable candidate hash for the given parachain, under the given relay-parent hash, + /// which is a descendant of the given candidate hashes. Returns `None` on the channel + /// if no such candidate exists. GetBackableCandidate(Hash, ParaId, Vec, oneshot::Sender>), } From 3a47b716f4915949e0397f3a5822feec96fe32ea Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 22:25:25 -0600 Subject: [PATCH 47/87] remove global allow(unused), fix warnings --- node/core/prospective-parachains/src/error.rs | 3 -- .../src/fragment_tree.rs | 17 +++---- node/core/prospective-parachains/src/lib.rs | 46 ++++++------------- 3 files changed, 21 insertions(+), 45 deletions(-) diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index a47230abc18f..f28e4e1037aa 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -17,17 +17,14 @@ //! 
Error types. use futures::channel::oneshot; -use thiserror::Error; use polkadot_node_subsystem::{ errors::{ChainApiError, RuntimeApiError}, SubsystemError, }; -use polkadot_node_subsystem_util::{rolling_session_window::SessionsUnavailable, runtime}; use crate::LOG_TARGET; use fatality::Nested; -use parity_scale_codec::Error as CodecError; #[allow(missing_docs)] #[fatality::fatality(splitable)] diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 2a266f0f97bf..4253c3c71154 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -55,25 +55,18 @@ // TODO [now]: review & update. use std::{ - collections::{hash_map::Entry as HEntry, BTreeMap, HashMap, HashSet}, - sync::Arc, + collections::{BTreeMap, HashMap, HashSet}, }; use super::LOG_TARGET; -use polkadot_node_subsystem::{ - overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, - SubsystemError, SubsystemResult, -}; use polkadot_node_subsystem_util::{ inclusion_emulator::staging::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }, - metrics::{self, prometheus}, }; use polkadot_primitives::vstaging::{ - Block, BlockId, BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, - GroupIndex, GroupRotationInfo, Hash, HeadData, Header, Id as ParaId, PersistedValidationData, - SessionIndex, ValidatorIndex, + BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, + Hash, HeadData, Id as ParaId, PersistedValidationData, }; /// An error indicating that a supplied candidate didn't match the persisted @@ -158,7 +151,7 @@ impl CandidateStorage { /// Retain only candidates which pass the predicate. pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) { self.by_candidate_hash.retain(|h, _v| pred(h)); - self.by_parent_head.retain(|parent, children| { + self.by_parent_head.retain(|_parent, children| { children.retain(|h| pred(h)); !children.is_empty() }) @@ -573,6 +566,7 @@ impl FragmentTree { } } +#[allow(unused)] // TODO [now] struct FragmentNode { // A pointer to the parent node. parent: NodePointer, @@ -584,6 +578,7 @@ struct FragmentNode { children: Vec<(NodePointer, CandidateHash)>, } +#[allow(unused)] // TODO [now] impl FragmentNode { fn relay_parent(&self) -> Hash { self.fragment.relay_parent().hash diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 0bbf730794f5..4a2d4c5c61ec 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -27,35 +27,28 @@ //! This also handles concerns such as the relay-chain being forkful, //! session changes, predicting validator group assignments. 
-// TODO [now]: remove -#![allow(unused)] - use std::{ - collections::{hash_map::Entry as HEntry, HashMap, HashSet}, - sync::Arc, + collections::{HashMap, HashSet}, }; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ - overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, - SubsystemError, SubsystemResult, + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SubsystemContext, messages::ChainApiMessage, }; use polkadot_node_subsystem_util::{ inclusion_emulator::staging::{ - ConstraintModifications, Constraints, Fragment, RelayChainBlockInfo, + Constraints, RelayChainBlockInfo, }, - metrics::{self, prometheus}, }; use polkadot_primitives::vstaging::{ - Block, BlockId, BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, - GroupIndex, GroupRotationInfo, Hash, Header, Id as ParaId, PersistedValidationData, - SessionIndex, ValidatorIndex, + CandidateHash, CommittedCandidateReceipt, + Hash, Id as ParaId, PersistedValidationData, }; use crate::{ - error::{Error, FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, + error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, fragment_tree::{CandidateStorage, FragmentTree, Scope as TreeScope}, }; @@ -76,12 +69,8 @@ const MAX_DEPTH: usize = 4; // The maximum ancestry we support. const MAX_ANCESTRY: usize = 5; -/// The Prospective Parachains Subsystem. -pub struct ProspectiveParachainsSubsystems { - metrics: Metrics, -} - // TODO [now]: add this enum to the broader subsystem types. +/// Messages sent to the Prospective Parachains subsystem. pub enum ProspectiveParachainsMessage { /// Inform the Prospective Parachains Subsystem of a new candidate. CandidateSeconded(ParaId, CommittedCandidateReceipt, PersistedValidationData), @@ -95,18 +84,9 @@ pub enum ProspectiveParachainsMessage { GetBackableCandidate(Hash, ParaId, Vec, oneshot::Sender>), } -struct ScheduledPara { - para: ParaId, - base_constraints: Constraints, - validator_group: GroupIndex, -} - struct RelayBlockViewData { // Scheduling info for paras and upcoming paras. fragment_trees: HashMap, - block_info: RelayChainBlockInfo, - // TODO [now]: other stuff - // e.g. ancestors in same session } struct View { @@ -121,7 +101,8 @@ impl View { } } -async fn run(mut ctx: Context) -> FatalResult<()> +// TODO [now]: this is temporarily `pub` to make the unused lint behave reasonably. +pub async fn run(mut ctx: Context) -> FatalResult<()> where Context: SubsystemContext, Context: overseer::SubsystemContext, @@ -246,7 +227,7 @@ where // TODO [now]: notify subsystems of new trees. 
view.active_leaves - .insert(hash, RelayBlockViewData { block_info, fragment_trees }); + .insert(hash, RelayBlockViewData { fragment_trees }); } if !update.deactivated.is_empty() { @@ -265,6 +246,7 @@ fn prune_view_candidate_storage(view: &mut View) { for head in active_leaves.values() { if let Some(tree) = head.fragment_trees.get(¶_id) { coverage.extend(tree.candidates()); + contained = true; } } @@ -282,7 +264,7 @@ fn prune_view_candidate_storage(view: &mut View) { } async fn handle_candidate_seconded( - ctx: &mut Context, + _ctx: &mut Context, view: &mut View, para: ParaId, candidate: CommittedCandidateReceipt, @@ -336,7 +318,7 @@ where } async fn handle_candidate_backed( - ctx: &mut Context, + _ctx: &mut Context, view: &mut View, para: ParaId, candidate_hash: CandidateHash, @@ -440,6 +422,7 @@ fn answer_get_backable_candidate( let _ = tx.send(tree.select_child(&required_path, |candidate| storage.is_backed(candidate))); } +#[allow(unused)] // TODO [now] async fn fetch_constraints( ctx: &mut Context, relay_parent: &Hash, @@ -449,6 +432,7 @@ async fn fetch_constraints( unimplemented!() } +#[allow(unused)] // TODO [now] async fn fetch_upcoming_paras( ctx: &mut Context, relay_parent: &Hash, From a938910e7e9b0533af50fb2253e0f2870d1c3629 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 22:26:38 -0600 Subject: [PATCH 48/87] make spellcheck happy (despite English) --- node/core/prospective-parachains/src/fragment_tree.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 4253c3c71154..b639bb9c73c3 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! A tree utility for managing parachain fragments unreferenced by the relay-chain. +//! A tree utility for managing parachain fragments not referenced by the relay-chain. //! //! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`] //! which are meant to be used in close conjunction. Each tree is associated with a particular @@ -50,7 +50,7 @@ //! //! The code in this module is not designed for speed or efficiency, but conceptual simplicity. //! Our assumption is that the amount of candidates and parachains we consider will be reasonably -//! bounded and in practice will not exceed a few thousand at any time. This naïve implementation +//! bounded and in practice will not exceed a few thousand at any time. This naive implementation //! will still perform fairly well under these conditions, despite being somewhat wasteful of memory. // TODO [now]: review & update. From 5cad84154b5f8d07ad6d5393edcccaf721f52085 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 6 Mar 2022 22:38:35 -0600 Subject: [PATCH 49/87] fmt --- .../src/fragment_tree.rs | 14 +++---- node/core/prospective-parachains/src/lib.rs | 39 +++++++------------ 2 files changed, 20 insertions(+), 33 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index b639bb9c73c3..3fcd69c352e9 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -54,19 +54,15 @@ //! 
will still perform fairly well under these conditions, despite being somewhat wasteful of memory. // TODO [now]: review & update. -use std::{ - collections::{BTreeMap, HashMap, HashSet}, -}; +use std::collections::{BTreeMap, HashMap, HashSet}; use super::LOG_TARGET; -use polkadot_node_subsystem_util::{ - inclusion_emulator::staging::{ - ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, - }, +use polkadot_node_subsystem_util::inclusion_emulator::staging::{ + ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::vstaging::{ - BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, - Hash, HeadData, Id as ParaId, PersistedValidationData, + BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, + Id as ParaId, PersistedValidationData, }; /// An error indicating that a supplied candidate didn't match the persisted diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 4a2d4c5c61ec..2bd0ff525f4e 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -27,24 +27,17 @@ //! This also handles concerns such as the relay-chain being forkful, //! session changes, predicting validator group assignments. -use std::{ - collections::{HashMap, HashSet}, -}; +use std::collections::{HashMap, HashSet}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ - overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SubsystemContext, - messages::ChainApiMessage, -}; -use polkadot_node_subsystem_util::{ - inclusion_emulator::staging::{ - Constraints, RelayChainBlockInfo, - }, + messages::ChainApiMessage, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, + SubsystemContext, }; +use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, - Hash, Id as ParaId, PersistedValidationData, + CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, }; use crate::{ @@ -166,10 +159,7 @@ where let hash = activated.hash; let scheduled_paras = fetch_upcoming_paras(&mut *ctx, &hash).await?; - let block_info: RelayChainBlockInfo = match fetch_block_info( - &mut *ctx, - hash, - ).await? { + let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, hash).await? { None => { tracing::warn!( target: LOG_TARGET, @@ -181,7 +171,7 @@ where // to exit the 'loop' and skip this block without skipping // pruning logic. continue - } + }, Some(info) => info, }; @@ -226,8 +216,7 @@ where // TODO [now]: notify subsystems of new trees. - view.active_leaves - .insert(hash, RelayBlockViewData { fragment_trees }); + view.active_leaves.insert(hash, RelayBlockViewData { fragment_trees }); } if !update.deactivated.is_empty() { @@ -456,7 +445,8 @@ where hash: relay_hash, k: ancestors, response_channel: tx, - }).await; + }) + .await; let hashes = rx.map_err(JfyiError::ChainApiRequestCanceled).await??; let mut block_info = Vec::with_capacity(hashes.len()); @@ -470,11 +460,11 @@ where ); // Return, however far we got. 
- return Ok(block_info); - } + return Ok(block_info) + }, Some(info) => { block_info.push(info); - } + }, } } @@ -484,7 +474,8 @@ where async fn fetch_block_info( ctx: &mut Context, relay_hash: Hash, -) -> JfyiErrorResult> where +) -> JfyiErrorResult> +where Context: SubsystemContext, Context: overseer::SubsystemContext, { From 2a0e0f098a90e1c3faf28b5ab7d263d3136b4772 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 5 Apr 2022 18:05:45 +0200 Subject: [PATCH 50/87] bump Cargo.lock --- Cargo.lock | 460 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 259 insertions(+), 201 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 626092555417..dd5c1fa96d59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -458,7 +458,7 @@ dependencies = [ "futures-timer", "hex", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-chain-spec", "sc-client-api", @@ -493,7 +493,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-rpc", "sc-utils", @@ -513,7 +513,7 @@ name = "beefy-primitives" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-application-crypto", @@ -562,16 +562,28 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty 1.1.0", + "radium 0.6.2", + "tap", + "wyz 0.2.0", +] + [[package]] name = "bitvec" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" dependencies = [ - "funty", - "radium", + "funty 2.0.0", + "radium 0.7.0", "tap", - "wyz", + "wyz 0.5.0", ] [[package]] @@ -735,7 +747,7 @@ dependencies = [ "frame-support", "hex", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -750,7 +762,7 @@ version = "0.1.0" dependencies = [ "bp-runtime", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-std", ] @@ -759,14 +771,14 @@ dependencies = [ name = "bp-messages" version = "0.1.0" dependencies = [ - "bitvec", + "bitvec 1.0.0", "bp-runtime", "frame-support", "frame-system", "hex", "hex-literal", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -782,7 +794,7 @@ dependencies = [ "frame-support", "frame-system", "hex", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-core", @@ -799,7 +811,7 @@ dependencies = [ "bp-polkadot-core", "bp-runtime", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "smallvec", "sp-api", "sp-runtime", @@ -815,7 +827,7 @@ dependencies = [ "hash-db", "hex-literal", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -832,7 +844,7 @@ dependencies = [ "bp-header-chain", "ed25519-dalek", "finality-grandpa", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-application-crypto", "sp-finality-grandpa", "sp-runtime", @@ -847,7 +859,7 @@ 
dependencies = [ "bp-polkadot-core", "bp-rococo", "bp-runtime", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-runtime", "sp-std", @@ -869,7 +881,7 @@ dependencies = [ "pallet-bridge-grandpa", "pallet-bridge-messages", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-core", @@ -2034,7 +2046,7 @@ dependencies = [ "futures-timer", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.11.2", "scale-info", ] @@ -2108,7 +2120,7 @@ name = "fork-tree" version = "3.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", ] [[package]] @@ -2130,7 +2142,7 @@ dependencies = [ "frame-system", "linregress", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "scale-info", "serde", @@ -2162,7 +2174,7 @@ dependencies = [ "linked-hash-map", "log", "memory-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand 0.8.5", "sc-block-builder", "sc-cli", @@ -2207,7 +2219,7 @@ dependencies = [ "frame-election-provider-solution-type", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-arithmetic", "sp-npos-elections", @@ -2222,7 +2234,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -2238,7 +2250,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ "cfg-if 1.0.0", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", ] @@ -2254,7 +2266,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "scale-info", "serde", @@ -2314,7 +2326,7 @@ dependencies = [ "frame-support", "frame-support-test-pallet", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "pretty_assertions", "rustversion", "scale-info", @@ -2336,7 +2348,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", ] @@ -2347,7 +2359,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "frame-support", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -2365,7 +2377,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-runtime", @@ -2377,7 +2389,7 @@ name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", ] @@ -2442,6 +2454,12 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + 
[[package]] name = "funty" version = "2.0.0" @@ -3041,7 +3059,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", ] [[package]] @@ -3509,7 +3527,7 @@ name = "kusama-runtime" version = "0.9.18" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -3560,7 +3578,7 @@ dependencies = [ "pallet-utility", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -5051,7 +5069,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -5065,7 +5083,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-authority-discovery", @@ -5081,7 +5099,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-authorship", "sp-runtime", @@ -5100,7 +5118,7 @@ dependencies = [ "pallet-authorship", "pallet-session", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-consensus-babe", @@ -5123,7 +5141,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5161,7 +5179,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -5176,7 +5194,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-runtime", @@ -5199,7 +5217,7 @@ dependencies = [ "pallet-mmr", "pallet-mmr-primitives", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5218,7 +5236,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5235,7 +5253,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5256,7 +5274,7 @@ dependencies = [ "frame-system", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5271,7 +5289,7 @@ dependencies = [ name = "pallet-bridge-messages" version = "0.1.0" dependencies = [ - "bitvec", + "bitvec 1.0.0", "bp-message-dispatch", "bp-messages", "bp-runtime", @@ -5281,7 +5299,7 @@ dependencies = [ "log", "num-traits", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5301,7 +5319,7 @@ dependencies = [ "log", "pallet-bounties", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5318,7 +5336,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5334,7 +5352,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - 
"parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-io", @@ -5352,7 +5370,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand 0.7.3", "scale-info", "sp-arithmetic", @@ -5374,7 +5392,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5391,7 +5409,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-arithmetic", "sp-runtime", @@ -5409,7 +5427,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-core", @@ -5430,7 +5448,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5447,7 +5465,7 @@ dependencies = [ "frame-system", "log", "pallet-authorship", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-core", @@ -5465,7 +5483,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5483,7 +5501,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5501,7 +5519,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-mmr-primitives", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5517,7 +5535,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "serde", "sp-api", "sp-core", @@ -5534,7 +5552,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "pallet-mmr-primitives", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "serde", "sp-api", "sp-blockchain", @@ -5550,7 +5568,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5564,7 +5582,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5580,7 +5598,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-runtime", @@ -5604,7 +5622,7 @@ dependencies = [ "pallet-offences", "pallet-session", "pallet-staking", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-staking", @@ -5619,7 +5637,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5635,7 +5653,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5649,7 +5667,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5665,7 +5683,7 @@ dependencies = [ "frame-support", "frame-system", 
"log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5682,7 +5700,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5716,7 +5734,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand_chacha 0.2.2", "scale-info", "sp-runtime", @@ -5735,7 +5753,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand_chacha 0.2.2", "scale-info", "serde", @@ -5773,7 +5791,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5789,7 +5807,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-inherents", "sp-io", @@ -5808,7 +5826,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5824,7 +5842,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "smallvec", @@ -5843,7 +5861,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-blockchain", "sp-core", @@ -5857,7 +5875,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-runtime", ] @@ -5872,7 +5890,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-runtime", @@ -5887,7 +5905,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5904,7 +5922,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -5918,7 +5936,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -5943,7 +5961,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-runtime-common", "scale-info", @@ -5976,6 +5994,19 @@ dependencies = [ "snap", ] +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 0.20.4", + "byte-slice-cast", + "impl-trait-for-tuples", + "serde", +] + [[package]] name = "parity-scale-codec" version = "3.1.2" @@ -5983,7 +6014,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066" dependencies = [ "arrayvec 0.7.2", - "bitvec", + "bitvec 1.0.0", "byte-slice-cast", "impl-trait-for-tuples", "parity-scale-codec-derive", @@ -6354,7 +6385,7 @@ name = "polkadot-availability-bitfield-distribution" version = "0.9.18" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "env_logger 0.9.0", "futures 0.3.21", "log", @@ -6381,7 +6412,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "lru 0.7.4", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6411,7 +6442,7 @@ dependencies = [ "futures-timer", "log", "lru 0.7.4", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6506,7 +6537,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6527,7 +6558,7 @@ dependencies = [ name = "polkadot-core-primitives" version = "0.9.18" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "scale-info", "sp-core", @@ -6547,7 +6578,7 @@ dependencies = [ "futures-timer", "lazy_static", "lru 0.7.4", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6570,7 +6601,7 @@ dependencies = [ name = "polkadot-erasure-coding" version = "0.9.18" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-primitives", "polkadot-primitives", "reed-solomon-novelpoly", @@ -6613,7 +6644,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "futures-timer", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-network-protocol", "polkadot-node-subsystem", @@ -6634,7 +6665,7 @@ name = "polkadot-node-collation-generation" version = "0.9.18" dependencies = [ "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6653,7 +6684,7 @@ name = "polkadot-node-core-approval-voting" version = "0.9.18" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "derive_more", "futures 0.3.21", "futures-timer", @@ -6661,7 +6692,7 @@ dependencies = [ "kvdb-memorydb", "lru 0.7.4", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -6690,14 +6721,14 @@ name = "polkadot-node-core-av-store" version = "0.9.18" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "env_logger 0.9.0", "futures 0.3.21", "futures-timer", "kvdb", "kvdb-memorydb", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -6718,7 +6749,7 @@ name = "polkadot-node-core-backing" version = "0.9.18" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "futures 0.3.21", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -6761,7 +6792,7 @@ dependencies = [ "assert_matches", "async-trait", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6782,7 +6813,7 @@ version = "0.9.18" dependencies = [ "futures 0.3.21", "maplit", - 
"parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6804,7 +6835,7 @@ dependencies = [ "futures-timer", "kvdb", "kvdb-memorydb", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6826,7 +6857,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "lru 0.7.4", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6857,11 +6888,26 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-node-core-prospective-parachains" +version = "0.9.16" +dependencies = [ + "fatality", + "futures 0.3.21", + "parity-scale-codec 2.3.1", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "thiserror", + "tracing", +] + [[package]] name = "polkadot-node-core-provisioner" version = "0.9.18" dependencies = [ - "bitvec", + "bitvec 1.0.0", "futures 0.3.21", "futures-timer", "polkadot-node-primitives", @@ -6888,7 +6934,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "pin-project 1.0.10", "polkadot-core-primitives", "polkadot-node-subsystem-util", @@ -6962,7 +7008,7 @@ dependencies = [ "lazy_static", "log", "mick-jaeger", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-primitives", "polkadot-primitives", @@ -6983,7 +7029,7 @@ dependencies = [ "log", "metered-channel", "nix", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-test-service", "prometheus-parse", @@ -7006,7 +7052,7 @@ dependencies = [ "async-trait", "fatality", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", @@ -7022,7 +7068,7 @@ version = "0.9.18" dependencies = [ "bounded-vec", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-parachain", "polkadot-primitives", @@ -7101,7 +7147,7 @@ dependencies = [ "lru 0.7.4", "metered-channel", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.11.2", "pin-project 1.0.10", @@ -7183,7 +7229,7 @@ version = "0.9.18" dependencies = [ "derive_more", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "polkadot-core-primitives", "scale-info", @@ -7211,10 +7257,10 @@ dependencies = [ name = "polkadot-primitives" version = "0.9.18" dependencies = [ - "bitvec", + "bitvec 1.0.0", "frame-system", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "polkadot-core-primitives", "polkadot-parachain", @@ -7283,7 +7329,7 @@ name = "polkadot-runtime" version = "0.9.18" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -7330,7 +7376,7 @@ dependencies = [ "pallet-utility", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-constants", @@ -7373,7 +7419,7 @@ name = "polkadot-runtime-common" version = "0.9.18" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -7395,7 
+7441,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-parachains", @@ -7436,7 +7482,7 @@ name = "polkadot-runtime-metrics" version = "0.9.18" dependencies = [ "bs58", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "sp-std", "sp-tracing", @@ -7448,7 +7494,7 @@ version = "0.9.18" dependencies = [ "assert_matches", "bitflags", - "bitvec", + "bitvec 1.0.0", "derive_more", "frame-benchmarking", "frame-support", @@ -7465,7 +7511,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-metrics", @@ -7611,7 +7657,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "indexmap", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7635,7 +7681,7 @@ dependencies = [ name = "polkadot-statement-table" version = "0.9.18" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "sp-core", ] @@ -7645,7 +7691,7 @@ name = "polkadot-test-client" version = "0.9.18" dependencies = [ "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-subsystem", "polkadot-primitives", "polkadot-test-runtime", @@ -7698,7 +7744,7 @@ name = "polkadot-test-runtime" version = "0.9.18" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-election-provider-support", "frame-executive", "frame-support", @@ -7724,7 +7770,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -8124,6 +8170,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + [[package]] name = "radium" version = "0.7.0" @@ -8380,7 +8432,7 @@ dependencies = [ "env_logger 0.9.0", "jsonrpsee 0.8.0", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "serde", "serde_json", "sp-core", @@ -8521,7 +8573,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -8743,7 +8795,7 @@ dependencies = [ "ip_network", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "prost", "prost-build", "rand 0.7.3", @@ -8767,7 +8819,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -8787,7 +8839,7 @@ name = "sc-block-builder" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sp-api", "sp-block-builder", @@ -8805,7 +8857,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "impl-trait-for-tuples", "memmap2 0.5.0", - "parity-scale-codec", + "parity-scale-codec 
3.1.2", "sc-chain-spec-derive", "sc-network", "sc-telemetry", @@ -8839,7 +8891,7 @@ dependencies = [ "libp2p", "log", "names", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand 0.7.3", "regex", "rpassword", @@ -8873,7 +8925,7 @@ dependencies = [ "futures 0.3.21", "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-executor", "sc-transaction-pool-api", @@ -8904,7 +8956,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-client-api", "sc-state-db", @@ -8954,7 +9006,7 @@ dependencies = [ "num-bigint", "num-rational 0.2.4", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.7.3", "retain_mut", @@ -9014,7 +9066,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "fork-tree", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-consensus", "sp-blockchain", @@ -9030,7 +9082,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-consensus", "sc-telemetry", @@ -9064,7 +9116,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "lazy_static", "lru 0.7.4", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-executor-common", "sc-executor-wasmi", @@ -9090,7 +9142,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-allocator", "sp-core", "sp-maybe-compressed-blob", @@ -9107,7 +9159,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-allocator", "sc-executor-common", "scoped-tls", @@ -9125,7 +9177,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-wasm 0.42.2", "sc-allocator", "sc-executor-common", @@ -9149,7 +9201,7 @@ dependencies = [ "futures-timer", "hex", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.8.5", "sc-block-builder", @@ -9187,7 +9239,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-finality-grandpa", "sc-rpc", @@ -9253,7 +9305,7 @@ dependencies = [ "linked_hash_set", "log", "lru 0.7.4", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "pin-project 1.0.10", "prost", @@ -9311,7 +9363,7 @@ dependencies = [ "hyper-rustls", "num_cpus", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.7.3", "sc-client-api", @@ -9357,7 +9409,7 @@ dependencies = [ "jsonrpc-core", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-block-builder", "sc-chain-spec", @@ -9389,7 +9441,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-pubsub", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-chain-spec", "sc-transaction-pool-api", @@ -9435,7 +9487,7 @@ dependencies = [ "jsonrpc-core", "jsonrpc-pubsub", "log", - "parity-scale-codec", + 
"parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.12.0", "pin-project 1.0.10", @@ -9491,7 +9543,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.12.0", @@ -9507,7 +9559,7 @@ dependencies = [ "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", @@ -9589,7 +9641,7 @@ dependencies = [ "futures-timer", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.12.0", "retain_mut", @@ -9639,10 +9691,10 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0563970d79bcbf3c537ce3ad36d859b30d36fc5b190efd227f1f7a84d7cf0d42" dependencies = [ - "bitvec", + "bitvec 1.0.0", "cfg-if 1.0.0", "derive_more", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info-derive", "serde", ] @@ -10042,7 +10094,7 @@ name = "slot-range-helper" version = "0.9.18" dependencies = [ "enumn", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "sp-runtime", "sp-std", @@ -10131,7 +10183,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -10158,7 +10210,7 @@ name = "sp-application-crypto" version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -10173,7 +10225,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "integer-sqrt", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-debug-derive", @@ -10186,7 +10238,7 @@ name = "sp-authority-discovery" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-application-crypto", @@ -10200,7 +10252,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "async-trait", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-inherents", "sp-runtime", "sp-std", @@ -10211,7 +10263,7 @@ name = "sp-block-builder" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-inherents", "sp-runtime", @@ -10226,7 +10278,7 @@ dependencies = [ "futures 0.3.21", "log", "lru 0.7.4", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sp-api", "sp-consensus", @@ -10245,7 +10297,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "sp-inherents", "sp-runtime", @@ -10262,7 +10314,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "async-trait", "merlin", - "parity-scale-codec", + 
"parity-scale-codec 3.1.2", "scale-info", "serde", "sp-api", @@ -10283,7 +10335,7 @@ name = "sp-consensus-slots" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-arithmetic", @@ -10297,7 +10349,7 @@ name = "sp-consensus-vrf" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "schnorrkel", "sp-core", "sp-runtime", @@ -10325,7 +10377,7 @@ dependencies = [ "log", "merlin", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.12.0", "primitive-types", @@ -10400,7 +10452,7 @@ version = "0.12.0" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-std", "sp-storage", ] @@ -10412,7 +10464,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "finality-grandpa", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-api", @@ -10430,7 +10482,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "async-trait", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "sp-runtime", "sp-std", @@ -10446,7 +10498,7 @@ dependencies = [ "hash-db", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "secp256k1", "sp-core", @@ -10481,7 +10533,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "schnorrkel", "serde", @@ -10504,7 +10556,7 @@ name = "sp-npos-elections" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-arithmetic", @@ -10552,7 +10604,7 @@ dependencies = [ "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "paste", "rand 0.7.3", @@ -10571,7 +10623,7 @@ version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", @@ -10608,7 +10660,7 @@ name = "sp-session" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-core", @@ -10622,7 +10674,7 @@ name = "sp-staking" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -10636,7 +10688,7 @@ dependencies = [ "hash-db", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.7.3", "smallvec", @@ -10661,7 +10713,7 @@ 
version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "ref-cast", "serde", "sp-debug-derive", @@ -10689,7 +10741,7 @@ dependencies = [ "async-trait", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-inherents", "sp-runtime", @@ -10702,7 +10754,7 @@ name = "sp-tracing" version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-std", "tracing", "tracing-core", @@ -10725,7 +10777,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "async-trait", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-inherents", @@ -10741,7 +10793,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "hash-db", "memory-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-std", @@ -10756,7 +10808,7 @@ version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-wasm 0.42.2", "scale-info", "serde", @@ -10772,7 +10824,7 @@ name = "sp-version-proc-macro" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb7679221814fd3a76d36ad5eafb65657" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "proc-macro2", "quote", "syn", @@ -10785,7 +10837,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#62fcc8abb767 dependencies = [ "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-std", "wasmi", "wasmtime", @@ -10843,7 +10895,7 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-staking", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "polkadot-core-primitives", "polkadot-runtime", @@ -10998,7 +11050,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-rpc-api", "sc-transaction-pool-api", @@ -11031,7 +11083,7 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-rpc-api", "scale-info", @@ -11053,7 +11105,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "hex", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-client-db", "sc-consensus", @@ -11206,7 +11258,7 @@ name = "test-parachain-adder" version = "0.9.18" dependencies = [ "dlmalloc", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "sp-io", "sp-std", @@ -11222,7 +11274,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -11253,7 +11305,7 @@ version = "0.9.18" dependencies = [ "dlmalloc", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "sp-io", "sp-std", @@ -11269,7 +11321,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", 
"polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -11291,7 +11343,7 @@ dependencies = [ name = "test-parachains" version = "0.9.18" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "test-parachain-adder", "test-parachain-halt", @@ -11781,7 +11833,7 @@ dependencies = [ "clap", "jsonrpsee 0.4.1", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "remote-externalities", "sc-chain-spec", "sc-cli", @@ -12427,7 +12479,7 @@ name = "westend-runtime" version = "0.9.18" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -12475,7 +12527,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -12653,6 +12705,12 @@ dependencies = [ "winapi-build", ] +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "wyz" version = "0.5.0" @@ -12680,7 +12738,7 @@ dependencies = [ "derivative", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "xcm-procedural", ] @@ -12695,7 +12753,7 @@ dependencies = [ "pallet-balances", "pallet-transaction-payment", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -12716,7 +12774,7 @@ dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-arithmetic", "sp-core", "sp-io", @@ -12760,7 +12818,7 @@ name = "xcm-simulator" version = "0.9.18" dependencies = [ "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "polkadot-core-primitives", "polkadot-parachain", @@ -12779,7 +12837,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12803,7 +12861,7 @@ dependencies = [ "honggfuzz", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12859,7 +12917,7 @@ version = "0.9.18" dependencies = [ "futures-util", "lazy_static", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "reqwest", "serde", "serde_json", From 777a3e86ee2f4521a72a2c9e2f1b9084e21edbd4 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Fri, 15 Apr 2022 17:20:20 -0500 Subject: [PATCH 51/87] replace tracing with gum --- Cargo.lock | 2 +- node/core/prospective-parachains/Cargo.toml | 2 +- node/core/prospective-parachains/src/error.rs | 2 +- .../src/fragment_tree.rs | 4 ++-- node/core/prospective-parachains/src/lib.rs | 22 +++++++++---------- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dd5c1fa96d59..dac55b4f0648 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6900,7 +6900,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "thiserror", - "tracing", + "tracing-gum", ] [[package]] diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml index fa885208b3bd..754d5aac523f 100644 --- a/node/core/prospective-parachains/Cargo.toml +++ 
b/node/core/prospective-parachains/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" [dependencies] futures = "0.3.19" -tracing = "0.1.29" +gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = "2" thiserror = "1.0.30" fatality = "0.0.6" diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index f28e4e1037aa..9f1fd6607afc 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -73,7 +73,7 @@ pub fn log_error(result: Result<()>, ctx: &'static str) -> FatalResult<()> { match result.into_nested()? { Ok(()) => Ok(()), Err(jfyi) => { - tracing::debug!(target: LOG_TARGET, error = ?jfyi, ctx); + gum::debug!(target: LOG_TARGET, error = ?jfyi, ctx); Ok(()) }, } diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 3fcd69c352e9..e16529426a42 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -296,7 +296,7 @@ impl FragmentTree { /// Create a new [`FragmentTree`] with given scope and populated from the /// storage. pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self { - tracing::trace!( + gum::trace!( target: LOG_TARGET, relay_parent = ?scope.relay_parent.hash, relay_parent_num = scope.relay_parent.number, @@ -493,7 +493,7 @@ impl FragmentTree { let child_constraints = match self.scope.base_constraints.apply_modifications(&modifications) { Err(e) => { - tracing::debug!( + gum::debug!( target: LOG_TARGET, new_parent_head = ?modifications.required_parent, err = ?e, diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 2bd0ff525f4e..c292fd947712 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -161,7 +161,7 @@ where let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, hash).await? { None => { - tracing::warn!( + gum::warn!( target: LOG_TARGET, block_hash = ?hash, "Failed to get block info for newly activated leaf block." @@ -190,7 +190,7 @@ where None => { // This indicates a runtime conflict of some kind. - tracing::debug!( + gum::debug!( target: LOG_TARGET, para_id = ?para, relay_parent = ?hash, @@ -267,7 +267,7 @@ where // Then attempt to add it to all trees. let storage = match view.candidate_storage.get_mut(¶) { None => { - tracing::warn!( + gum::warn!( target: LOG_TARGET, para_id = ?para, candidate_hash = ?candidate.hash(), @@ -286,7 +286,7 @@ where // hashing but this branch indicates something is seriously wrong elsewhere // so it's doubtful that it would affect debugging. 
- tracing::warn!( + gum::warn!( target: LOG_TARGET, para = ?para, "Received seconded candidate had mismatching validation data", @@ -318,7 +318,7 @@ where { let storage = match view.candidate_storage.get_mut(¶) { None => { - tracing::warn!( + gum::warn!( target: LOG_TARGET, para_id = ?para, ?candidate_hash, @@ -331,7 +331,7 @@ where }; if !storage.contains(&candidate_hash) { - tracing::warn!( + gum::warn!( target: LOG_TARGET, para_id = ?para, ?candidate_hash, @@ -342,7 +342,7 @@ where } if storage.is_backed(&candidate_hash) { - tracing::debug!( + gum::debug!( target: LOG_TARGET, para_id = ?para, ?candidate_hash, @@ -365,7 +365,7 @@ fn answer_get_backable_candidate( ) { let data = match view.active_leaves.get(&relay_parent) { None => { - tracing::debug!( + gum::debug!( target: LOG_TARGET, ?relay_parent, para_id = ?para, @@ -380,7 +380,7 @@ fn answer_get_backable_candidate( let tree = match data.fragment_trees.get(¶) { None => { - tracing::debug!( + gum::debug!( target: LOG_TARGET, ?relay_parent, para_id = ?para, @@ -395,7 +395,7 @@ fn answer_get_backable_candidate( let storage = match view.candidate_storage.get(¶) { None => { - tracing::warn!( + gum::warn!( target: LOG_TARGET, ?relay_parent, para_id = ?para, @@ -453,7 +453,7 @@ where for hash in hashes { match fetch_block_info(ctx, relay_hash).await? { None => { - tracing::warn!( + gum::warn!( target: LOG_TARGET, relay_hash = ?hash, "Failed to fetch info for hash returned from ancestry.", From 3c5a5d665ce636feb2fcc196de0c6ad4f6a31214 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sat, 16 Apr 2022 08:29:01 -0500 Subject: [PATCH 52/87] introduce PopulateFrom trait --- .../src/fragment_tree.rs | 33 ++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index e16529426a42..8ed51eb59f6e 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -282,6 +282,33 @@ enum NodePointer { Storage(usize), } +/// Abstraction around `&'a CandidateStorage`. +trait PopulateFrom<'a> { + type ParaChildrenIter: Iterator + 'a; + + fn get(&self, candidate_hash: &CandidateHash) -> Option<&'a CandidateEntry>; + + fn iter_para_children( + &self, + parent_head_hash: &Hash, + ) -> Self::ParaChildrenIter; +} + +impl<'a> PopulateFrom<'a> for &'a CandidateStorage { + type ParaChildrenIter = Box + 'a>; + + fn get(&self, candidate_hash: &CandidateHash) -> Option<&'a CandidateEntry> { + CandidateStorage::get(self, candidate_hash) + } + + fn iter_para_children( + &self, + parent_head_hash: &Hash, + ) -> Self::ParaChildrenIter { + Box::new(CandidateStorage::iter_para_children(self, parent_head_hash)) + } +} + /// This is a tree of candidates based on some underlying storage of candidates /// and a scope. pub(crate) struct FragmentTree { @@ -378,6 +405,10 @@ impl FragmentTree { /// Add a candidate and recursively populate from storage. 
pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) { + self.add_and_populate_from(hash, storage); + } + + fn add_and_populate_from<'a>(&mut self, hash: CandidateHash, storage: impl PopulateFrom<'a>) { let candidate_entry = match storage.get(&hash) { None => return, Some(e) => e, @@ -451,7 +482,7 @@ impl FragmentTree { } } - fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec) { + fn populate_from_bases<'a>(&mut self, storage: impl PopulateFrom<'a>, initial_bases: Vec) { // Populate the tree breadth-first. let mut last_sweep_start = None; From 519238cade7c4c34ced289eba1af15061f5cadfb Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 17 Apr 2022 22:36:24 -0500 Subject: [PATCH 53/87] implement GetHypotheticalDepths --- Cargo.lock | 1 + node/core/prospective-parachains/Cargo.toml | 1 + .../src/fragment_tree.rs | 81 ++++++++++++++++--- node/core/prospective-parachains/src/lib.rs | 39 +++++++++ 4 files changed, 109 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dac55b4f0648..99c5d28c343d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6892,6 +6892,7 @@ dependencies = [ name = "polkadot-node-core-prospective-parachains" version = "0.9.16" dependencies = [ + "bitvec 1.0.0", "fatality", "futures 0.3.21", "parity-scale-codec 2.3.1", diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml index 754d5aac523f..a59636924b43 100644 --- a/node/core/prospective-parachains/Cargo.toml +++ b/node/core/prospective-parachains/Cargo.toml @@ -10,6 +10,7 @@ gum = { package = "tracing-gum", path = "../../gum" } parity-scale-codec = "2" thiserror = "1.0.30" fatality = "0.0.6" +bitvec = "1" polkadot-primitives = { path = "../../../primitives" } polkadot-node-primitives = { path = "../../primitives" } diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 8ed51eb59f6e..6b5cddce0b4f 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -64,6 +64,7 @@ use polkadot_primitives::vstaging::{ BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, }; +use bitvec::prelude::*; /// An error indicating that a supplied candidate didn't match the persisted /// validation data provided alongside it. @@ -317,6 +318,10 @@ pub(crate) struct FragmentTree { // Invariant: a contiguous prefix of the 'nodes' storage will contain // the top-level children. nodes: Vec, + + // The candidates stored in this tree, mapped to a bitvec indicating the depths + // where the candidate is stored. + candidates: HashMap>, } impl FragmentTree { @@ -332,7 +337,11 @@ impl FragmentTree { "Instantiating Fragment Tree", ); - let mut tree = FragmentTree { scope, nodes: Vec::new() }; + let mut tree = FragmentTree { + scope, + nodes: Vec::new(), + candidates: HashMap::new(), + }; tree.populate_from_bases(storage, vec![NodePointer::Root]); @@ -345,6 +354,12 @@ impl FragmentTree { let parent_pointer = node.parent; let candidate_hash = node.candidate_hash; + let max_depth = self.scope.max_depth; + + self.candidates.entry(candidate_hash) + .or_insert_with(|| bitvec![u16, Msb0; 0; max_depth]) + .set(node.depth, true); + match parent_pointer { NodePointer::Storage(ptr) => { self.nodes.push(node); @@ -389,18 +404,10 @@ impl FragmentTree { } } - /// Returns a set of candidate hashes contained in nodes. 
- /// - /// This runs in O(n) time in the number of nodes - /// and allocates memory. - pub(crate) fn candidates(&self) -> HashSet { - let mut set = HashSet::with_capacity(self.nodes.len()); - - for f in &self.nodes { - set.insert(f.candidate_hash); - } - - set + /// Returns an O(n) iterator over the hashes of candidates contained in the + /// tree. + pub(crate) fn candidates<'a>(&'a self) -> impl Iterator + 'a { + self.candidates.keys().cloned() } /// Add a candidate and recursively populate from storage. @@ -408,6 +415,50 @@ impl FragmentTree { self.add_and_populate_from(hash, storage); } + /// Returns the hypothetical depths where a candidate with the given hash and parent head data + /// would be added to the tree. + /// + /// If the candidate is already known, this returns the actual depths where this + /// candidate is part of the tree. + pub(crate) fn hypothetical_depths( + &self, + hash: CandidateHash, + parent_head_data_hash: Hash, + candidate_relay_parent: Hash, + ) -> Vec { + // if known. + if let Some(depths) = self.candidates.get(&hash) { + return depths.iter_ones().collect(); + } + + // if out of scope. + let candidate_relay_parent_number = if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { + info.number + } else { + return Vec::new(); + }; + + let max_depth = self.scope.max_depth; + let mut depths = bitvec![u16, Msb0; 0; max_depth]; + + // iterate over all nodes < max_depth where parent head-data matches, + // relay-parent number is <= candidate, and depth < max_depth. + for node in &self.nodes { + if node.depth == max_depth { continue } + if node.fragment.relay_parent().number > candidate_relay_parent_number { continue } + if node.head_data_hash == parent_head_data_hash { + depths.set(node.depth + 1, true); + } + } + + // compare against root as well. + if self.scope.base_constraints.required_parent.hash() == parent_head_data_hash { + depths.set(0, true); + } + + depths.iter_ones().collect() + } + fn add_and_populate_from<'a>(&mut self, hash: CandidateHash, storage: impl PopulateFrom<'a>) { let candidate_entry = match storage.get(&hash) { None => return, @@ -574,6 +625,8 @@ impl FragmentTree { let mut cumulative_modifications = modifications.clone(); cumulative_modifications.stack(fragment.constraint_modifications()); + + let head_data_hash = fragment.candidate().commitments.head_data.hash(); let node = FragmentNode { parent: parent_pointer, fragment, @@ -582,6 +635,7 @@ impl FragmentTree { depth: child_depth, cumulative_modifications, children: Vec::new(), + head_data_hash, }; self.insert_node(node); @@ -602,6 +656,7 @@ struct FragmentNode { candidate_hash: CandidateHash, depth: usize, cumulative_modifications: ConstraintModifications, + head_data_hash: Hash, children: Vec<(NodePointer, CandidateHash)>, } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index c292fd947712..7a089374ad5e 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -62,6 +62,21 @@ const MAX_DEPTH: usize = 4; // The maximum ancestry we support. const MAX_ANCESTRY: usize = 5; +/// A request for the depths a hypothetical candidate would occupy within +/// some fragment tree. +pub struct HypotheticalDepthRequest { + /// The hash of the potential candidate. + pub candidate_hash: CandidateHash, + /// The para of the candidate. + pub candidate_para: ParaId, + /// The hash of the parent head-data of the candidate. 
+ pub parent_head_data_hash: Hash, + /// The relay-parent of the candidate. + pub candidate_relay_parent: Hash, + /// The relay-parent of the fragment tree we are comparing to. + pub fragment_tree_relay_parent: Hash, +} + // TODO [now]: add this enum to the broader subsystem types. /// Messages sent to the Prospective Parachains subsystem. pub enum ProspectiveParachainsMessage { @@ -75,6 +90,15 @@ pub enum ProspectiveParachainsMessage { /// which is a descendant of the given candidate hashes. Returns `None` on the channel /// if no such candidate exists. GetBackableCandidate(Hash, ParaId, Vec, oneshot::Sender>), + /// Get the hypothetical depths that a candidate with the given properties would + /// occupy in the fragment tree for the given relay-parent. + /// + /// If the candidate is already known, this returns the depths the candidate + /// occupies. + /// + /// Returns an empty vector either if there is no such depth or the fragment tree relay-parent + /// is unknown. + GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), } struct RelayBlockViewData { @@ -132,6 +156,7 @@ where required_path, tx, ) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx), + ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx) => answer_hypothetical_depths_request(&view, request, tx), }, } } @@ -411,6 +436,20 @@ fn answer_get_backable_candidate( let _ = tx.send(tree.select_child(&required_path, |candidate| storage.is_backed(candidate))); } +fn answer_hypothetical_depths_request( + view: &View, + request: HypotheticalDepthRequest, + tx: oneshot::Sender>, +) { + match view.active_leaves.get(&request.fragment_tree_relay_parent).and_then(|l| l.fragment_trees.get(&request.candidate_para)) { + Some(fragment_tree) => { + let depths = fragment_tree.hypothetical_depths(request.candidate_hash, request.parent_head_data_hash, request.candidate_relay_parent); + let _ = tx.send(depths); + } + None => { let _ = tx.send(Vec::new()); } + } +} + #[allow(unused)] // TODO [now] async fn fetch_constraints( ctx: &mut Context, From a7fe08c160e7a9860e9c6c75ec76028169b93211 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 17 Apr 2022 22:47:42 -0500 Subject: [PATCH 54/87] revise docs slightly --- node/core/prospective-parachains/src/fragment_tree.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 6b5cddce0b4f..e2e438f0286d 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -30,13 +30,14 @@ //! for a parachain. Furthermore, a parachain is under no obligation to be acyclic, and this is mostly //! just because it's totally inefficient to enforce it. Practical use-cases are acyclic, but there is //! still more than one way to reach the same head-data. -//! 2. and candidates only refer to their parent by its head-data. +//! 2. and candidates only refer to their parent by its head-data. This whole issue could be +//! resolved by having candidates reference their parent by candidate hash. //! //! The implication is that when we receive a candidate receipt, there are actually multiple //! possibilities for any candidates between the para-head recorded in the relay parent's state //! and the candidate in question. //! -//! This means that our nodes need to handle multiple parents and that depth is an +//! 
This means that our candidates need to handle multiple parents and that depth is an //! attribute of a node in a tree, not a candidate. Put another way, the same candidate might //! have different depths in different parts of the tree. //! @@ -52,7 +53,6 @@ //! Our assumption is that the amount of candidates and parachains we consider will be reasonably //! bounded and in practice will not exceed a few thousand at any time. This naive implementation //! will still perform fairly well under these conditions, despite being somewhat wasteful of memory. -// TODO [now]: review & update. use std::collections::{BTreeMap, HashMap, HashSet}; From 9123ee4719b626bd17d34df0dacb5f1b85483c18 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 17 Apr 2022 23:03:22 -0500 Subject: [PATCH 55/87] first fragment tree scope test --- Cargo.lock | 1 + node/core/prospective-parachains/Cargo.toml | 1 + .../src/fragment_tree.rs | 59 ++++++++++++++++++- 3 files changed, 60 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 99c5d28c343d..30501f24367b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6892,6 +6892,7 @@ dependencies = [ name = "polkadot-node-core-prospective-parachains" version = "0.9.16" dependencies = [ + "assert_matches", "bitvec 1.0.0", "fatality", "futures 0.3.21", diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml index a59636924b43..cdb28516a20f 100644 --- a/node/core/prospective-parachains/Cargo.toml +++ b/node/core/prospective-parachains/Cargo.toml @@ -18,6 +18,7 @@ polkadot-node-subsystem = { path = "../../subsystem" } polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] +assert_matches = "1" [features] # If not enabled, the dispute coordinator will do nothing. diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index e2e438f0286d..b5404ddd7e53 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -191,6 +191,7 @@ struct CandidateEntry { } /// The scope of a [`FragmentTree`]. 
+#[derive(Debug)] pub(crate) struct Scope { para: ParaId, relay_parent: RelayChainBlockInfo, @@ -697,7 +698,57 @@ impl FragmentNode { #[cfg(test)] mod tests { - // TODO [now]: scope rejects ancestors that skip blocks + use super::*; + use assert_matches::assert_matches; + use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations; + + #[test] + fn scope_rejects_ancestors_that_skip_blocks() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 10, + hash: Hash::repeat_byte(10), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![ + RelayChainBlockInfo { + number: 8, + hash: Hash::repeat_byte(8), + storage_root: Hash::repeat_byte(69), + }, + ]; + + let max_depth = 2; + let base_constraints = Constraints { + min_relay_parent_number: 8, + max_pov_size: 1_000_000, + max_code_size: 1_000_000, + ump_remaining: 10, + ump_remaining_bytes: 1_000, + dmp_remaining_messages: 10, + hrmp_inbound: InboundHrmpLimitations { + valid_watermarks: vec![8, 9], + }, + hrmp_channels_out: HashMap::new(), + max_hrmp_num_per_candidate: 0, + required_parent: HeadData(vec![1, 2, 3]), + validation_code_hash: Hash::repeat_byte(69).into(), + upgrade_restriction: None, + future_validation_code: None, + }; + + assert_matches!( + Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + max_depth, + ancestors, + ), + Err(UnexpectedAncestor) + ); + } // TODO [now]: scope rejects ancestor of 0 @@ -712,4 +763,10 @@ mod tests { // TODO [now]: add candidate child of root // TODO [now]: add candidate child of non-root + + // TODO [now]: hypothetical_depths for existing candidate. + + // TODO [now]: hypothetical_depths for non-existing candidate based on root. + + // TODO [now]: hypothetical_depths for non-existing candidate not based on root. 
} From e89fc3a1e53eff3eab8a41646360642627e076fe Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 17 Apr 2022 23:07:16 -0500 Subject: [PATCH 56/87] more scope tests --- .../src/fragment_tree.rs | 106 +++++++++++++++++- 1 file changed, 105 insertions(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index b5404ddd7e53..8b5a8e99eeb3 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -750,7 +750,111 @@ mod tests { ); } - // TODO [now]: scope rejects ancestor of 0 + #[test] + fn scope_rejects_ancestor_for_0_block() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 0, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![ + RelayChainBlockInfo { + number: 99999, + hash: Hash::repeat_byte(99), + storage_root: Hash::repeat_byte(69), + }, + ]; + + let max_depth = 2; + let base_constraints = Constraints { + min_relay_parent_number: 0, + max_pov_size: 1_000_000, + max_code_size: 1_000_000, + ump_remaining: 10, + ump_remaining_bytes: 1_000, + dmp_remaining_messages: 10, + hrmp_inbound: InboundHrmpLimitations { + valid_watermarks: vec![8, 9], + }, + hrmp_channels_out: HashMap::new(), + max_hrmp_num_per_candidate: 0, + required_parent: HeadData(vec![1, 2, 3]), + validation_code_hash: Hash::repeat_byte(69).into(), + upgrade_restriction: None, + future_validation_code: None, + }; + + assert_matches!( + Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + max_depth, + ancestors, + ), + Err(UnexpectedAncestor) + ); + } + + #[test] + fn scope_only_takes_ancestors_up_to_min() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 5, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![ + RelayChainBlockInfo { + number: 4, + hash: Hash::repeat_byte(4), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(3), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 2, + hash: Hash::repeat_byte(2), + storage_root: Hash::repeat_byte(69), + }, + ]; + + let max_depth = 2; + let base_constraints = Constraints { + min_relay_parent_number: 3, + max_pov_size: 1_000_000, + max_code_size: 1_000_000, + ump_remaining: 10, + ump_remaining_bytes: 1_000, + dmp_remaining_messages: 10, + hrmp_inbound: InboundHrmpLimitations { + valid_watermarks: vec![8, 9], + }, + hrmp_channels_out: HashMap::new(), + max_hrmp_num_per_candidate: 0, + required_parent: HeadData(vec![1, 2, 3]), + validation_code_hash: Hash::repeat_byte(69).into(), + upgrade_restriction: None, + future_validation_code: None, + }; + + let scope = Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + max_depth, + ancestors, + ).unwrap(); + + assert_eq!(scope.ancestors.len(), 2); + assert_eq!(scope.ancestors_by_hash.len(), 2); + } // TODO [now]: storage sets up links correctly. 
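The scope tests added above pin down how `Scope::with_ancestors` is expected to treat the supplied ancestry: ancestors must descend from the relay-parent one block at a time, a relay-parent at number 0 can have no ancestors at all, and only ancestors at or above the base constraints' `min_relay_parent_number` are retained. A minimal, self-contained sketch of that acceptance logic, using toy stand-ins (`BlockInfo`, `check_ancestry`) rather than the actual types and implementation in `fragment_tree.rs`:

    #[derive(Debug, Clone, PartialEq)]
    struct BlockInfo {
        number: u32,
    }

    #[derive(Debug, PartialEq)]
    struct UnexpectedAncestor;

    fn check_ancestry(
        relay_parent: &BlockInfo,
        ancestors: &[BlockInfo],
        min_relay_parent_number: u32,
    ) -> Result<Vec<BlockInfo>, UnexpectedAncestor> {
        let mut prev = relay_parent.number;
        let mut accepted = Vec::new();
        for ancestor in ancestors {
            // An ancestor list under block number 0 can never be valid.
            if prev == 0 {
                return Err(UnexpectedAncestor)
            }
            // Ancestors must be contiguous: each exactly one block below the previous.
            if ancestor.number != prev - 1 {
                return Err(UnexpectedAncestor)
            }
            // Anything below the constraints' minimum relay-parent is simply not kept.
            if ancestor.number < min_relay_parent_number {
                break
            }
            prev = ancestor.number;
            accepted.push(ancestor.clone());
        }
        Ok(accepted)
    }

    fn main() {
        // Mirrors `scope_only_takes_ancestors_up_to_min`: a minimum of 3 keeps blocks 4 and 3 only.
        let relay_parent = BlockInfo { number: 5 };
        let ancestors =
            vec![BlockInfo { number: 4 }, BlockInfo { number: 3 }, BlockInfo { number: 2 }];
        let kept = check_ancestry(&relay_parent, &ancestors, 3).unwrap();
        assert_eq!(kept.len(), 2);

        // Mirrors `scope_rejects_ancestors_that_skip_blocks`: block 8 is not a direct parent of 10.
        let relay_parent = BlockInfo { number: 10 };
        let ancestors = vec![BlockInfo { number: 8 }];
        assert_eq!(check_ancestry(&relay_parent, &ancestors, 8), Err(UnexpectedAncestor));
    }

The numbers in `main` mirror the expectations of `scope_rejects_ancestors_that_skip_blocks` and `scope_only_takes_ancestors_up_to_min` in the patch above; the real constructor additionally records the accepted ancestors both in order and by hash.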
From 9714b6cc385d3c1776b86a44b97c9ce01ded4356 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 17 Apr 2022 23:22:13 -0500 Subject: [PATCH 57/87] test add_candidate --- Cargo.lock | 1 + node/core/prospective-parachains/Cargo.toml | 1 + .../src/fragment_tree.rs | 41 ++++++++++++++++++- 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 30501f24367b..ae85d79e4728 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6901,6 +6901,7 @@ dependencies = [ "polkadot-node-subsystem", "polkadot-node-subsystem-util", "polkadot-primitives", + "polkadot-primitives-test-helpers", "thiserror", "tracing-gum", ] diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml index cdb28516a20f..71374285707b 100644 --- a/node/core/prospective-parachains/Cargo.toml +++ b/node/core/prospective-parachains/Cargo.toml @@ -19,6 +19,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] assert_matches = "1" +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } [features] # If not enabled, the dispute coordinator will do nothing. diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 8b5a8e99eeb3..dcdfa3a1c094 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -701,6 +701,8 @@ mod tests { use super::*; use assert_matches::assert_matches; use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations; + use polkadot_primitives_test_helpers as test_helpers; + use polkadot_primitives::vstaging::{CandidateCommitments, CandidateDescriptor}; #[test] fn scope_rejects_ancestors_that_skip_blocks() { @@ -856,7 +858,44 @@ mod tests { assert_eq!(scope.ancestors_by_hash.len(), 2); } - // TODO [now]: storage sets up links correctly. 
+ #[test] + fn storage_add_candidate() { + let mut storage = CandidateStorage::new(); + let persisted_validation_data = PersistedValidationData { + parent_head: vec![4, 5, 6].into(), + relay_parent_number: 8, + relay_parent_storage_root: Hash::repeat_byte(69), + max_pov_size: 1_000_000, + }; + + let candidate = CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id: ParaId::from(5u32), + relay_parent: Hash::repeat_byte(69), + collator: test_helpers::dummy_collator(), + persisted_validation_data_hash: persisted_validation_data.hash(), + pov_hash: Hash::repeat_byte(1), + erasure_root: Hash::repeat_byte(1), + signature: test_helpers::dummy_collator_signature(), + para_head: Hash::repeat_byte(1), + validation_code_hash: Hash::repeat_byte(1).into(), + }, + commitments: CandidateCommitments { + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + new_validation_code: None, + head_data: vec![1, 2, 3].into(), + processed_downward_messages: 0, + hrmp_watermark: 10, + }, + }; + let candidate_hash = candidate.hash(); + let parent_head_hash = persisted_validation_data.parent_head.hash(); + + storage.add_candidate(candidate, persisted_validation_data).unwrap(); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + } // TODO [now]: retain From d0dcdb05dce921e9b25011275ae88189fdbcc1ba Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 17 Apr 2022 23:22:22 -0500 Subject: [PATCH 58/87] fmt --- .../src/fragment_tree.rs | 114 +++++++----------- node/core/prospective-parachains/src/lib.rs | 21 +++- 2 files changed, 60 insertions(+), 75 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index dcdfa3a1c094..b2ad7e7e59f1 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -57,6 +57,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use super::LOG_TARGET; +use bitvec::prelude::*; use polkadot_node_subsystem_util::inclusion_emulator::staging::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; @@ -64,7 +65,6 @@ use polkadot_primitives::vstaging::{ BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, }; -use bitvec::prelude::*; /// An error indicating that a supplied candidate didn't match the persisted /// validation data provided alongside it. 
@@ -290,10 +290,7 @@ trait PopulateFrom<'a> { fn get(&self, candidate_hash: &CandidateHash) -> Option<&'a CandidateEntry>; - fn iter_para_children( - &self, - parent_head_hash: &Hash, - ) -> Self::ParaChildrenIter; + fn iter_para_children(&self, parent_head_hash: &Hash) -> Self::ParaChildrenIter; } impl<'a> PopulateFrom<'a> for &'a CandidateStorage { @@ -303,10 +300,7 @@ impl<'a> PopulateFrom<'a> for &'a CandidateStorage { CandidateStorage::get(self, candidate_hash) } - fn iter_para_children( - &self, - parent_head_hash: &Hash, - ) -> Self::ParaChildrenIter { + fn iter_para_children(&self, parent_head_hash: &Hash) -> Self::ParaChildrenIter { Box::new(CandidateStorage::iter_para_children(self, parent_head_hash)) } } @@ -338,11 +332,7 @@ impl FragmentTree { "Instantiating Fragment Tree", ); - let mut tree = FragmentTree { - scope, - nodes: Vec::new(), - candidates: HashMap::new(), - }; + let mut tree = FragmentTree { scope, nodes: Vec::new(), candidates: HashMap::new() }; tree.populate_from_bases(storage, vec![NodePointer::Root]); @@ -357,7 +347,8 @@ impl FragmentTree { let max_depth = self.scope.max_depth; - self.candidates.entry(candidate_hash) + self.candidates + .entry(candidate_hash) .or_insert_with(|| bitvec![u16, Msb0; 0; max_depth]) .set(node.depth, true); @@ -407,7 +398,7 @@ impl FragmentTree { /// Returns an O(n) iterator over the hashes of candidates contained in the /// tree. - pub(crate) fn candidates<'a>(&'a self) -> impl Iterator + 'a { + pub(crate) fn candidates<'a>(&'a self) -> impl Iterator + 'a { self.candidates.keys().cloned() } @@ -429,15 +420,16 @@ impl FragmentTree { ) -> Vec { // if known. if let Some(depths) = self.candidates.get(&hash) { - return depths.iter_ones().collect(); + return depths.iter_ones().collect() } // if out of scope. - let candidate_relay_parent_number = if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { - info.number - } else { - return Vec::new(); - }; + let candidate_relay_parent_number = + if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { + info.number + } else { + return Vec::new() + }; let max_depth = self.scope.max_depth; let mut depths = bitvec![u16, Msb0; 0; max_depth]; @@ -445,8 +437,12 @@ impl FragmentTree { // iterate over all nodes < max_depth where parent head-data matches, // relay-parent number is <= candidate, and depth < max_depth. for node in &self.nodes { - if node.depth == max_depth { continue } - if node.fragment.relay_parent().number > candidate_relay_parent_number { continue } + if node.depth == max_depth { + continue + } + if node.fragment.relay_parent().number > candidate_relay_parent_number { + continue + } if node.head_data_hash == parent_head_data_hash { depths.set(node.depth + 1, true); } @@ -534,7 +530,11 @@ impl FragmentTree { } } - fn populate_from_bases<'a>(&mut self, storage: impl PopulateFrom<'a>, initial_bases: Vec) { + fn populate_from_bases<'a>( + &mut self, + storage: impl PopulateFrom<'a>, + initial_bases: Vec, + ) { // Populate the tree breadth-first. 
let mut last_sweep_start = None; @@ -701,8 +701,8 @@ mod tests { use super::*; use assert_matches::assert_matches; use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations; - use polkadot_primitives_test_helpers as test_helpers; use polkadot_primitives::vstaging::{CandidateCommitments, CandidateDescriptor}; + use polkadot_primitives_test_helpers as test_helpers; #[test] fn scope_rejects_ancestors_that_skip_blocks() { @@ -713,13 +713,11 @@ mod tests { storage_root: Hash::repeat_byte(69), }; - let ancestors = vec![ - RelayChainBlockInfo { - number: 8, - hash: Hash::repeat_byte(8), - storage_root: Hash::repeat_byte(69), - }, - ]; + let ancestors = vec![RelayChainBlockInfo { + number: 8, + hash: Hash::repeat_byte(8), + storage_root: Hash::repeat_byte(69), + }]; let max_depth = 2; let base_constraints = Constraints { @@ -729,9 +727,7 @@ mod tests { ump_remaining: 10, ump_remaining_bytes: 1_000, dmp_remaining_messages: 10, - hrmp_inbound: InboundHrmpLimitations { - valid_watermarks: vec![8, 9], - }, + hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![8, 9] }, hrmp_channels_out: HashMap::new(), max_hrmp_num_per_candidate: 0, required_parent: HeadData(vec![1, 2, 3]), @@ -741,13 +737,7 @@ mod tests { }; assert_matches!( - Scope::with_ancestors( - para_id, - relay_parent, - base_constraints, - max_depth, - ancestors, - ), + Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), Err(UnexpectedAncestor) ); } @@ -761,13 +751,11 @@ mod tests { storage_root: Hash::repeat_byte(69), }; - let ancestors = vec![ - RelayChainBlockInfo { - number: 99999, - hash: Hash::repeat_byte(99), - storage_root: Hash::repeat_byte(69), - }, - ]; + let ancestors = vec![RelayChainBlockInfo { + number: 99999, + hash: Hash::repeat_byte(99), + storage_root: Hash::repeat_byte(69), + }]; let max_depth = 2; let base_constraints = Constraints { @@ -777,9 +765,7 @@ mod tests { ump_remaining: 10, ump_remaining_bytes: 1_000, dmp_remaining_messages: 10, - hrmp_inbound: InboundHrmpLimitations { - valid_watermarks: vec![8, 9], - }, + hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![8, 9] }, hrmp_channels_out: HashMap::new(), max_hrmp_num_per_candidate: 0, required_parent: HeadData(vec![1, 2, 3]), @@ -789,13 +775,7 @@ mod tests { }; assert_matches!( - Scope::with_ancestors( - para_id, - relay_parent, - base_constraints, - max_depth, - ancestors, - ), + Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), Err(UnexpectedAncestor) ); } @@ -835,9 +815,7 @@ mod tests { ump_remaining: 10, ump_remaining_bytes: 1_000, dmp_remaining_messages: 10, - hrmp_inbound: InboundHrmpLimitations { - valid_watermarks: vec![8, 9], - }, + hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![8, 9] }, hrmp_channels_out: HashMap::new(), max_hrmp_num_per_candidate: 0, required_parent: HeadData(vec![1, 2, 3]), @@ -846,13 +824,9 @@ mod tests { future_validation_code: None, }; - let scope = Scope::with_ancestors( - para_id, - relay_parent, - base_constraints, - max_depth, - ancestors, - ).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors) + .unwrap(); assert_eq!(scope.ancestors.len(), 2); assert_eq!(scope.ancestors_by_hash.len(), 2); diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 7a089374ad5e..3145501b7043 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -156,7 
+156,8 @@ where required_path, tx, ) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx), - ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx) => answer_hypothetical_depths_request(&view, request, tx), + ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx) => + answer_hypothetical_depths_request(&view, request, tx), }, } } @@ -441,12 +442,22 @@ fn answer_hypothetical_depths_request( request: HypotheticalDepthRequest, tx: oneshot::Sender>, ) { - match view.active_leaves.get(&request.fragment_tree_relay_parent).and_then(|l| l.fragment_trees.get(&request.candidate_para)) { + match view + .active_leaves + .get(&request.fragment_tree_relay_parent) + .and_then(|l| l.fragment_trees.get(&request.candidate_para)) + { Some(fragment_tree) => { - let depths = fragment_tree.hypothetical_depths(request.candidate_hash, request.parent_head_data_hash, request.candidate_relay_parent); + let depths = fragment_tree.hypothetical_depths( + request.candidate_hash, + request.parent_head_data_hash, + request.candidate_relay_parent, + ); let _ = tx.send(depths); - } - None => { let _ = tx.send(Vec::new()); } + }, + None => { + let _ = tx.send(Vec::new()); + }, } } From 1cf2d8a510e1e400f6240595c85f31429dc42507 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Sun, 17 Apr 2022 23:23:22 -0500 Subject: [PATCH 59/87] test retain --- .../src/fragment_tree.rs | 44 ++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index b2ad7e7e59f1..21ab0a98ebd7 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -871,7 +871,49 @@ mod tests { assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); } - // TODO [now]: retain + #[test] + fn storage_retain() { + let mut storage = CandidateStorage::new(); + let persisted_validation_data = PersistedValidationData { + parent_head: vec![4, 5, 6].into(), + relay_parent_number: 8, + relay_parent_storage_root: Hash::repeat_byte(69), + max_pov_size: 1_000_000, + }; + + let candidate = CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id: ParaId::from(5u32), + relay_parent: Hash::repeat_byte(69), + collator: test_helpers::dummy_collator(), + persisted_validation_data_hash: persisted_validation_data.hash(), + pov_hash: Hash::repeat_byte(1), + erasure_root: Hash::repeat_byte(1), + signature: test_helpers::dummy_collator_signature(), + para_head: Hash::repeat_byte(1), + validation_code_hash: Hash::repeat_byte(1).into(), + }, + commitments: CandidateCommitments { + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + new_validation_code: None, + head_data: vec![1, 2, 3].into(), + processed_downward_messages: 0, + hrmp_watermark: 10, + }, + }; + let candidate_hash = candidate.hash(); + let parent_head_hash = persisted_validation_data.parent_head.hash(); + + storage.add_candidate(candidate, persisted_validation_data).unwrap(); + storage.retain(|_| true); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + + storage.retain(|_| false); + assert!(!storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); + } // TODO [now]: recursive populate From f338eecf837a2a78956f263e38a2669bf82e91fa Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 18 Apr 2022 21:22:57 -0500 
Subject: [PATCH 60/87] refactor test code --- .../src/fragment_tree.rs | 201 +++++++++--------- 1 file changed, 96 insertions(+), 105 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 21ab0a98ebd7..dc02c524cf2f 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -701,9 +701,73 @@ mod tests { use super::*; use assert_matches::assert_matches; use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations; - use polkadot_primitives::vstaging::{CandidateCommitments, CandidateDescriptor}; + use polkadot_primitives::vstaging::{ + BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, + }; use polkadot_primitives_test_helpers as test_helpers; + fn make_constraints( + min_relay_parent_number: BlockNumber, + valid_watermarks: Vec, + required_parent: HeadData, + ) -> Constraints { + Constraints { + min_relay_parent_number, + max_pov_size: 1_000_000, + max_code_size: 1_000_000, + ump_remaining: 10, + ump_remaining_bytes: 1_000, + dmp_remaining_messages: 10, + hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, + hrmp_channels_out: HashMap::new(), + max_hrmp_num_per_candidate: 0, + required_parent, + validation_code_hash: Hash::repeat_byte(42).into(), + upgrade_restriction: None, + future_validation_code: None, + } + } + + fn make_committed_candidate( + para_id: ParaId, + relay_parent: Hash, + relay_parent_number: BlockNumber, + parent_head: HeadData, + para_head: HeadData, + hrmp_watermark: BlockNumber, + ) -> (PersistedValidationData, CommittedCandidateReceipt) { + let persisted_validation_data = PersistedValidationData { + parent_head, + relay_parent_number, + relay_parent_storage_root: Hash::repeat_byte(69), + max_pov_size: 1_000_000, + }; + + let candidate = CommittedCandidateReceipt { + descriptor: CandidateDescriptor { + para_id, + relay_parent, + collator: test_helpers::dummy_collator(), + persisted_validation_data_hash: persisted_validation_data.hash(), + pov_hash: Hash::repeat_byte(1), + erasure_root: Hash::repeat_byte(1), + signature: test_helpers::dummy_collator_signature(), + para_head: para_head.hash(), + validation_code_hash: Hash::repeat_byte(42).into(), + }, + commitments: CandidateCommitments { + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + new_validation_code: None, + head_data: para_head, + processed_downward_messages: 0, + hrmp_watermark, + }, + }; + + (persisted_validation_data, candidate) + } + #[test] fn scope_rejects_ancestors_that_skip_blocks() { let para_id = ParaId::from(5u32); @@ -720,21 +784,7 @@ mod tests { }]; let max_depth = 2; - let base_constraints = Constraints { - min_relay_parent_number: 8, - max_pov_size: 1_000_000, - max_code_size: 1_000_000, - ump_remaining: 10, - ump_remaining_bytes: 1_000, - dmp_remaining_messages: 10, - hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![8, 9] }, - hrmp_channels_out: HashMap::new(), - max_hrmp_num_per_candidate: 0, - required_parent: HeadData(vec![1, 2, 3]), - validation_code_hash: Hash::repeat_byte(69).into(), - upgrade_restriction: None, - future_validation_code: None, - }; + let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into()); assert_matches!( Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), @@ -758,21 +808,7 @@ mod tests { }]; let max_depth = 2; - let base_constraints = Constraints { - min_relay_parent_number: 0, - 
max_pov_size: 1_000_000, - max_code_size: 1_000_000, - ump_remaining: 10, - ump_remaining_bytes: 1_000, - dmp_remaining_messages: 10, - hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![8, 9] }, - hrmp_channels_out: HashMap::new(), - max_hrmp_num_per_candidate: 0, - required_parent: HeadData(vec![1, 2, 3]), - validation_code_hash: Hash::repeat_byte(69).into(), - upgrade_restriction: None, - future_validation_code: None, - }; + let base_constraints = make_constraints(0, vec![], vec![1, 2, 3].into()); assert_matches!( Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), @@ -808,21 +844,7 @@ mod tests { ]; let max_depth = 2; - let base_constraints = Constraints { - min_relay_parent_number: 3, - max_pov_size: 1_000_000, - max_code_size: 1_000_000, - ump_remaining: 10, - ump_remaining_bytes: 1_000, - dmp_remaining_messages: 10, - hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![8, 9] }, - hrmp_channels_out: HashMap::new(), - max_hrmp_num_per_candidate: 0, - required_parent: HeadData(vec![1, 2, 3]), - validation_code_hash: Hash::repeat_byte(69).into(), - upgrade_restriction: None, - future_validation_code: None, - }; + let base_constraints = make_constraints(3, vec![2], vec![1, 2, 3].into()); let scope = Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors) @@ -835,38 +857,20 @@ mod tests { #[test] fn storage_add_candidate() { let mut storage = CandidateStorage::new(); - let persisted_validation_data = PersistedValidationData { - parent_head: vec![4, 5, 6].into(), - relay_parent_number: 8, - relay_parent_storage_root: Hash::repeat_byte(69), - max_pov_size: 1_000_000, - }; - let candidate = CommittedCandidateReceipt { - descriptor: CandidateDescriptor { - para_id: ParaId::from(5u32), - relay_parent: Hash::repeat_byte(69), - collator: test_helpers::dummy_collator(), - persisted_validation_data_hash: persisted_validation_data.hash(), - pov_hash: Hash::repeat_byte(1), - erasure_root: Hash::repeat_byte(1), - signature: test_helpers::dummy_collator_signature(), - para_head: Hash::repeat_byte(1), - validation_code_hash: Hash::repeat_byte(1).into(), - }, - commitments: CandidateCommitments { - upward_messages: Vec::new(), - horizontal_messages: Vec::new(), - new_validation_code: None, - head_data: vec![1, 2, 3].into(), - processed_downward_messages: 0, - hrmp_watermark: 10, - }, - }; + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + Hash::repeat_byte(69), + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + let candidate_hash = candidate.hash(); - let parent_head_hash = persisted_validation_data.parent_head.hash(); + let parent_head_hash = pvd.parent_head.hash(); - storage.add_candidate(candidate, persisted_validation_data).unwrap(); + storage.add_candidate(candidate, pvd).unwrap(); assert!(storage.contains(&candidate_hash)); assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); } @@ -874,38 +878,20 @@ mod tests { #[test] fn storage_retain() { let mut storage = CandidateStorage::new(); - let persisted_validation_data = PersistedValidationData { - parent_head: vec![4, 5, 6].into(), - relay_parent_number: 8, - relay_parent_storage_root: Hash::repeat_byte(69), - max_pov_size: 1_000_000, - }; - let candidate = CommittedCandidateReceipt { - descriptor: CandidateDescriptor { - para_id: ParaId::from(5u32), - relay_parent: Hash::repeat_byte(69), - collator: test_helpers::dummy_collator(), - persisted_validation_data_hash: persisted_validation_data.hash(), - pov_hash: 
Hash::repeat_byte(1), - erasure_root: Hash::repeat_byte(1), - signature: test_helpers::dummy_collator_signature(), - para_head: Hash::repeat_byte(1), - validation_code_hash: Hash::repeat_byte(1).into(), - }, - commitments: CandidateCommitments { - upward_messages: Vec::new(), - horizontal_messages: Vec::new(), - new_validation_code: None, - head_data: vec![1, 2, 3].into(), - processed_downward_messages: 0, - hrmp_watermark: 10, - }, - }; + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + Hash::repeat_byte(69), + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + let candidate_hash = candidate.hash(); - let parent_head_hash = persisted_validation_data.parent_head.hash(); + let parent_head_hash = pvd.parent_head.hash(); - storage.add_candidate(candidate, persisted_validation_data).unwrap(); + storage.add_candidate(candidate, pvd).unwrap(); storage.retain(|_| true); assert!(storage.contains(&candidate_hash)); assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); @@ -915,7 +901,12 @@ mod tests { assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); } - // TODO [now]: recursive populate + #[test] + fn populate_works_recursively() { + let mut storage = CandidateStorage::new(); + + // TODO [now] + } // TODO [now]: enforce root-child nodes contiguous From 1da91503e251e959e074e226e73b0c98ffe013fd Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 18 Apr 2022 21:46:57 -0500 Subject: [PATCH 61/87] test populate is recursive --- .../src/fragment_tree.rs | 75 ++++++++++++++++++- 1 file changed, 71 insertions(+), 4 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index dc02c524cf2f..622b92ed0cad 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -613,14 +613,24 @@ impl FragmentTree { let fragment = { let f = Fragment::new( - relay_parent, + relay_parent.clone(), child_constraints.clone(), candidate.candidate.clone(), ); match f { Ok(f) => f, - Err(_) => continue, + Err(e) => { + gum::debug!( + target: LOG_TARGET, + err = ?e, + ?relay_parent, + candidate_hash = ?candidate.candidate_hash, + "Failed to instantiate fragment", + ); + + continue + }, } }; @@ -905,10 +915,67 @@ mod tests { fn populate_works_recursively() { let mut storage = CandidateStorage::new(); - // TODO [now] + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_b_info, base_constraints, 4, 
ancestors) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert!(candidates.contains(&candidate_a_hash)); + assert!(candidates.contains(&candidate_b_hash)); + + assert_eq!(tree.nodes.len(), 2); + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[0].depth, 0); + + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[1].depth, 1); } - // TODO [now]: enforce root-child nodes contiguous + // TODO [now]: enforce root-child (depth 0) nodes contiguous // TODO [now]: add candidate child of root From ff8136aa76501b3978db335e9c371320d132df8f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 18 Apr 2022 22:02:11 -0500 Subject: [PATCH 62/87] test contiguity of depth 0 is maintained --- .../src/fragment_tree.rs | 67 ++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 622b92ed0cad..f412a721becb 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -975,7 +975,72 @@ mod tests { assert_eq!(tree.nodes[1].depth, 1); } - // TODO [now]: enforce root-child (depth 0) nodes contiguous + #[test] + fn children_of_root_are_contiguous() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + + let (pvd_a2, candidate_a2) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b, 1].into(), + 0, + ); + let candidate_a2_hash = candidate_a2.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_b_info, base_constraints, 4, ancestors) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_a2, pvd_a2).unwrap(); + tree.add_and_populate(candidate_a2_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 3); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Root); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); + } // TODO [now]: add candidate child of root From a45a567b9e5a658611cb79dfcdd5e911a0df754e Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 18 Apr 2022 22:05:36 -0500 Subject: [PATCH 63/87] add_and_populate tests --- .../src/fragment_tree.rs | 98 ++++++++++++++++++- 1 
file changed, 96 insertions(+), 2 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index f412a721becb..949ea8cadc23 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -1042,9 +1042,103 @@ mod tests { assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); } - // TODO [now]: add candidate child of root + #[test] + fn add_candidate_child_of_root() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, 4, vec![]) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_b, pvd_b).unwrap(); + tree.add_and_populate(candidate_b_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Root); + } - // TODO [now]: add candidate child of non-root + #[test] + fn add_candidate_child_of_non_root() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, 4, vec![]) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_b, pvd_b).unwrap(); + tree.add_and_populate(candidate_b_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + } // TODO [now]: hypothetical_depths for existing candidate. 
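The population tests above exercise the core matching behaviour: `FragmentTree::populate` seeds depth 0 from the scope's required parent head-data and then keeps attaching candidates whose parent head-data matches the output head-data of an already-placed node, and `add_and_populate` repeats the same search for a late-arriving candidate. A deliberately simplified, self-contained sketch of that loop (toy types only; the real code also checks constraints via `Fragment::new`, tracks `NodePointer` parents, and bounds work by `max_depth` per candidate rather than per pass):

    use std::collections::{HashMap, VecDeque};

    #[derive(Clone)]
    struct ToyCandidate {
        parent_head: Vec<u8>,
        output_head: Vec<u8>,
    }

    // Breadth-first population: nodes are (depth, candidate index). The scope's required
    // parent seeds depth 0, and each placed node's output head-data becomes the parent
    // head-data that its children must match.
    fn populate(
        required_parent: Vec<u8>,
        storage: &[ToyCandidate],
        max_depth: usize,
    ) -> Vec<(usize, usize)> {
        // Index candidates by parent head-data, as `CandidateStorage` does by head-data hash.
        let mut by_parent: HashMap<Vec<u8>, Vec<usize>> = HashMap::new();
        for (i, c) in storage.iter().enumerate() {
            by_parent.entry(c.parent_head.clone()).or_default().push(i);
        }

        let mut nodes = Vec::new();
        let mut frontier = VecDeque::new();
        frontier.push_back((0usize, required_parent));
        while let Some((depth, head)) = frontier.pop_front() {
            if depth > max_depth {
                continue
            }
            for &i in by_parent.get(&head).into_iter().flatten() {
                nodes.push((depth, i));
                frontier.push_back((depth + 1, storage[i].output_head.clone()));
            }
        }
        nodes
    }

    fn main() {
        // Mirrors `populate_works_recursively`: candidate 1 builds on candidate 0's output,
        // so it lands at depth 1 beneath the depth-0 node.
        let storage = vec![
            ToyCandidate { parent_head: vec![0x0a], output_head: vec![0x0b] },
            ToyCandidate { parent_head: vec![0x0b], output_head: vec![0x0c] },
        ];
        assert_eq!(populate(vec![0x0a], &storage, 4), vec![(0, 0), (1, 1)]);
    }

Because placement is keyed purely on head-data, the same candidate can legitimately occupy several depths, which is exactly what the `graceful_cycle_of_0` and `graceful_cycle_of_1` tests in the next patch assert for the real tree.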
From d9f5d3a4d350c56538752d465c054c6e518d60e3 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 18 Apr 2022 22:10:34 -0500 Subject: [PATCH 64/87] cycle tests --- .../src/fragment_tree.rs | 104 ++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 949ea8cadc23..3cfe507e0358 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -1140,6 +1140,110 @@ mod tests { assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); } + #[test] + fn graceful_cycle_of_0() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0a].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, max_depth, vec![]) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 1); + assert_eq!(tree.nodes.len(), max_depth); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); + assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash); + } + + #[test] + fn graceful_cycle_of_1() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0a].into(), // input same as output + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, max_depth, vec![]) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), max_depth); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + 
assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); + assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); + } + // TODO [now]: hypothetical_depths for existing candidate. // TODO [now]: hypothetical_depths for non-existing candidate based on root. From c2306caa8108c638c360b4f684ff0a84dac69f6d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 18 Apr 2022 22:46:09 -0500 Subject: [PATCH 65/87] remove PopulateFrom trait --- .../src/fragment_tree.rs | 83 +++++++------------ 1 file changed, 29 insertions(+), 54 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 3cfe507e0358..45cdfdd52297 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -284,27 +284,6 @@ enum NodePointer { Storage(usize), } -/// Abstraction around `&'a CandidateStorage`. -trait PopulateFrom<'a> { - type ParaChildrenIter: Iterator + 'a; - - fn get(&self, candidate_hash: &CandidateHash) -> Option<&'a CandidateEntry>; - - fn iter_para_children(&self, parent_head_hash: &Hash) -> Self::ParaChildrenIter; -} - -impl<'a> PopulateFrom<'a> for &'a CandidateStorage { - type ParaChildrenIter = Box + 'a>; - - fn get(&self, candidate_hash: &CandidateHash) -> Option<&'a CandidateEntry> { - CandidateStorage::get(self, candidate_hash) - } - - fn iter_para_children(&self, parent_head_hash: &Hash) -> Self::ParaChildrenIter { - Box::new(CandidateStorage::iter_para_children(self, parent_head_hash)) - } -} - /// This is a tree of candidates based on some underlying storage of candidates /// and a scope. pub(crate) struct FragmentTree { @@ -404,7 +383,34 @@ impl FragmentTree { /// Add a candidate and recursively populate from storage. pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) { - self.add_and_populate_from(hash, storage); + let candidate_entry = match storage.get(&hash) { + None => return, + Some(e) => e, + }; + + let candidate_parent = &candidate_entry.candidate.persisted_validation_data.parent_head; + + // Select an initial set of bases, whose required relay-parent matches that of the candidate. + let root_base = if &self.scope.base_constraints.required_parent == candidate_parent { + Some(NodePointer::Root) + } else { + None + }; + + let non_root_bases = self + .nodes + .iter() + .enumerate() + .filter(|(_, n)| { + n.cumulative_modifications.required_parent.as_ref() == Some(candidate_parent) + }) + .map(|(i, _)| NodePointer::Storage(i)); + + let bases = root_base.into_iter().chain(non_root_bases).collect(); + + // Pass this into the population function, which will sanity-check stuff like depth, fragments, + // etc. and then recursively populate. 
+ self.populate_from_bases(storage, bases); } /// Returns the hypothetical depths where a candidate with the given hash and parent head data @@ -456,37 +462,6 @@ impl FragmentTree { depths.iter_ones().collect() } - fn add_and_populate_from<'a>(&mut self, hash: CandidateHash, storage: impl PopulateFrom<'a>) { - let candidate_entry = match storage.get(&hash) { - None => return, - Some(e) => e, - }; - - let candidate_parent = &candidate_entry.candidate.persisted_validation_data.parent_head; - - // Select an initial set of bases, whose required relay-parent matches that of the candidate. - let root_base = if &self.scope.base_constraints.required_parent == candidate_parent { - Some(NodePointer::Root) - } else { - None - }; - - let non_root_bases = self - .nodes - .iter() - .enumerate() - .filter(|(_, n)| { - n.cumulative_modifications.required_parent.as_ref() == Some(candidate_parent) - }) - .map(|(i, _)| NodePointer::Storage(i)); - - let bases = root_base.into_iter().chain(non_root_bases).collect(); - - // Pass this into the population function, which will sanity-check stuff like depth, fragments, - // etc. and then recursively populate. - self.populate_from_bases(storage, bases); - } - /// Select a candidate after the given `required_path` which pass /// the predicate. /// @@ -532,7 +507,7 @@ impl FragmentTree { fn populate_from_bases<'a>( &mut self, - storage: impl PopulateFrom<'a>, + storage: &'a CandidateStorage, initial_bases: Vec, ) { // Populate the tree breadth-first. From 8bfdca8a04c8faf9d9cc765d5bde0e59996cc320 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 18 Apr 2022 22:46:16 -0500 Subject: [PATCH 66/87] fmt --- .../src/fragment_tree.rs | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 45cdfdd52297..901ebed9ae48 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -1141,9 +1141,14 @@ mod tests { let max_depth = 4; storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = - Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, max_depth, vec![]) - .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + max_depth, + vec![], + ) + .unwrap(); let tree = FragmentTree::populate(scope, &storage); let candidates: Vec<_> = tree.candidates().collect(); @@ -1199,9 +1204,14 @@ mod tests { let max_depth = 4; storage.add_candidate(candidate_a, pvd_a).unwrap(); storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = - Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, max_depth, vec![]) - .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + max_depth, + vec![], + ) + .unwrap(); let tree = FragmentTree::populate(scope, &storage); let candidates: Vec<_> = tree.candidates().collect(); From f237bb64c94715e8bc0bc1b4c3ca2befb7af1c57 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 18 Apr 2022 23:08:05 -0500 Subject: [PATCH 67/87] test hypothetical depths (non-recursive) --- .../src/fragment_tree.rs | 109 ++++++++++++++++-- 1 file changed, 99 insertions(+), 10 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 901ebed9ae48..613b06d2f9c9 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ 
b/node/core/prospective-parachains/src/fragment_tree.rs @@ -328,7 +328,7 @@ impl FragmentTree { self.candidates .entry(candidate_hash) - .or_insert_with(|| bitvec![u16, Msb0; 0; max_depth]) + .or_insert_with(|| bitvec![u16, Msb0; 0; max_depth + 1]) .set(node.depth, true); match parent_pointer { @@ -414,7 +414,7 @@ impl FragmentTree { } /// Returns the hypothetical depths where a candidate with the given hash and parent head data - /// would be added to the tree. + /// would be added to the tree, without applying other candidates recursively on top of it. /// /// If the candidate is already known, this returns the actual depths where this /// candidate is part of the tree. @@ -431,14 +431,16 @@ impl FragmentTree { // if out of scope. let candidate_relay_parent_number = - if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { + if self.scope.relay_parent.hash == candidate_relay_parent { + self.scope.relay_parent.number + } else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { info.number } else { return Vec::new() }; let max_depth = self.scope.max_depth; - let mut depths = bitvec![u16, Msb0; 0; max_depth]; + let mut depths = bitvec![u16, Msb0; 0; max_depth + 1]; // iterate over all nodes < max_depth where parent head-data matches, // relay-parent number is <= candidate, and depth < max_depth. @@ -544,7 +546,7 @@ impl FragmentTree { }, }; - if child_depth >= self.scope.max_depth { + if child_depth > self.scope.max_depth { continue } @@ -1153,17 +1155,19 @@ mod tests { let candidates: Vec<_> = tree.candidates().collect(); assert_eq!(candidates.len(), 1); - assert_eq!(tree.nodes.len(), max_depth); + assert_eq!(tree.nodes.len(), max_depth + 1); assert_eq!(tree.nodes[0].parent, NodePointer::Root); assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash); assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); } #[test] @@ -1216,22 +1220,107 @@ mod tests { let candidates: Vec<_> = tree.candidates().collect(); assert_eq!(candidates.len(), 2); - assert_eq!(tree.nodes.len(), max_depth); + assert_eq!(tree.nodes.len(), max_depth + 1); assert_eq!(tree.nodes[0].parent, NodePointer::Root); assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1)); assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2)); + assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3)); assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash); assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash); } - // TODO [now]: hypothetical_depths for existing candidate. 
+ #[test] + fn hypothetical_depths_known_and_unknown() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), // input same as output + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0a].into(), // input same as output + 0, + ); + let candidate_b_hash = candidate_b.hash(); - // TODO [now]: hypothetical_depths for non-existing candidate based on root. + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - // TODO [now]: hypothetical_depths for non-existing candidate not based on root. + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), max_depth + 1); + + assert_eq!( + tree.hypothetical_depths( + candidate_a_hash, + HeadData::from(vec![0x0a]).hash(), + relay_parent_a, + ), + vec![0, 2, 4], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_b_hash, + HeadData::from(vec![0x0b]).hash(), + relay_parent_a, + ), + vec![1, 3], + ); + + assert_eq!( + tree.hypothetical_depths( + CandidateHash(Hash::repeat_byte(21)), + HeadData::from(vec![0x0a]).hash(), + relay_parent_a, + ), + vec![0, 2, 4], + ); + + assert_eq!( + tree.hypothetical_depths( + CandidateHash(Hash::repeat_byte(22)), + HeadData::from(vec![0x0b]).hash(), + relay_parent_a, + ), + vec![1, 3] + ); + } } From 1dc948b78c09672ab360761f7b52bb0222085f55 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 12:40:38 -0500 Subject: [PATCH 68/87] have CandidateSeconded return membership --- .../src/fragment_tree.rs | 24 ++++++++---- node/core/prospective-parachains/src/lib.rs | 37 ++++++++++++++++--- 2 files changed, 47 insertions(+), 14 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 613b06d2f9c9..bdae0688add8 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -66,10 +66,15 @@ use polkadot_primitives::vstaging::{ Id as ParaId, PersistedValidationData, }; -/// An error indicating that a supplied candidate didn't match the persisted -/// validation data provided alongside it. +/// Kinds of failures to import a candidate into storage. #[derive(Debug, Clone, PartialEq)] -pub(crate) struct PersistedValidationDataMismatch; +pub enum CandidateStorageInsertionError { + /// An error indicating that a supplied candidate didn't match the persisted + /// validation data provided alongside it. + PersistedValidationDataMismatch, + /// The candidate was already known. + CandidateAlreadyKnown(CandidateHash), +} pub(crate) struct CandidateStorage { // Index from parent head hash to candidate hashes. 
@@ -91,15 +96,15 @@ impl CandidateStorage { &mut self, candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - ) -> Result { + ) -> Result { let candidate_hash = candidate.hash(); if self.by_candidate_hash.contains_key(&candidate_hash) { - return Ok(candidate_hash) + return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) } if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash { - return Err(PersistedValidationDataMismatch) + return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) } let parent_head_hash = persisted_validation_data.parent_head.hash(); @@ -381,6 +386,11 @@ impl FragmentTree { self.candidates.keys().cloned() } + /// Whether the candidate exists and at what depths. + pub(crate) fn candidate(&self, candidate: &CandidateHash) -> Option> { + self.candidates.get(candidate).map(|d| d.iter_ones().collect()) + } + /// Add a candidate and recursively populate from storage. pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) { let candidate_entry = match storage.get(&hash) { @@ -635,7 +645,6 @@ impl FragmentTree { } } -#[allow(unused)] // TODO [now] struct FragmentNode { // A pointer to the parent node. parent: NodePointer, @@ -648,7 +657,6 @@ struct FragmentNode { children: Vec<(NodePointer, CandidateHash)>, } -#[allow(unused)] // TODO [now] impl FragmentNode { fn relay_parent(&self) -> Hash { self.fragment.relay_parent().hash diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 3145501b7043..a67bec69fcc4 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -77,11 +77,23 @@ pub struct HypotheticalDepthRequest { pub fragment_tree_relay_parent: Hash, } +/// Indicates the relay-parents whose fragment tree the candidate +/// is present in and the depths of that tree the candidate is present in. +pub type CandidateMembership = Vec<(Hash, Vec)>; + // TODO [now]: add this enum to the broader subsystem types. /// Messages sent to the Prospective Parachains subsystem. pub enum ProspectiveParachainsMessage { /// Inform the Prospective Parachains Subsystem of a new candidate. - CandidateSeconded(ParaId, CommittedCandidateReceipt, PersistedValidationData), + /// + /// The response sender accepts the candidate membership, which is empty + /// if the candidate was already known. + CandidateSeconded( + ParaId, + CommittedCandidateReceipt, + PersistedValidationData, + oneshot::Sender, + ), /// Inform the Prospective Parachains Subsystem that a previously seconded candidate /// has been backed. This requires that `CandidateSeconded` was sent for the candidate /// some time in the past. 
@@ -146,8 +158,8 @@ where }, FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOverseer::Communication { msg } => match msg { - ProspectiveParachainsMessage::CandidateSeconded(para, candidate, pvd) => - handle_candidate_seconded(&mut *ctx, view, para, candidate, pvd).await?, + ProspectiveParachainsMessage::CandidateSeconded(para, candidate, pvd, tx) => + handle_candidate_seconded(&mut *ctx, view, para, candidate, pvd, tx).await?, ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?, ProspectiveParachainsMessage::GetBackableCandidate( @@ -284,6 +296,7 @@ async fn handle_candidate_seconded( para: ParaId, candidate: CommittedCandidateReceipt, pvd: PersistedValidationData, + tx: oneshot::Sender, ) -> JfyiErrorResult<()> where Context: SubsystemContext, @@ -300,6 +313,7 @@ where "Received seconded candidate for inactive para", ); + let _ = tx.send(Vec::new()); return Ok(()) }, Some(storage) => storage, @@ -307,7 +321,13 @@ where let candidate_hash = match storage.add_candidate(candidate, pvd) { Ok(c) => c, - Err(crate::fragment_tree::PersistedValidationDataMismatch) => { + Err(crate::fragment_tree::CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { + let _ = tx.send(Vec::new()); + return Ok(()) + }, + Err( + crate::fragment_tree::CandidateStorageInsertionError::PersistedValidationDataMismatch, + ) => { // We can't log the candidate hash without either doing more ~expensive // hashing but this branch indicates something is seriously wrong elsewhere // so it's doubtful that it would affect debugging. @@ -318,16 +338,21 @@ where "Received seconded candidate had mismatching validation data", ); + let _ = tx.send(Vec::new()); return Ok(()) }, }; - for (_, leaf_data) in &mut view.active_leaves { + let mut membership = Vec::new(); + for (relay_parent, leaf_data) in &mut view.active_leaves { if let Some(tree) = leaf_data.fragment_trees.get_mut(¶) { tree.add_and_populate(candidate_hash, &*storage); - // TODO [now]: notify other subsystems of changes. + if let Some(depths) = tree.candidate(&candidate_hash) { + membership.push((*relay_parent, depths)); + } } } + let _ = tx.send(membership); Ok(()) } From 9db46eb4117e992dd5209013632f70f2f2ad6c5f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 12:47:24 -0500 Subject: [PATCH 69/87] tree membership requests --- node/core/prospective-parachains/src/lib.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index a67bec69fcc4..26900ec0d684 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -111,6 +111,8 @@ pub enum ProspectiveParachainsMessage { /// Returns an empty vector either if there is no such depth or the fragment tree relay-parent /// is unknown. GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), + /// Get the membership of the candidate in all fragment trees. 
+ GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), } struct RelayBlockViewData { @@ -170,6 +172,8 @@ where ) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx), ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx) => answer_hypothetical_depths_request(&view, request, tx), + ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => + answer_tree_membership_request(&view, para, candidate, tx), }, } } @@ -486,6 +490,23 @@ fn answer_hypothetical_depths_request( } } +fn answer_tree_membership_request( + view: &View, + para: ParaId, + candidate: CandidateHash, + tx: oneshot::Sender, +) { + let mut membership = Vec::new(); + for (relay_parent, view_data) in &view.active_leaves { + if let Some(tree) = view_data.fragment_trees.get(¶) { + if let Some(depths) = tree.candidate(&candidate) { + membership.push((*relay_parent, depths)); + } + } + } + let _ = tx.send(membership); +} + #[allow(unused)] // TODO [now] async fn fetch_constraints( ctx: &mut Context, From 7568d0626032d7f1e9af6ca3f7aa5cdb81873b89 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 12:54:04 -0500 Subject: [PATCH 70/87] Add a ProspectiveParachainsSubsystem struct --- node/core/prospective-parachains/src/lib.rs | 25 ++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 26900ec0d684..3433eea8a2d6 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -33,7 +33,7 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::ChainApiMessage, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, - SubsystemContext, + SpawnedSubsystem, SubsystemContext, SubsystemError, }; use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; use polkadot_primitives::vstaging::{ @@ -132,6 +132,25 @@ impl View { } } +/// The prospective parachains subsystem. +#[derive(Default)] +pub struct ProspectiveParachainsSubsystem; + +impl overseer::Subsystem for ProspectiveParachainsSubsystem +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + fn start(self, ctx: Context) -> SpawnedSubsystem { + SpawnedSubsystem { + future: run(ctx) + .map_err(|e| SubsystemError::with_origin("prospective-parachains", e)) + .boxed(), + name: "prospective-parachains-subsystem", + } + } +} + // TODO [now]: this is temporarily `pub` to make the unused lint behave reasonably. pub async fn run(mut ctx: Context) -> FatalResult<()> where @@ -256,8 +275,6 @@ where fragment_trees.insert(para, tree); } - // TODO [now]: notify subsystems of new trees. 
- view.active_leaves.insert(hash, RelayBlockViewData { fragment_trees }); } @@ -507,7 +524,6 @@ fn answer_tree_membership_request( let _ = tx.send(membership); } -#[allow(unused)] // TODO [now] async fn fetch_constraints( ctx: &mut Context, relay_parent: &Hash, @@ -517,7 +533,6 @@ async fn fetch_constraints( unimplemented!() } -#[allow(unused)] // TODO [now] async fn fetch_upcoming_paras( ctx: &mut Context, relay_parent: &Hash, From d1d240c89f76f1f4be437e320a80b965edf1ffff Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 16:06:29 -0500 Subject: [PATCH 71/87] add a staging API for base constraints --- node/core/prospective-parachains/src/lib.rs | 4 +- node/core/runtime-api/src/tests.rs | 5 ++ primitives/src/runtime_api.rs | 6 ++- primitives/src/vstaging/mod.rs | 58 +++++++++++++++++++++ runtime/kusama/src/lib.rs | 4 ++ runtime/polkadot/src/lib.rs | 4 ++ runtime/rococo/src/lib.rs | 4 ++ runtime/test-runtime/src/lib.rs | 4 ++ runtime/westend/src/lib.rs | 4 ++ 9 files changed, 90 insertions(+), 3 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 3433eea8a2d6..1da4a821fa78 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -244,7 +244,7 @@ where let candidate_storage = view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new); - let constraints = fetch_constraints(&mut *ctx, &hash, para).await?; + let constraints = fetch_base_constraints(&mut *ctx, &hash, para).await?; let constraints = match constraints { Some(c) => c, @@ -524,7 +524,7 @@ fn answer_tree_membership_request( let _ = tx.send(membership); } -async fn fetch_constraints( +async fn fetch_base_constraints( ctx: &mut Context, relay_parent: &Hash, para_id: ParaId, diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs index 0d732ba20c35..2fe6369dc4b2 100644 --- a/node/core/runtime-api/src/tests.rs +++ b/node/core/runtime-api/src/tests.rs @@ -20,6 +20,7 @@ use ::test_helpers::{dummy_committed_candidate_receipt, dummy_validation_code}; use futures::channel::oneshot; use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfiguration}; use polkadot_node_subsystem_test_helpers::make_subsystem_context; +use polkadot_primitives::vstaging; use polkadot_primitives::v2::{ AuthorityDiscoveryId, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, @@ -194,6 +195,10 @@ sp_api::mock_impl_runtime_apis! { fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { unimplemented!() } + + fn staging_validity_constraints(para_id: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl BabeApi for MockRuntimeApi { diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs index 84d2cf0ec4ca..fe695336eb82 100644 --- a/primitives/src/runtime_api.rs +++ b/primitives/src/runtime_api.rs @@ -44,7 +44,7 @@ //! For more details about how the API versioning works refer to `spi_api` //! documentation [here](https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html). -use crate::v2; +use crate::{v2, vstaging}; use parity_scale_codec::{Decode, Encode}; use polkadot_core_primitives as pcp; use polkadot_parachain::primitives as ppp; @@ -155,5 +155,9 @@ sp_api::decl_runtime_apis! { /// Returns all onchain disputes. /// This is a staging method! Do not use on production runtimes! 
fn staging_get_disputes() -> Vec<(v2::SessionIndex, v2::CandidateHash, v2::DisputeState)>; + + /// Returns the base constraints of the given para, if they exist. + /// This is a staging method! Do not use on production runtimes! + fn staging_validity_constraints(_: ppp::Id) -> Option; } } diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs index c6dd4d1bb76a..509beac8b4bf 100644 --- a/primitives/src/vstaging/mod.rs +++ b/primitives/src/vstaging/mod.rs @@ -18,3 +18,61 @@ // Put any primitives used by staging API functions here pub use crate::v2::*; +use sp_std::prelude::*; + +use parity_scale_codec::{Decode, Encode}; +use primitives::RuntimeDebug; +use scale_info::TypeInfo; + +/// Useful type alias for Para IDs. +pub type ParaId = Id; + +/// Constraints on inbound HRMP channels. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct InboundHrmpLimitations { + /// An exhaustive set of all valid watermarks, sorted ascending + pub valid_watermarks: Vec, +} + +/// Constraints on outbound HRMP channels. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct OutboundHrmpChannelLimitations { + /// The maximum bytes that can be written to the channel. + pub bytes_remaining: u32, + /// The maximum messages that can be written to the channel. + pub messages_remaining: u32, +} + +/// Constraints on the actions that can be taken by a new parachain +/// block. These limitations are implicitly associated with some particular +/// parachain, which should be apparent from usage. +#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +pub struct Constraints { + /// The minimum relay-parent number accepted under these constraints. + pub min_relay_parent_number: BlockNumber, + /// The maximum Proof-of-Validity size allowed, in bytes. + pub max_pov_size: u32, + /// The maximum new validation code size allowed, in bytes. + pub max_code_size: u32, + /// The amount of UMP messages remaining. + pub ump_remaining: u32, + /// The amount of UMP bytes remaining. + pub ump_remaining_bytes: u32, + /// The amount of remaining DMP messages. + pub dmp_remaining_messages: u32, + /// The limitations of all registered inbound HRMP channels. + pub hrmp_inbound: InboundHrmpLimitations, + /// The limitations of all registered outbound HRMP channels. + pub hrmp_channels_out: Vec<(ParaId, OutboundHrmpChannelLimitations)>, + /// The maximum number of HRMP messages allowed per candidate. + pub max_hrmp_num_per_candidate: u32, + /// The required parent head-data of the parachain. + pub required_parent: HeadData, + /// The expected validation-code-hash of this parachain. + pub validation_code_hash: ValidationCodeHash, + /// The code upgrade restriction signal as-of this parachain. + pub upgrade_restriction: Option, + /// The future validation code hash, if any, and at what relay-parent + /// number the upgrade would be minimally applied. + pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, +} diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 0ddf10b72f12..18b057edbc1d 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -1774,6 +1774,10 @@ sp_api::impl_runtime_apis! 
{ fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { unimplemented!() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 5c0735f7421c..262a1d529c4d 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -1723,6 +1723,10 @@ sp_api::impl_runtime_apis! { fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { unimplemented!() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 31a123721533..19b55f599812 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1256,6 +1256,10 @@ sp_api::impl_runtime_apis! { fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { unimplemented!() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl fg_primitives::GrandpaApi for Runtime { diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 03b9eb726d34..fc0d9a2691b1 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -903,6 +903,10 @@ sp_api::impl_runtime_apis! { fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { polkadot_runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index f9a4aba12dff..0077e8d87889 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1326,6 +1326,10 @@ sp_api::impl_runtime_apis! 
{ fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState)> { runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::() } + + fn staging_validity_constraints(_: ParaId) -> Option { + unimplemented!("Staging API not implemented"); + } } impl beefy_primitives::BeefyApi for Runtime { From f7b3f88da66295e760104ebee2a04489a766032b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 17:20:14 -0500 Subject: [PATCH 72/87] add a `From` impl --- .../src/inclusion_emulator/staging.rs | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 5e1ff83e46f3..512d539951f1 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -115,7 +115,7 @@ use polkadot_primitives::vstaging::{ BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, - PersistedValidationData, UpgradeRestriction, ValidationCodeHash, + PersistedValidationData, UpgradeRestriction, ValidationCodeHash, Constraints as PrimitiveConstraints, }; use std::collections::HashMap; @@ -169,6 +169,31 @@ pub struct Constraints { pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>, } +impl From for Constraints { + fn from(c: PrimitiveConstraints) -> Self { + Constraints { + min_relay_parent_number: c.min_relay_parent_number, + max_pov_size: c.max_pov_size as _, + max_code_size: c.max_code_size as _, + ump_remaining: c.ump_remaining as _, + ump_remaining_bytes: c.ump_remaining_bytes as _, + dmp_remaining_messages: c.dmp_remaining_messages as _, + hrmp_inbound: InboundHrmpLimitations { + valid_watermarks: c.hrmp_inbound.valid_watermarks, + }, + hrmp_channels_out: c.hrmp_channels_out.into_iter().map(|(para_id, limits)| (para_id, OutboundHrmpChannelLimitations { + bytes_remaining: limits.bytes_remaining as _, + messages_remaining: limits.messages_remaining as _, + })).collect(), + max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _, + required_parent: c.required_parent, + validation_code_hash: c.validation_code_hash, + upgrade_restriction: c.upgrade_restriction, + future_validation_code: c.future_validation_code, + } + } +} + /// Kinds of errors that can occur when modifying constraints. 
#[derive(Debug, Clone, PartialEq)] pub enum ModificationError { From 092f88ea97c6ad402de983db444e36620d3f87e0 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 17:35:33 -0500 Subject: [PATCH 73/87] add runtime API for staging_validity_constraints --- node/core/runtime-api/src/cache.rs | 44 ++++++++++++++++--- node/core/runtime-api/src/lib.rs | 13 ++++++ node/core/runtime-api/src/tests.rs | 18 ++++---- node/subsystem-types/src/messages.rs | 22 ++++++---- .../src/inclusion_emulator/staging.rs | 22 +++++++--- primitives/src/v2/mod.rs | 1 + primitives/src/vstaging/mod.rs | 6 +++ 7 files changed, 98 insertions(+), 28 deletions(-) diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs index 3dab90e4c74e..1d087a8609ae 100644 --- a/node/core/runtime-api/src/cache.rs +++ b/node/core/runtime-api/src/cache.rs @@ -20,12 +20,15 @@ use memory_lru::{MemoryLruCache, ResidentSize}; use parity_util_mem::{MallocSizeOf, MallocSizeOfExt}; use sp_consensus_babe::Epoch; -use polkadot_primitives::v2::{ - AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, - CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id as ParaId, - InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, - PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, - ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, +use polkadot_primitives::{ + v2::{ + AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, + CommittedCandidateReceipt, CoreState, GroupRotationInfo, Hash, Id as ParaId, + InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + }, + vstaging as vstaging_primitives, }; const AUTHORITIES_CACHE_SIZE: usize = 128 * 1024; @@ -48,6 +51,8 @@ const PVFS_REQUIRE_PRECHECK_SIZE: usize = 1024; const VALIDATION_CODE_HASH_CACHE_SIZE: usize = 64 * 1024; const VERSION_CACHE_SIZE: usize = 4 * 1024; +const STAGING_VALIDITY_CONSTRAINTS_CACHE_SIZE: usize = 10 * 1024; + struct ResidentSizeOf(T); impl ResidentSize for ResidentSizeOf { @@ -114,6 +119,10 @@ pub(crate) struct RequestResultCache { (Hash, ParaId, OccupiedCoreAssumption), ResidentSizeOf>, >, + + staging_validity_constraints: + MemoryLruCache<(Hash, ParaId), ResidentSizeOf>>, + version: MemoryLruCache>, } @@ -141,6 +150,11 @@ impl Default for RequestResultCache { on_chain_votes: MemoryLruCache::new(ON_CHAIN_VOTES_CACHE_SIZE), pvfs_require_precheck: MemoryLruCache::new(PVFS_REQUIRE_PRECHECK_SIZE), validation_code_hash: MemoryLruCache::new(VALIDATION_CODE_HASH_CACHE_SIZE), + + staging_validity_constraints: MemoryLruCache::new( + STAGING_VALIDITY_CONSTRAINTS_CACHE_SIZE, + ), + version: MemoryLruCache::new(VERSION_CACHE_SIZE), } } @@ -400,6 +414,21 @@ impl RequestResultCache { self.validation_code_hash.insert(key, ResidentSizeOf(value)); } + pub(crate) fn staging_validity_constraints( + &mut self, + key: (Hash, ParaId), + ) -> Option<&Option> { + self.staging_validity_constraints.get(&key).map(|v| &v.0) + } + + pub(crate) fn cache_staging_validity_constraints( + &mut self, + key: (Hash, ParaId), + value: Option, + ) { + self.staging_validity_constraints.insert(key, ResidentSizeOf(value)); + } + pub(crate) fn version(&mut self, relay_parent: &Hash) -> Option<&u32> { self.version.get(&relay_parent).map(|v| &v.0) } @@ -441,5 
+470,8 @@ pub(crate) enum RequestResult { // This is a request with side-effects and no result, hence (). SubmitPvfCheckStatement(Hash, PvfCheckStatement, ValidatorSignature, ()), ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option), + + StagingValidityConstraints(Hash, ParaId, Option), + Version(Hash, u32), } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 3665cb57ccbe..045a99bd4dc6 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -165,6 +165,11 @@ where ValidationCodeHash(relay_parent, para_id, assumption, hash) => self .requests_cache .cache_validation_code_hash((relay_parent, para_id, assumption), hash), + + StagingValidityConstraints(relay_parent, para_id, constraints) => self + .requests_cache + .cache_staging_validity_constraints((relay_parent, para_id), constraints), + Version(relay_parent, version) => self.requests_cache.cache_version(relay_parent, version), } @@ -268,6 +273,10 @@ where Request::ValidationCodeHash(para, assumption, sender) => query!(validation_code_hash(para, assumption), sender) .map(|sender| Request::ValidationCodeHash(para, assumption, sender)), + + Request::StagingValidityConstraints(para, sender) => + query!(staging_validity_constraints(para), sender) + .map(|sender| Request::StagingValidityConstraints(para, sender)), } } @@ -524,6 +533,10 @@ where }, Request::ValidationCodeHash(para, assumption, sender) => query!(ValidationCodeHash, validation_code_hash(para, assumption), ver = 2, sender), + + Request::StagingValidityConstraints(para, sender) => { + query!(StagingValidityConstraints, staging_validity_constraints(para), ver = 2, sender) + }, } } diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs index 2fe6369dc4b2..2a318d3deff3 100644 --- a/node/core/runtime-api/src/tests.rs +++ b/node/core/runtime-api/src/tests.rs @@ -20,13 +20,15 @@ use ::test_helpers::{dummy_committed_candidate_receipt, dummy_validation_code}; use futures::channel::oneshot; use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfiguration}; use polkadot_node_subsystem_test_helpers::make_subsystem_context; -use polkadot_primitives::vstaging; -use polkadot_primitives::v2::{ - AuthorityDiscoveryId, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt, - CoreState, DisputeState, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, - ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, +use polkadot_primitives::{ + v2::{ + AuthorityDiscoveryId, BlockNumber, CandidateEvent, CandidateHash, + CommittedCandidateReceipt, CoreState, DisputeState, GroupRotationInfo, Id as ParaId, + InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, + PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + }, + vstaging, }; use sp_core::testing::TaskExecutor; use std::{ @@ -196,7 +198,7 @@ sp_api::mock_impl_runtime_apis! 
{ unimplemented!() } - fn staging_validity_constraints(para_id: ParaId) -> Option { + fn staging_validity_constraints(_: ParaId) -> Option { unimplemented!("Staging API not implemented"); } } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 756c46935be8..eac661d67d3a 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -38,14 +38,17 @@ use polkadot_node_primitives::{ CollationSecondedSignal, DisputeMessage, ErasureChunk, PoV, SignedDisputeStatement, SignedFullStatement, ValidationResult, }; -use polkadot_primitives::v2::{ - AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, - CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState, GroupIndex, - GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, - PvfCheckStatement, SessionIndex, SessionInfo, SignedAvailabilityBitfield, - SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, +use polkadot_primitives::{ + v2::{ + AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, + CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState, + GroupIndex, GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId, + InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, + OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, SessionIndex, + SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, + ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, + }, + vstaging as vstaging_primitives, }; use polkadot_statement_table::v2::Misbehavior; use std::{ @@ -693,6 +696,9 @@ pub enum RuntimeApiRequest { OccupiedCoreAssumption, RuntimeApiSender>, ), + /// Get the validity constraints of the given para. + /// This is a staging API that will not be available on production runtimes. + StagingValidityConstraints(ParaId, RuntimeApiSender>), } /// A message to the Runtime API subsystem. diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 512d539951f1..60eecb9b5180 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -114,8 +114,9 @@ //! in practice at most once every few weeks. 
use polkadot_primitives::vstaging::{ - BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId, - PersistedValidationData, UpgradeRestriction, ValidationCodeHash, Constraints as PrimitiveConstraints, + BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, + Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData, + UpgradeRestriction, ValidationCodeHash, }; use std::collections::HashMap; @@ -181,10 +182,19 @@ impl From for Constraints { hrmp_inbound: InboundHrmpLimitations { valid_watermarks: c.hrmp_inbound.valid_watermarks, }, - hrmp_channels_out: c.hrmp_channels_out.into_iter().map(|(para_id, limits)| (para_id, OutboundHrmpChannelLimitations { - bytes_remaining: limits.bytes_remaining as _, - messages_remaining: limits.messages_remaining as _, - })).collect(), + hrmp_channels_out: c + .hrmp_channels_out + .into_iter() + .map(|(para_id, limits)| { + ( + para_id, + OutboundHrmpChannelLimitations { + bytes_remaining: limits.bytes_remaining as _, + messages_remaining: limits.messages_remaining as _, + }, + ) + }) + .collect(), max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _, required_parent: c.required_parent, validation_code_hash: c.validation_code_hash, diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs index 53423807a76d..bcaf31b5c118 100644 --- a/primitives/src/v2/mod.rs +++ b/primitives/src/v2/mod.rs @@ -1129,6 +1129,7 @@ pub struct AbridgedHrmpChannel { /// A possible upgrade restriction that prevents a parachain from performing an upgrade. #[derive(Copy, Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub enum UpgradeRestriction { /// There is an upgrade restriction and there are no details about its specifics nor how long /// it could last. diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs index 509beac8b4bf..87cf8c8ba85c 100644 --- a/primitives/src/vstaging/mod.rs +++ b/primitives/src/vstaging/mod.rs @@ -24,11 +24,15 @@ use parity_scale_codec::{Decode, Encode}; use primitives::RuntimeDebug; use scale_info::TypeInfo; +#[cfg(feature = "std")] +use parity_util_mem::MallocSizeOf; + /// Useful type alias for Para IDs. pub type ParaId = Id; /// Constraints on inbound HRMP channels. #[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub struct InboundHrmpLimitations { /// An exhaustive set of all valid watermarks, sorted ascending pub valid_watermarks: Vec, @@ -36,6 +40,7 @@ pub struct InboundHrmpLimitations { /// Constraints on outbound HRMP channels. #[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub struct OutboundHrmpChannelLimitations { /// The maximum bytes that can be written to the channel. pub bytes_remaining: u32, @@ -47,6 +52,7 @@ pub struct OutboundHrmpChannelLimitations { /// block. These limitations are implicitly associated with some particular /// parachain, which should be apparent from usage. #[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf))] pub struct Constraints { /// The minimum relay-parent number accepted under these constraints. 
pub min_relay_parent_number: BlockNumber, From 4519a78e49fba8ba9891296b5316baa7c5f81313 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 17:46:25 -0500 Subject: [PATCH 74/87] implement fetch_base_constraints --- node/core/prospective-parachains/src/error.rs | 3 ++ node/core/prospective-parachains/src/lib.rs | 28 +++++++++++++------ 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index 9f1fd6607afc..e7fa2f0e9641 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -56,6 +56,9 @@ pub enum Error { #[error("Request to chain API subsystem dropped")] ChainApiRequestCanceled(oneshot::Canceled), + + #[error("Request to runtime API subsystem dropped")] + RuntimeApiRequestCanceled(oneshot::Canceled), } /// General `Result` type. diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 1da4a821fa78..7f9b97152735 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -32,8 +32,9 @@ use std::collections::{HashMap, HashSet}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ - messages::ChainApiMessage, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, - SpawnedSubsystem, SubsystemContext, SubsystemError, + messages::{ChainApiMessage, RuntimeApiMessage, RuntimeApiRequest}, + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, + SubsystemError, }; use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; use polkadot_primitives::vstaging::{ @@ -151,8 +152,7 @@ where } } -// TODO [now]: this is temporarily `pub` to make the unused lint behave reasonably. -pub async fn run(mut ctx: Context) -> FatalResult<()> +async fn run(mut ctx: Context) -> FatalResult<()> where Context: SubsystemContext, Context: overseer::SubsystemContext, @@ -244,7 +244,7 @@ where let candidate_storage = view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new); - let constraints = fetch_base_constraints(&mut *ctx, &hash, para).await?; + let constraints = fetch_base_constraints(&mut *ctx, hash, para).await?; let constraints = match constraints { Some(c) => c, @@ -526,11 +526,21 @@ fn answer_tree_membership_request( async fn fetch_base_constraints( ctx: &mut Context, - relay_parent: &Hash, + relay_parent: Hash, para_id: ParaId, -) -> JfyiErrorResult> { - // TODO [now]: probably a new runtime API. 
- unimplemented!() +) -> JfyiErrorResult> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + let (tx, rx) = oneshot::channel(); + ctx.send_message(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::StagingValidityConstraints(para_id, tx), + )) + .await; + + Ok(rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??.map(From::from)) } async fn fetch_upcoming_paras( From f4cc8f13a3df5899c9f4ddb08573003985bb7a12 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 17:53:31 -0500 Subject: [PATCH 75/87] implement `fetch_upcoming_paras` --- node/core/prospective-parachains/src/lib.rs | 42 ++++++++++++++++++--- 1 file changed, 37 insertions(+), 5 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 7f9b97152735..2a5cbcb476dc 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -39,6 +39,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; use polkadot_primitives::vstaging::{ CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, + CoreState, }; use crate::{ @@ -218,7 +219,7 @@ where for activated in update.activated.into_iter() { let hash = activated.hash; - let scheduled_paras = fetch_upcoming_paras(&mut *ctx, &hash).await?; + let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, hash).await? { None => { @@ -545,10 +546,41 @@ where async fn fetch_upcoming_paras( ctx: &mut Context, - relay_parent: &Hash, -) -> JfyiErrorResult> { - // TODO [now]: use `availability_cores` or something like it. - unimplemented!() + relay_parent: Hash, +) -> JfyiErrorResult> +where + Context: SubsystemContext, + Context: overseer::SubsystemContext, +{ + let (tx, rx) = oneshot::channel(); + + // This'll have to get more sophisticated with parathreads, + // but for now we can just use the `AvailabilityCores`. + ctx.send_message(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::AvailabilityCores(tx), + )).await; + + let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??; + let mut upcoming = HashSet::new(); + for core in cores { + match core { + CoreState::Occupied(occupied) => { + if let Some(next_up_on_available) = occupied.next_up_on_available { + upcoming.insert(next_up_on_available.para_id); + } + if let Some(next_up_on_time_out) = occupied.next_up_on_time_out { + upcoming.insert(next_up_on_time_out.para_id); + } + } + CoreState::Scheduled(scheduled) => { + upcoming.insert(scheduled.para_id); + } + CoreState::Free => {} + } + } + + Ok(upcoming.into_iter().collect()) } // Fetch ancestors in descending order, up to the amount requested. 
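
For orientation, the seconding flow that the preceding patches establish looks roughly as follows from the
calling subsystem's side: send the candidate together with its persisted validation data and a response
channel, then read the fragment-tree membership back (the alias is `CandidateMembership` at this point in
the series; a later patch in this same series renames it to `FragmentTreeMembership`). The sketch below is
illustrative only and is not taken from the patches: `ctx`, `para`, `candidate` and `pvd` are assumed to be
in scope in the seconding subsystem, and the overseer wiring that routes `ProspectiveParachainsMessage` for
that subsystem is assumed to exist.

    // Illustrative sketch only (assumptions as noted above). Uses the same
    // `futures::channel::oneshot` channels as the subsystem itself.
    let (tx, rx) = oneshot::channel();
    ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded(
        para, candidate, pvd, tx,
    ))
    .await;

    // Each entry is (active-leaf hash, depths the candidate occupies in that
    // leaf's fragment tree). An empty response means the candidate was not
    // newly added to any tree: it was already known, its para is not live at
    // any active leaf, or its persisted validation data did not match.
    let membership: CandidateMembership = rx.await.unwrap_or_default();
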
From 703e9d60f62a55acfa856595a2c4707117c45864 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 17:55:36 -0500 Subject: [PATCH 76/87] remove reconstruction of candidate receipt; no obvious usecase --- .../src/fragment_tree.rs | 30 +------------------ 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index bdae0688add8..996e12110c6a 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -62,7 +62,7 @@ use polkadot_node_subsystem_util::inclusion_emulator::staging::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::vstaging::{ - BlockNumber, CandidateDescriptor, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, + BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, }; @@ -112,7 +112,6 @@ impl CandidateStorage { let entry = CandidateEntry { candidate_hash, relay_parent: candidate.descriptor.relay_parent, - erasure_root: candidate.descriptor.erasure_root, state: CandidateState::Seconded, candidate: ProspectiveCandidate { commitments: candidate.commitments, @@ -192,7 +191,6 @@ struct CandidateEntry { relay_parent: Hash, candidate: ProspectiveCandidate, state: CandidateState, - erasure_root: Hash, } /// The scope of a [`FragmentTree`]. @@ -628,7 +626,6 @@ impl FragmentTree { let node = FragmentNode { parent: parent_pointer, fragment, - erasure_root: candidate.erasure_root.clone(), candidate_hash: candidate.candidate_hash.clone(), depth: child_depth, cumulative_modifications, @@ -649,7 +646,6 @@ struct FragmentNode { // A pointer to the parent node. parent: NodePointer, fragment: Fragment, - erasure_root: Hash, candidate_hash: CandidateHash, depth: usize, cumulative_modifications: ConstraintModifications, @@ -662,30 +658,6 @@ impl FragmentNode { self.fragment.relay_parent().hash } - fn parent_head_data(&self) -> &HeadData { - &self.fragment.candidate().persisted_validation_data.parent_head - } - - /// Produce a candidate receipt from this fragment node. 
- fn produce_candidate_receipt(&self, para_id: ParaId) -> CommittedCandidateReceipt { - let candidate = self.fragment.candidate(); - - CommittedCandidateReceipt { - commitments: candidate.commitments.clone(), - descriptor: CandidateDescriptor { - para_id, - relay_parent: self.relay_parent(), - collator: candidate.collator.clone(), - signature: candidate.collator_signature.clone(), - persisted_validation_data_hash: candidate.persisted_validation_data.hash(), - pov_hash: candidate.pov_hash, - erasure_root: self.erasure_root, - para_head: candidate.commitments.head_data.hash(), - validation_code_hash: candidate.validation_code_hash.clone(), - }, - } - } - fn candidate_child(&self, candidate_hash: &CandidateHash) -> Option { self.children.iter().find(|(_, c)| c == candidate_hash).map(|(p, _)| *p) } From 20bbbb07dd05585fa27bf58b8d84f4ba3804913c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 17:55:49 -0500 Subject: [PATCH 77/87] fmt --- .../prospective-parachains/src/fragment_tree.rs | 4 ++-- node/core/prospective-parachains/src/lib.rs | 13 +++++++------ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 996e12110c6a..fd97857c2d9e 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -62,8 +62,8 @@ use polkadot_node_subsystem_util::inclusion_emulator::staging::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, - Id as ParaId, PersistedValidationData, + BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, + PersistedValidationData, }; /// Kinds of failures to import a candidate into storage. 
diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 2a5cbcb476dc..2ce2671d31dd 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -38,8 +38,8 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData, - CoreState, + CandidateHash, CommittedCandidateReceipt, CoreState, Hash, Id as ParaId, + PersistedValidationData, }; use crate::{ @@ -559,7 +559,8 @@ where ctx.send_message(RuntimeApiMessage::Request( relay_parent, RuntimeApiRequest::AvailabilityCores(tx), - )).await; + )) + .await; let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??; let mut upcoming = HashSet::new(); @@ -572,11 +573,11 @@ where if let Some(next_up_on_time_out) = occupied.next_up_on_time_out { upcoming.insert(next_up_on_time_out.para_id); } - } + }, CoreState::Scheduled(scheduled) => { upcoming.insert(scheduled.para_id); - } - CoreState::Free => {} + }, + CoreState::Free => {}, } } From 522830400ab68e46ec5ce0eddcf186e2cbf34b2d Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 17:56:35 -0500 Subject: [PATCH 78/87] export message to broader module --- node/core/prospective-parachains/src/lib.rs | 38 ++------------------- node/subsystem-types/src/messages.rs | 34 ++++++++++++++++++ 2 files changed, 37 insertions(+), 35 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 2ce2671d31dd..adfad11a1e73 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -32,7 +32,9 @@ use std::collections::{HashMap, HashSet}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ - messages::{ChainApiMessage, RuntimeApiMessage, RuntimeApiRequest}, + messages::{ + ChainApiMessage, ProspectiveParachainsSubsystem, RuntimeApiMessage, RuntimeApiRequest, + }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError, }; @@ -83,40 +85,6 @@ pub struct HypotheticalDepthRequest { /// is present in and the depths of that tree the candidate is present in. pub type CandidateMembership = Vec<(Hash, Vec)>; -// TODO [now]: add this enum to the broader subsystem types. -/// Messages sent to the Prospective Parachains subsystem. -pub enum ProspectiveParachainsMessage { - /// Inform the Prospective Parachains Subsystem of a new candidate. - /// - /// The response sender accepts the candidate membership, which is empty - /// if the candidate was already known. - CandidateSeconded( - ParaId, - CommittedCandidateReceipt, - PersistedValidationData, - oneshot::Sender, - ), - /// Inform the Prospective Parachains Subsystem that a previously seconded candidate - /// has been backed. This requires that `CandidateSeconded` was sent for the candidate - /// some time in the past. - CandidateBacked(ParaId, CandidateHash), - /// Get a backable candidate hash for the given parachain, under the given relay-parent hash, - /// which is a descendant of the given candidate hashes. Returns `None` on the channel - /// if no such candidate exists. 
- GetBackableCandidate(Hash, ParaId, Vec, oneshot::Sender>), - /// Get the hypothetical depths that a candidate with the given properties would - /// occupy in the fragment tree for the given relay-parent. - /// - /// If the candidate is already known, this returns the depths the candidate - /// occupies. - /// - /// Returns an empty vector either if there is no such depth or the fragment tree relay-parent - /// is unknown. - GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), - /// Get the membership of the candidate in all fragment trees. - GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), -} - struct RelayBlockViewData { // Scheduling info for paras and upcoming paras. fragment_trees: HashMap, diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index eac661d67d3a..3ee6bafe4e83 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -935,3 +935,37 @@ pub enum GossipSupportMessage { /// Currently non-instantiable. #[derive(Debug)] pub enum PvfCheckerMessage {} + +/// Messages sent to the Prospective Parachains subsystem. +#[derive(Debug)] +pub enum ProspectiveParachainsMessage { + /// Inform the Prospective Parachains Subsystem of a new candidate. + /// + /// The response sender accepts the candidate membership, which is empty + /// if the candidate was already known. + CandidateSeconded( + ParaId, + CommittedCandidateReceipt, + PersistedValidationData, + oneshot::Sender, + ), + /// Inform the Prospective Parachains Subsystem that a previously seconded candidate + /// has been backed. This requires that `CandidateSeconded` was sent for the candidate + /// some time in the past. + CandidateBacked(ParaId, CandidateHash), + /// Get a backable candidate hash for the given parachain, under the given relay-parent hash, + /// which is a descendant of the given candidate hashes. Returns `None` on the channel + /// if no such candidate exists. + GetBackableCandidate(Hash, ParaId, Vec, oneshot::Sender>), + /// Get the hypothetical depths that a candidate with the given properties would + /// occupy in the fragment tree for the given relay-parent. + /// + /// If the candidate is already known, this returns the depths the candidate + /// occupies. + /// + /// Returns an empty vector either if there is no such depth or the fragment tree relay-parent + /// is unknown. + GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), + /// Get the membership of the candidate in all fragment trees. + GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), +} From d58e53d08b388fc402ecdbca8d9998f4a4066cba Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 17:56:46 -0500 Subject: [PATCH 79/87] remove last TODO --- node/core/prospective-parachains/src/lib.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index adfad11a1e73..7adb5149725e 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -618,5 +618,3 @@ struct MetricsInner; /// Prospective parachain metrics. 
#[derive(Default, Clone)] pub struct Metrics(Option); - -// TODO [now]: impl metrics From 0c994371df90d18eaea58642a35b9ec289637c82 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 20 Apr 2022 18:52:45 -0500 Subject: [PATCH 80/87] correctly export --- node/core/prospective-parachains/src/lib.rs | 26 ++++----------------- node/subsystem-types/src/messages.rs | 19 +++++++++++++++ 2 files changed, 23 insertions(+), 22 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 7adb5149725e..b95b757d533b 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -33,7 +33,8 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, ProspectiveParachainsSubsystem, RuntimeApiMessage, RuntimeApiRequest, + ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, + ProspectiveParachainsSubsystem, RuntimeApiMessage, RuntimeApiRequest, }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError, @@ -66,25 +67,6 @@ const MAX_DEPTH: usize = 4; // The maximum ancestry we support. const MAX_ANCESTRY: usize = 5; -/// A request for the depths a hypothetical candidate would occupy within -/// some fragment tree. -pub struct HypotheticalDepthRequest { - /// The hash of the potential candidate. - pub candidate_hash: CandidateHash, - /// The para of the candidate. - pub candidate_para: ParaId, - /// The hash of the parent head-data of the candidate. - pub parent_head_data_hash: Hash, - /// The relay-parent of the candidate. - pub candidate_relay_parent: Hash, - /// The relay-parent of the fragment tree we are comparing to. - pub fragment_tree_relay_parent: Hash, -} - -/// Indicates the relay-parents whose fragment tree the candidate -/// is present in and the depths of that tree the candidate is present in. -pub type CandidateMembership = Vec<(Hash, Vec)>; - struct RelayBlockViewData { // Scheduling info for paras and upcoming paras. fragment_trees: HashMap, @@ -286,7 +268,7 @@ async fn handle_candidate_seconded( para: ParaId, candidate: CommittedCandidateReceipt, pvd: PersistedValidationData, - tx: oneshot::Sender, + tx: oneshot::Sender, ) -> JfyiErrorResult<()> where Context: SubsystemContext, @@ -480,7 +462,7 @@ fn answer_tree_membership_request( view: &View, para: ParaId, candidate: CandidateHash, - tx: oneshot::Sender, + tx: oneshot::Sender, ) { let mut membership = Vec::new(); for (relay_parent, view_data) in &view.active_leaves { diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 3ee6bafe4e83..8370793d2086 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -936,6 +936,25 @@ pub enum GossipSupportMessage { #[derive(Debug)] pub enum PvfCheckerMessage {} +/// A request for the depths a hypothetical candidate would occupy within +/// some fragment tree. +pub struct HypotheticalDepthRequest { + /// The hash of the potential candidate. + pub candidate_hash: CandidateHash, + /// The para of the candidate. + pub candidate_para: ParaId, + /// The hash of the parent head-data of the candidate. + pub parent_head_data_hash: Hash, + /// The relay-parent of the candidate. + pub candidate_relay_parent: Hash, + /// The relay-parent of the fragment tree we are comparing to. 
+ pub fragment_tree_relay_parent: Hash, +} + +/// Indicates the relay-parents whose fragment tree a candidate +/// is present in and the depths of that tree the candidate is present in. +pub type FragmentTreeMembership = Vec<(Hash, Vec)>; + /// Messages sent to the Prospective Parachains subsystem. #[derive(Debug)] pub enum ProspectiveParachainsMessage { From abec50141d67ad126983eefce255bb79fa11fe0f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 21 Apr 2022 15:34:23 -0500 Subject: [PATCH 81/87] fix compilation and add GetMinimumRelayParent request --- .../src/fragment_tree.rs | 8 ++++++- node/core/prospective-parachains/src/lib.rs | 21 +++++++++++++++++-- node/subsystem-types/src/messages.rs | 12 +++++++++-- 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index fd97857c2d9e..9972b60490a1 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -262,7 +262,8 @@ impl Scope { }) } - fn earliest_relay_parent(&self) -> RelayChainBlockInfo { + /// Get the earliest relay-parent allowed in the scope of the fragment tree. + pub fn earliest_relay_parent(&self) -> RelayChainBlockInfo { self.ancestors .iter() .next() @@ -321,6 +322,11 @@ impl FragmentTree { tree } + /// Get the scope of the Fragment Tree. + pub fn scope(&self) -> &Scope { + &self.scope + } + // Inserts a node and updates child references in a non-root parent. fn insert_node(&mut self, node: FragmentNode) { let pointer = NodePointer::Storage(self.nodes.len()); diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index b95b757d533b..c6b415a6ddb3 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -34,14 +34,14 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, - ProspectiveParachainsSubsystem, RuntimeApiMessage, RuntimeApiRequest, + ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, }, overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, SubsystemError, }; use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; use polkadot_primitives::vstaging::{ - CandidateHash, CommittedCandidateReceipt, CoreState, Hash, Id as ParaId, + BlockNumber, CandidateHash, CommittedCandidateReceipt, CoreState, Hash, Id as ParaId, PersistedValidationData, }; @@ -144,6 +144,8 @@ where answer_hypothetical_depths_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => answer_tree_membership_request(&view, para, candidate, tx), + ProspectiveParachainsMessage::GetMinimumRelayParent(para, relay_parent, tx) => + answer_minimum_relay_parent_request(&view, para, relay_parent, tx), }, } } @@ -475,6 +477,21 @@ fn answer_tree_membership_request( let _ = tx.send(membership); } +fn answer_minimum_relay_parent_request( + view: &View, + para: ParaId, + relay_parent: Hash, + tx: oneshot::Sender>, +) { + let res = view + .active_leaves + .get(&relay_parent) + .and_then(|data| data.fragment_trees.get(¶)) + .map(|tree| tree.scope().earliest_relay_parent().number); + + let _ = tx.send(res); +} + async fn fetch_base_constraints( ctx: &mut Context, relay_parent: Hash, diff --git a/node/subsystem-types/src/messages.rs 
b/node/subsystem-types/src/messages.rs index 8370793d2086..841e52e0acdd 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -938,6 +938,7 @@ pub enum PvfCheckerMessage {} /// A request for the depths a hypothetical candidate would occupy within /// some fragment tree. +#[derive(Debug)] pub struct HypotheticalDepthRequest { /// The hash of the potential candidate. pub candidate_hash: CandidateHash, @@ -966,7 +967,7 @@ pub enum ProspectiveParachainsMessage { ParaId, CommittedCandidateReceipt, PersistedValidationData, - oneshot::Sender, + oneshot::Sender, ), /// Inform the Prospective Parachains Subsystem that a previously seconded candidate /// has been backed. This requires that `CandidateSeconded` was sent for the candidate @@ -986,5 +987,12 @@ pub enum ProspectiveParachainsMessage { /// is unknown. GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), /// Get the membership of the candidate in all fragment trees. - GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), + GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), + /// Get the minimum accepted relay-parent number in the fragment tree + /// for the given relay-parent and para-id. + /// + /// That is, if the relay-parent is known and there's a fragment tree for it, + /// in this para-id, this returns the minimum relay-parent block number in the + /// same chain which is accepted in the fragment tree for the para-id. + GetMinimumRelayParent(ParaId, Hash, oneshot::Sender>), } From 8a7f1d9eb3ddf1c267cac535d7ba5573c29f5847 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 May 2022 15:45:42 -0500 Subject: [PATCH 82/87] make provisioner into a real subsystem with proper mesage bounds --- node/core/prospective-parachains/src/lib.rs | 53 +++++++-------------- node/overseer/src/dummy.rs | 8 +++- node/overseer/src/lib.rs | 8 +++- node/service/src/overseer.rs | 2 + 4 files changed, 31 insertions(+), 40 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index c6b415a6ddb3..a599cb106352 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -36,7 +36,7 @@ use polkadot_node_subsystem::{ ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, }, - overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemContext, + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; @@ -88,10 +88,10 @@ impl View { #[derive(Default)] pub struct ProspectiveParachainsSubsystem; -impl overseer::Subsystem for ProspectiveParachainsSubsystem +#[overseer::subsystem(ProspectiveParachains, error = SubsystemError, prefix = self::overseer)] +impl ProspectiveParachainsSubsystem where - Context: SubsystemContext, - Context: overseer::SubsystemContext, + Context: Send + Sync { fn start(self, ctx: Context) -> SpawnedSubsystem { SpawnedSubsystem { @@ -103,11 +103,8 @@ where } } -async fn run(mut ctx: Context) -> FatalResult<()> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, -{ +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn run(mut ctx: Context) -> FatalResult<()> { let mut view = View::new(); loop { crate::error::log_error( @@ -117,11 +114,8 @@ where } } -async fn 
run_iteration(ctx: &mut Context, view: &mut View) -> Result<()> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, -{ +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<()> { loop { match ctx.recv().await.map_err(FatalError::SubsystemReceive)? { FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()), @@ -151,15 +145,12 @@ where } } +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn handle_active_leaves_update( ctx: &mut Context, view: &mut View, update: ActiveLeavesUpdate, -) -> JfyiErrorResult<()> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, -{ +) -> JfyiErrorResult<()> { // 1. clean up inactive leaves // 2. determine all scheduled para at new block // 3. construct new fragment tree for each para for each new leaf @@ -264,6 +255,7 @@ fn prune_view_candidate_storage(view: &mut View) { }) } +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn handle_candidate_seconded( _ctx: &mut Context, view: &mut View, @@ -272,9 +264,6 @@ async fn handle_candidate_seconded( pvd: PersistedValidationData, tx: oneshot::Sender, ) -> JfyiErrorResult<()> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, { // Add the candidate to storage. // Then attempt to add it to all trees. @@ -331,15 +320,13 @@ where Ok(()) } +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn handle_candidate_backed( _ctx: &mut Context, view: &mut View, para: ParaId, candidate_hash: CandidateHash, ) -> JfyiErrorResult<()> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, { let storage = match view.candidate_storage.get_mut(¶) { None => { @@ -492,14 +479,12 @@ fn answer_minimum_relay_parent_request( let _ = tx.send(res); } +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn fetch_base_constraints( ctx: &mut Context, relay_parent: Hash, para_id: ParaId, ) -> JfyiErrorResult> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, { let (tx, rx) = oneshot::channel(); ctx.send_message(RuntimeApiMessage::Request( @@ -511,13 +496,11 @@ where Ok(rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??.map(From::from)) } +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn fetch_upcoming_paras( ctx: &mut Context, relay_parent: Hash, ) -> JfyiErrorResult> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, { let (tx, rx) = oneshot::channel(); @@ -552,14 +535,12 @@ where } // Fetch ancestors in descending order, up to the amount requested. 
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn fetch_ancestry( ctx: &mut Context, relay_hash: Hash, ancestors: usize, ) -> JfyiErrorResult> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, { let (tx, rx) = oneshot::channel(); ctx.send_message(ChainApiMessage::Ancestors { @@ -592,13 +573,11 @@ where Ok(block_info) } +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn fetch_block_info( ctx: &mut Context, relay_hash: Hash, ) -> JfyiErrorResult> -where - Context: SubsystemContext, - Context: overseer::SubsystemContext, { let (tx, rx) = oneshot::channel(); diff --git a/node/overseer/src/dummy.rs b/node/overseer/src/dummy.rs index b4a97c3e6321..a41bd240a1f4 100644 --- a/node/overseer/src/dummy.rs +++ b/node/overseer/src/dummy.rs @@ -86,6 +86,7 @@ pub fn dummy_overseer_builder<'a, Spawner, SupportsParachains>( DummySubsystem, DummySubsystem, DummySubsystem, + DummySubsystem, >, SubsystemError, > @@ -127,6 +128,7 @@ pub fn one_for_all_overseer_builder<'a, Spawner, SupportsParachains, Sub>( Sub, Sub, Sub, + Sub, >, SubsystemError, > @@ -154,7 +156,8 @@ where + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> - + Subsystem, SubsystemError>, + + Subsystem, SubsystemError> + + Subsystem, SubsystemError> { let metrics = ::register(registry)?; @@ -179,7 +182,8 @@ where .gossip_support(subsystem.clone()) .dispute_coordinator(subsystem.clone()) .dispute_distribution(subsystem.clone()) - .chain_selection(subsystem) + .chain_selection(subsystem.clone()) + .prospective_parachains(subsystem.clone()) .activation_external_listeners(Default::default()) .span_per_active_leaf(Default::default()) .active_leaves(Default::default()) diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index bcf486d2a0db..b80e6ce0bde0 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -84,7 +84,7 @@ use polkadot_node_subsystem_types::messages::{ ChainSelectionMessage, CollationGenerationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage, NetworkBridgeMessage, ProvisionerMessage, PvfCheckerMessage, RuntimeApiMessage, - StatementDistributionMessage, + StatementDistributionMessage, ProspectiveParachainsMessage, }; pub use polkadot_node_subsystem_types::{ errors::{SubsystemError, SubsystemResult}, @@ -564,6 +564,12 @@ pub struct Overseer { #[subsystem(blocking, ChainSelectionMessage, sends: [ChainApiMessage])] chain_selection: ChainSelection, + #[subsystem(ProspectiveParachainsMessage, sends: [ + RuntimeApiMessage, + ChainApiMessage, + ])] + prospective_parachains: ProspectiveParachains, + /// External listeners waiting for a hash to be in the active-leave set. 
pub activation_external_listeners: HashMap>>>, diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index bb3d9e840f1c..e0dce76b9393 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -178,6 +178,7 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( DisputeCoordinatorSubsystem, DisputeDistributionSubsystem, ChainSelectionSubsystem, + polkadot_overseer::DummySubsystem, // TODO [now]: use real prospective parachains >, Error, > @@ -291,6 +292,7 @@ where Metrics::register(registry)?, )) .chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db)) + .prospective_parachains(polkadot_overseer::DummySubsystem) .leaves(Vec::from_iter( leaves .into_iter() From 8f62772a300a832e2d1beeb08b57e265e7a6024c Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 May 2022 15:50:44 -0500 Subject: [PATCH 83/87] fmt --- node/core/prospective-parachains/src/lib.rs | 23 +++++++-------------- node/overseer/src/dummy.rs | 2 +- node/overseer/src/lib.rs | 4 ++-- 3 files changed, 11 insertions(+), 18 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index a599cb106352..0e447aa69b1f 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -36,8 +36,7 @@ use polkadot_node_subsystem::{ ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, }, - overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, - SubsystemError, + overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; use polkadot_primitives::vstaging::{ @@ -91,7 +90,7 @@ pub struct ProspectiveParachainsSubsystem; #[overseer::subsystem(ProspectiveParachains, error = SubsystemError, prefix = self::overseer)] impl ProspectiveParachainsSubsystem where - Context: Send + Sync + Context: Send + Sync, { fn start(self, ctx: Context) -> SpawnedSubsystem { SpawnedSubsystem { @@ -263,8 +262,7 @@ async fn handle_candidate_seconded( candidate: CommittedCandidateReceipt, pvd: PersistedValidationData, tx: oneshot::Sender, -) -> JfyiErrorResult<()> -{ +) -> JfyiErrorResult<()> { // Add the candidate to storage. // Then attempt to add it to all trees. 
let storage = match view.candidate_storage.get_mut(¶) { @@ -326,8 +324,7 @@ async fn handle_candidate_backed( view: &mut View, para: ParaId, candidate_hash: CandidateHash, -) -> JfyiErrorResult<()> -{ +) -> JfyiErrorResult<()> { let storage = match view.candidate_storage.get_mut(¶) { None => { gum::warn!( @@ -484,8 +481,7 @@ async fn fetch_base_constraints( ctx: &mut Context, relay_parent: Hash, para_id: ParaId, -) -> JfyiErrorResult> -{ +) -> JfyiErrorResult> { let (tx, rx) = oneshot::channel(); ctx.send_message(RuntimeApiMessage::Request( relay_parent, @@ -500,8 +496,7 @@ async fn fetch_base_constraints( async fn fetch_upcoming_paras( ctx: &mut Context, relay_parent: Hash, -) -> JfyiErrorResult> -{ +) -> JfyiErrorResult> { let (tx, rx) = oneshot::channel(); // This'll have to get more sophisticated with parathreads, @@ -540,8 +535,7 @@ async fn fetch_ancestry( ctx: &mut Context, relay_hash: Hash, ancestors: usize, -) -> JfyiErrorResult> -{ +) -> JfyiErrorResult> { let (tx, rx) = oneshot::channel(); ctx.send_message(ChainApiMessage::Ancestors { hash: relay_hash, @@ -577,8 +571,7 @@ async fn fetch_ancestry( async fn fetch_block_info( ctx: &mut Context, relay_hash: Hash, -) -> JfyiErrorResult> -{ +) -> JfyiErrorResult> { let (tx, rx) = oneshot::channel(); ctx.send_message(ChainApiMessage::BlockHeader(relay_hash, tx)).await; diff --git a/node/overseer/src/dummy.rs b/node/overseer/src/dummy.rs index a41bd240a1f4..19d24fb82dfa 100644 --- a/node/overseer/src/dummy.rs +++ b/node/overseer/src/dummy.rs @@ -157,7 +157,7 @@ where + Subsystem, SubsystemError> + Subsystem, SubsystemError> + Subsystem, SubsystemError> - + Subsystem, SubsystemError> + + Subsystem, SubsystemError>, { let metrics = ::register(registry)?; diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index b80e6ce0bde0..21cd09aee03d 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -83,8 +83,8 @@ use polkadot_node_subsystem_types::messages::{ BitfieldSigningMessage, CandidateBackingMessage, CandidateValidationMessage, ChainApiMessage, ChainSelectionMessage, CollationGenerationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage, - NetworkBridgeMessage, ProvisionerMessage, PvfCheckerMessage, RuntimeApiMessage, - StatementDistributionMessage, ProspectiveParachainsMessage, + NetworkBridgeMessage, ProspectiveParachainsMessage, ProvisionerMessage, PvfCheckerMessage, + RuntimeApiMessage, StatementDistributionMessage, }; pub use polkadot_node_subsystem_types::{ errors::{SubsystemError, SubsystemResult}, From 801c62cd91d65cd53a3a880bd1204c59acd8b873 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 May 2022 16:01:26 -0500 Subject: [PATCH 84/87] fix ChannelsOut in overseer test --- node/overseer/src/tests.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index 9fb030140191..9d9209b265a2 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -1053,6 +1053,7 @@ fn context_holds_onto_message_until_enough_signals_received() { let (dispute_distribution_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (chain_selection_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (pvf_checker_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); + let (prospective_parachains_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY); let (candidate_validation_unbounded_tx, _) = metered::unbounded(); let (candidate_backing_unbounded_tx, _) = metered::unbounded(); @@ 
-1075,6 +1076,7 @@ fn context_holds_onto_message_until_enough_signals_received() { let (dispute_distribution_unbounded_tx, _) = metered::unbounded(); let (chain_selection_unbounded_tx, _) = metered::unbounded(); let (pvf_checker_unbounded_tx, _) = metered::unbounded(); + let (prospective_parachains_unbounded_tx, _) = metered::unbounded(); let channels_out = ChannelsOut { candidate_validation: candidate_validation_bounded_tx.clone(), @@ -1098,6 +1100,7 @@ fn context_holds_onto_message_until_enough_signals_received() { dispute_distribution: dispute_distribution_bounded_tx.clone(), chain_selection: chain_selection_bounded_tx.clone(), pvf_checker: pvf_checker_bounded_tx.clone(), + prospective_parachains: prospective_parachains_bounded_tx.clone(), candidate_validation_unbounded: candidate_validation_unbounded_tx.clone(), candidate_backing_unbounded: candidate_backing_unbounded_tx.clone(), @@ -1120,6 +1123,7 @@ fn context_holds_onto_message_until_enough_signals_received() { dispute_distribution_unbounded: dispute_distribution_unbounded_tx.clone(), chain_selection_unbounded: chain_selection_unbounded_tx.clone(), pvf_checker_unbounded: pvf_checker_unbounded_tx.clone(), + prospective_parachains_unbounded: prospective_parachains_unbounded_tx.clone(), }; let (mut signal_tx, signal_rx) = metered::channel(CHANNEL_CAPACITY); From 25d09790e364c844155acf6f0dd7d4e21c05a697 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 May 2022 16:03:29 -0500 Subject: [PATCH 85/87] fix overseer tests --- node/overseer/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index 9d9209b265a2..c6cbd23729d6 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -913,7 +913,7 @@ fn test_chain_selection_msg() -> ChainSelectionMessage { // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly. #[test] fn overseer_all_subsystems_receive_signals_and_messages() { - const NUM_SUBSYSTEMS: usize = 21; + const NUM_SUBSYSTEMS: usize = 22; // -4 for BitfieldSigning, GossipSupport, AvailabilityDistribution and PvfCheckerSubsystem. const NUM_SUBSYSTEMS_MESSAGED: usize = NUM_SUBSYSTEMS - 4; From 16612b963fd55ea0aff889af8c7d55a0221292f6 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 May 2022 16:05:41 -0500 Subject: [PATCH 86/87] fix again --- node/overseer/src/tests.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index c6cbd23729d6..a870ba0effcd 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -30,7 +30,7 @@ use polkadot_node_subsystem_types::{ }; use polkadot_primitives::v2::{ CandidateHash, CandidateReceipt, CollatorPair, InvalidDisputeStatementKind, - ValidDisputeStatementKind, ValidatorIndex, + ValidDisputeStatementKind, ValidatorIndex, Id as ParaId, }; use crate::{ @@ -910,6 +910,10 @@ fn test_chain_selection_msg() -> ChainSelectionMessage { ChainSelectionMessage::Approved(Default::default()) } +fn test_prospective_parachains_msg() -> ProspectiveParachainsMessage { + ProspectiveParachainsMessage::CandidateBacked(ParaId::from(5), CandidateHash(Hash::repeat_byte(0))) +} + // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly. 
#[test] fn overseer_all_subsystems_receive_signals_and_messages() { @@ -998,6 +1002,9 @@ fn overseer_all_subsystems_receive_signals_and_messages() { handle .send_msg_anon(AllMessages::ChainSelection(test_chain_selection_msg())) .await; + handle + .send_msg_anon(AllMessages::ProspectiveParachains(test_prospective_parachains_msg())) + .await; // handle.send_msg_anon(AllMessages::PvfChecker(test_pvf_checker_msg())).await; // Wait until all subsystems have received. Otherwise the messages might race against From 502406035f21faa2ab4e64e727c1ffa4a80e7be8 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 18 May 2022 16:05:46 -0500 Subject: [PATCH 87/87] fmt --- node/overseer/src/tests.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index a870ba0effcd..ab7303297aea 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -29,8 +29,8 @@ use polkadot_node_subsystem_types::{ ActivatedLeaf, LeafStatus, }; use polkadot_primitives::v2::{ - CandidateHash, CandidateReceipt, CollatorPair, InvalidDisputeStatementKind, - ValidDisputeStatementKind, ValidatorIndex, Id as ParaId, + CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind, + ValidDisputeStatementKind, ValidatorIndex, }; use crate::{ @@ -911,7 +911,10 @@ fn test_chain_selection_msg() -> ChainSelectionMessage { } fn test_prospective_parachains_msg() -> ProspectiveParachainsMessage { - ProspectiveParachainsMessage::CandidateBacked(ParaId::from(5), CandidateHash(Hash::repeat_byte(0))) + ProspectiveParachainsMessage::CandidateBacked( + ParaId::from(5), + CandidateHash(Hash::repeat_byte(0)), + ) } // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly.
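
For reference, a minimal sketch (not part of the patch series) of constructing the `ProspectiveParachainsMessage::CandidateBacked` variant that these patches route through the overseer. The `CandidateBacked(ParaId, CandidateHash)` shape and the import paths are taken from the overseer-test and `node/overseer/src/lib.rs` hunks above; the function name and everything else here is purely illustrative.

```rust
// Illustrative sketch only; mirrors the `test_prospective_parachains_msg`
// helper added to the overseer tests in this series.
use polkadot_node_subsystem_types::messages::ProspectiveParachainsMessage;
use polkadot_primitives::v2::{CandidateHash, Hash, Id as ParaId};

/// Build a `CandidateBacked` notification for para 5 with a zeroed candidate
/// hash, the same shape the overseer test uses to exercise message routing
/// to the new subsystem.
#[allow(dead_code)]
fn example_prospective_parachains_msg() -> ProspectiveParachainsMessage {
	ProspectiveParachainsMessage::CandidateBacked(
		ParaId::from(5),
		CandidateHash(Hash::repeat_byte(0)),
	)
}
```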