From 729e524da3c0b4fda05c529ac4523e17a11339bc Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 14 Mar 2023 11:32:06 +0100 Subject: [PATCH 01/78] WIP Statement store --- Cargo.lock | 55 +++ Cargo.toml | 3 + bin/node-template/node/Cargo.toml | 1 + bin/node-template/node/src/service.rs | 8 +- bin/node/cli/Cargo.toml | 1 + bin/node/cli/src/service.rs | 8 +- client/db/Cargo.toml | 1 + client/network/statement/Cargo.toml | 28 ++ client/network/statement/src/config.rs | 35 ++ client/network/statement/src/lib.rs | 488 +++++++++++++++++++++++++ client/rpc-api/src/lib.rs | 1 + client/rpc-api/src/statement/error.rs | 55 +++ client/rpc-api/src/statement/mod.rs | 48 +++ client/rpc/Cargo.toml | 1 + client/rpc/src/lib.rs | 1 + client/rpc/src/statement/mod.rs | 91 +++++ client/rpc/src/statement/tests.rs | 19 + client/service/Cargo.toml | 2 + client/service/src/builder.rs | 61 +++- client/service/src/error.rs | 3 + client/service/src/lib.rs | 3 + client/statement-store/Cargo.toml | 42 +++ client/statement-store/README.md | 4 + client/statement-store/src/lib.rs | 42 +++ client/statement-store/src/metrics.rs | 121 ++++++ client/statement-store/src/store.rs | 250 +++++++++++++ primitives/blockchain/src/error.rs | 3 + primitives/statement-store/Cargo.toml | 31 ++ primitives/statement-store/README.md | 4 + primitives/statement-store/src/lib.rs | 156 ++++++++ 30 files changed, 1563 insertions(+), 3 deletions(-) create mode 100644 client/network/statement/Cargo.toml create mode 100644 client/network/statement/src/config.rs create mode 100644 client/network/statement/src/lib.rs create mode 100644 client/rpc-api/src/statement/error.rs create mode 100644 client/rpc-api/src/statement/mod.rs create mode 100644 client/rpc/src/statement/mod.rs create mode 100644 client/rpc/src/statement/tests.rs create mode 100644 client/statement-store/Cargo.toml create mode 100644 client/statement-store/README.md create mode 100644 client/statement-store/src/lib.rs create mode 100644 client/statement-store/src/metrics.rs create mode 100644 client/statement-store/src/store.rs create mode 100644 primitives/statement-store/Cargo.toml create mode 100644 primitives/statement-store/README.md create mode 100644 primitives/statement-store/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 9a63e25b4878c..4ba77795de802 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4899,6 +4899,7 @@ dependencies = [ "sc-rpc", "sc-service", "sc-service-test", + "sc-statement-store", "sc-storage-monitor", "sc-sync-state-rpc", "sc-sysinfo", @@ -5063,6 +5064,7 @@ dependencies = [ "sc-rpc", "sc-rpc-api", "sc-service", + "sc-statement-store", "sc-telemetry", "sc-transaction-pool", "sc-transaction-pool-api", @@ -8280,6 +8282,7 @@ dependencies = [ "sp-database", "sp-runtime", "sp-state-machine", + "sp-statement-store", "sp-tracing", "sp-trie", "substrate-test-runtime-client", @@ -8919,6 +8922,25 @@ dependencies = [ "thiserror", ] +[[package]] +name = "sc-network-statement" +version = "0.10.0-dev" +dependencies = [ + "array-bytes", + "futures", + "libp2p", + "log", + "parity-scale-codec", + "pin-project", + "sc-network-common", + "sc-peerset", + "sc-utils", + "sp-consensus", + "sp-runtime", + "sp-statement-store", + "substrate-prometheus-endpoint", +] + [[package]] name = "sc-network-sync" version = "0.10.0-dev" @@ -9096,6 +9118,7 @@ dependencies = [ "sp-rpc", "sp-runtime", "sp-session", + "sp-statement-store", "sp-version", "substrate-test-runtime-client", "tokio", @@ -9205,12 +9228,14 @@ dependencies = [ "sc-network-bitswap", "sc-network-common", "sc-network-light", 
+ "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-offchain", "sc-rpc", "sc-rpc-server", "sc-rpc-spec-v2", + "sc-statement-store", "sc-storage-monitor", "sc-sysinfo", "sc-telemetry", @@ -9292,6 +9317,24 @@ dependencies = [ "sp-core", ] +[[package]] +name = "sc-statement-store" +version = "4.0.0-dev" +dependencies = [ + "async-trait", + "futures", + "futures-timer", + "log", + "parity-db", + "parity-scale-codec", + "parking_lot 0.12.1", + "sp-core", + "sp-statement-store", + "sp-tracing", + "substrate-prometheus-endpoint", + "substrate-test-runtime", +] + [[package]] name = "sc-storage-monitor" version = "0.1.0" @@ -10563,6 +10606,18 @@ dependencies = [ "trie-db", ] +[[package]] +name = "sp-statement-store" +version = "4.0.0-dev" +dependencies = [ + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-runtime", + "sp-std", + "thiserror", +] + [[package]] name = "sp-std" version = "5.0.0" diff --git a/Cargo.toml b/Cargo.toml index de562ad79e47e..0aae82478aa14 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -46,6 +46,7 @@ members = [ "client/merkle-mountain-range/rpc", "client/network", "client/network/transactions", + "client/network/statement", "client/network-gossip", "client/network/bitswap", "client/network/common", @@ -63,6 +64,7 @@ members = [ "client/service", "client/service/test", "client/state-db", + "client/statement-store", "client/storage-monitor", "client/sysinfo", "client/sync-state-rpc", @@ -217,6 +219,7 @@ members = [ "primitives/session", "primitives/staking", "primitives/state-machine", + "primitives/statement-store", "primitives/std", "primitives/storage", "primitives/test-primitives", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 979c00120ddb7..1d0d8adf3dc27 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -28,6 +28,7 @@ sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } +sc-statement-store = { version = "4.0.0-dev", path = "../../../client/statement-store" } sc-consensus-aura = { version = "0.10.0-dev", path = "../../../client/consensus/aura" } sp-consensus-aura = { version = "0.10.0-dev", path = "../../../primitives/consensus/aura" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 34e4e566d92fc..fc6ebc6214214 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -135,6 +135,8 @@ pub fn new_partial( compatibility_mode: Default::default(), })?; + let statement_store = sc_statement_store::Store::new(config.database.path().unwrap())?; + Ok(sc_service::PartialComponents { client, backend, @@ -143,6 +145,7 @@ pub fn new_partial( keystore_container, select_chain, transaction_pool, + statement_store, other: (grandpa_block_import, grandpa_link, telemetry), }) } @@ -164,6 +167,7 @@ pub fn new_full(mut config: Configuration) -> Result mut keystore_container, select_chain, transaction_pool, + statement_store, other: (block_import, grandpa_link, mut telemetry), } = new_partial(&config)?; @@ -192,11 +196,12 @@ pub fn new_full(mut config: Configuration) -> Result Vec::default(), )); - let (network, system_rpc_tx, 
tx_handler_controller, network_starter, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, _statement_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), transaction_pool: transaction_pool.clone(), + statement_store: statement_store.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, block_announce_validator_builder: None, @@ -236,6 +241,7 @@ pub fn new_full(mut config: Configuration) -> Result keystore: keystore_container.sync_keystore(), task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), + statement_store: statement_store.clone(), rpc_builder: rpc_extensions_builder, backend, system_rpc_tx, diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 4451935c36035..46f48f286f017 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -66,6 +66,7 @@ sc-chain-spec = { version = "4.0.0-dev", path = "../../../client/chain-spec" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-transaction-pool = { version = "4.0.0-dev", path = "../../../client/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } +sc-statement-store = { version = "4.0.0-dev", path = "../../../client/statement-store" } sc-network = { version = "0.10.0-dev", path = "../../../client/network" } sc-network-common = { version = "0.10.0-dev", path = "../../../client/network/common" } sc-network-sync = { version = "0.10.0-dev", path = "../../../client/network/sync" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index e9a34b2a5c728..6c469efbe1384 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -284,6 +284,8 @@ pub fn new_partial( (rpc_extensions_builder, shared_voter_state2) }; + let statement_store = sc_statement_store::Store::new(config.database.path().unwrap())?; + Ok(sc_service::PartialComponents { client, backend, @@ -292,6 +294,7 @@ pub fn new_partial( select_chain, import_queue, transaction_pool, + statement_store, other: (rpc_extensions_builder, import_setup, rpc_setup, telemetry), }) } @@ -336,6 +339,7 @@ pub fn new_full_base( keystore_container, select_chain, transaction_pool, + statement_store, other: (rpc_builder, import_setup, rpc_setup, mut telemetry), } = new_partial(&config)?; @@ -356,11 +360,12 @@ pub fn new_full_base( Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + let (network, system_rpc_tx, tx_handler_controller, _statement_handler_controller, network_starter, sync_service) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, client: client.clone(), transaction_pool: transaction_pool.clone(), + statement_store: statement_store.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, block_announce_validator_builder: None, @@ -396,6 +401,7 @@ pub fn new_full_base( system_rpc_tx, tx_handler_controller, sync_service: sync_service.clone(), + statement_store, telemetry: telemetry.as_mut(), })?; diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index ed26f373733e4..5c87b49cb70d3 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -33,6 +33,7 @@ sp-core = { version = "7.0.0", path = "../../primitives/core" } sp-database = { version = "4.0.0-dev", path = "../../primitives/database" } sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } sp-state-machine = { 
version = "0.13.0", path = "../../primitives/state-machine" } +sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } sp-trie = { version = "7.0.0", path = "../../primitives/trie" } [dev-dependencies] diff --git a/client/network/statement/Cargo.toml b/client/network/statement/Cargo.toml new file mode 100644 index 0000000000000..e5230a9de4ab9 --- /dev/null +++ b/client/network/statement/Cargo.toml @@ -0,0 +1,28 @@ +[package] +description = "Substrate statement protocol" +name = "sc-network-statement" +version = "0.10.0-dev" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors = ["Parity Technologies "] +edition = "2021" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-network-statement" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +array-bytes = "4.1" +codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } +futures = "0.3.21" +libp2p = "0.50.0" +log = "0.4.17" +pin-project = "1.0.12" +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } +sc-network-common = { version = "0.10.0-dev", path = "../common" } +sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } +sc-utils = { version = "4.0.0-dev", path = "../../utils" } +sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-statement-store = { version = "4.0.0-dev", path = "../../../primitives/statement-store" } diff --git a/client/network/statement/src/config.rs b/client/network/statement/src/config.rs new file mode 100644 index 0000000000000..e05ed7e1e17c5 --- /dev/null +++ b/client/network/statement/src/config.rs @@ -0,0 +1,35 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Configuration of the statement protocol + +use std::time; + +/// Interval at which we propagate statements; +pub(crate) const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); + +/// Maximum number of known statement hashes to keep for a peer. +/// +pub(crate) const MAX_KNOWN_STATEMENTS: usize = 10240; + +/// Maximum allowed size for a statement notification. +pub(crate) const MAX_STATEMENT_SIZE: u64 = 256 * 1024; + +/// Maximum number of statement validation request we keep at any moment. +pub(crate) const MAX_PENDING_STATEMENTS: usize = 8192; + diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs new file mode 100644 index 0000000000000..a9a1d9d842df9 --- /dev/null +++ b/client/network/statement/src/lib.rs @@ -0,0 +1,488 @@ +// This file is part of Substrate. 
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Statement handling to plug on top of the network service.
+//!
+//! Usage:
+//!
+//! - Use [`StatementHandlerPrototype::new`] to create a prototype.
+//! - Pass the return value of [`StatementHandlerPrototype::set_config`] to the network
+//!   configuration as an extra peers set.
+//! - Use [`StatementHandlerPrototype::build`] then [`StatementHandler::run`] to obtain a
+//!   `Future` that processes statements.
+
+use crate::config::*;
+use codec::{Decode, Encode};
+use futures::{prelude::*, stream::FuturesUnordered};
+use libp2p::{multiaddr, PeerId};
+use log::{debug, trace, warn};
+use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64};
+use sc_network_common::{
+	config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig},
+	error,
+	protocol::{event::Event, role::ObservedRole, ProtocolName},
+	service::{NetworkEventStream, NetworkNotification, NetworkPeers},
+	sync::{SyncEvent, SyncEventStream},
+	utils::{interval, LruHashSet},
+};
+use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};
+use sp_statement_store::{Hash, Statement, StatementStore, SubmitResult};
+use std::{
+	collections::{hash_map::Entry, HashMap},
+	iter,
+	num::NonZeroUsize,
+	pin::Pin,
+	sync::Arc,
+	task::Poll,
+};
+
+pub mod config;
+
+/// A set of statements.
+pub type Statements = Vec<Statement>;
+/// Future resolving to statement import result.
+pub type StatementImportFuture = Pin<Box<dyn Future<Output = SubmitResult> + Send>>;
+
+mod rep {
+	use sc_peerset::ReputationChange as Rep;
+	/// Reputation change when a peer sends us any statement.
+	///
+	/// This forces the node to verify it, thus the negative value here. Once the statement is
+	/// verified, the reputation change should be refunded with `ANY_STATEMENT_REFUND`.
+	pub const ANY_STATEMENT: Rep = Rep::new(-(1 << 4), "Any statement");
+	/// Reputation change when a peer sends us any statement that is not invalid.
+	pub const ANY_STATEMENT_REFUND: Rep = Rep::new(1 << 4, "Any statement (refund)");
+	/// Reputation change when a peer sends us a statement that we didn't know about.
+	pub const GOOD_STATEMENT: Rep = Rep::new(1 << 7, "Good statement");
+	/// Reputation change when a peer sends us a bad statement.
+ pub const BAD_STATEMENT: Rep = Rep::new(-(1 << 12), "Bad statement"); +} + +struct Metrics { + propagated_statements: Counter, +} + +impl Metrics { + fn register(r: &Registry) -> Result { + Ok(Self { + propagated_statements: register( + Counter::new( + "substrate_sync_propagated_statements", + "Number of statements propagated to at least one peer", + )?, + r, + )?, + }) + } +} + +#[pin_project::pin_project] +struct PendingStatement { + #[pin] + validation: StatementImportFuture, + hash: Hash, +} + +impl Future for PendingStatement { + type Output = (Hash, SubmitResult); + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let mut this = self.project(); + + if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { + return Poll::Ready((this.hash.clone(), import_result)) + } + + Poll::Pending + } +} + +/// Prototype for a [`StatementHandler`]. +pub struct StatementHandlerPrototype { + protocol_name: ProtocolName, + fallback_protocol_names: Vec, +} + +impl StatementHandlerPrototype { + /// Create a new instance. + pub fn new>( + protocol_id: ProtocolId, + genesis_hash: Hash, + fork_id: Option<&str>, + ) -> Self { + let genesis_hash = genesis_hash.as_ref(); + let protocol_name = if let Some(fork_id) = fork_id { + format!("/{}/{}/statement/1", array_bytes::bytes2hex("", genesis_hash), fork_id) + } else { + format!("/{}/statement/1", array_bytes::bytes2hex("", genesis_hash)) + }; + let legacy_protocol_name = format!("/{}/statement/1", protocol_id.as_ref()); + + Self { + protocol_name: protocol_name.into(), + fallback_protocol_names: iter::once(legacy_protocol_name.into()).collect(), + } + } + + /// Returns the configuration of the set to put in the network configuration. + pub fn set_config(&self) -> NonDefaultSetConfig { + NonDefaultSetConfig { + notifications_protocol: self.protocol_name.clone(), + fallback_names: self.fallback_protocol_names.clone(), + max_notification_size: MAX_STATEMENT_SIZE, + handshake: None, + set_config: SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Deny, + }, + } + } + + /// Turns the prototype into the actual handler. Returns a controller that allows controlling + /// the behaviour of the handler while it's running. + /// + /// Important: the statements handler is initially disabled and doesn't gossip statements. + /// Gossiping is enabled when major syncing is done. + pub fn build< + N: NetworkPeers + NetworkEventStream + NetworkNotification, + S: SyncEventStream + sp_consensus::SyncOracle, + >( + self, + network: N, + sync: S, + statement_store: Arc, + metrics_registry: Option<&Registry>, + ) -> error::Result<(StatementHandler, StatementHandlerController)> { + let net_event_stream = network.event_stream("statement-handler-net"); + let sync_event_stream = sync.event_stream("statement-handler-sync"); + let (to_handler, from_controller) = tracing_unbounded("mpsc_statement_handler", 100_000); + + let handler = StatementHandler { + protocol_name: self.protocol_name, + propagate_timeout: (Box::pin(interval(PROPAGATE_TIMEOUT)) + as Pin + Send>>) + .fuse(), + pending_statements: FuturesUnordered::new(), + pending_statements_peers: HashMap::new(), + network, + sync, + net_event_stream: net_event_stream.fuse(), + sync_event_stream: sync_event_stream.fuse(), + peers: HashMap::new(), + statement_store, + from_controller, + metrics: if let Some(r) = metrics_registry { + Some(Metrics::register(r)?) 
+ } else { + None + }, + }; + + let controller = StatementHandlerController { to_handler }; + + Ok((handler, controller)) + } +} + +/// Controls the behaviour of a [`StatementHandler`] it is connected to. +pub struct StatementHandlerController { + to_handler: TracingUnboundedSender, +} + +impl StatementHandlerController { + /// You may call this when new statements are imported by the statement store. + /// + /// All statements will be fetched from the `StatementStore` and propagated to peers. + pub fn propagate_statements(&self) { + let _ = self.to_handler.unbounded_send(ToHandler::PropagateStatements); + } + + /// You must call when new a statement is imported by the statement store. + /// + /// This statement will be fetched from the `StatementStore` and propagated to peers. + pub fn propagate_statement(&self, hash: Hash) { + let _ = self.to_handler.unbounded_send(ToHandler::PropagateStatement(hash)); + } +} + +enum ToHandler { + PropagateStatements, + PropagateStatement(Hash), +} + +/// Handler for statements. Call [`StatementHandler::run`] to start the processing. +pub struct StatementHandler< + N: NetworkPeers + NetworkEventStream + NetworkNotification, + S: SyncEventStream + sp_consensus::SyncOracle, +> { + protocol_name: ProtocolName, + /// Interval at which we call `propagate_statements`. + propagate_timeout: stream::Fuse + Send>>>, + /// Pending statements verification tasks. + pending_statements: FuturesUnordered, + /// As multiple peers can send us the same statement, we group + /// these peers using the statement hash while the statement is + /// imported. This prevents that we import the same statement + /// multiple times concurrently. + pending_statements_peers: HashMap>, + /// Network service to use to send messages and manage peers. + network: N, + /// Syncing service. + sync: S, + /// Stream of networking events. + net_event_stream: stream::Fuse + Send>>>, + /// Receiver for syncing-related events. + sync_event_stream: stream::Fuse + Send>>>, + // All connected peers + peers: HashMap, + statement_store: Arc, + from_controller: TracingUnboundedReceiver, + /// Prometheus metrics. + metrics: Option, +} + +/// Peer information +#[derive(Debug)] +struct Peer { + /// Holds a set of statements known to this peer. + known_statements: LruHashSet, + role: ObservedRole, +} + +impl StatementHandler +where + N: NetworkPeers + NetworkEventStream + NetworkNotification, + S: SyncEventStream + sp_consensus::SyncOracle, +{ + /// Turns the [`StatementHandler`] into a future that should run forever and not be + /// interrupted. + pub async fn run(mut self) { + loop { + futures::select! { + _ = self.propagate_timeout.next() => { + self.propagate_statements(); + }, + (hash, result) = self.pending_statements.select_next_some() => { + if let Some(peers) = self.pending_statements_peers.remove(&hash) { + peers.into_iter().for_each(|p| self.on_handle_statement_import(p, &result)); + } else { + warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending statement!"); + } + }, + network_event = self.net_event_stream.next() => { + if let Some(network_event) = network_event { + self.handle_network_event(network_event).await; + } else { + // Networking has seemingly closed. Closing as well. + return; + } + }, + sync_event = self.sync_event_stream.next() => { + if let Some(sync_event) = sync_event { + self.handle_sync_event(sync_event); + } else { + // Syncing has seemingly closed. Closing as well. 
+ return; + } + } + message = self.from_controller.select_next_some() => { + match message { + ToHandler::PropagateStatement(hash) => self.propagate_statement(&hash), + ToHandler::PropagateStatements => self.propagate_statements(), + } + }, + } + } + } + + fn handle_sync_event(&mut self, event: SyncEvent) { + match event { + SyncEvent::PeerConnected(remote) => { + let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) + .collect::(); + let result = self.network.add_peers_to_reserved_set( + self.protocol_name.clone(), + iter::once(addr).collect(), + ); + if let Err(err) = result { + log::error!(target: "sync", "Add reserved peer failed: {}", err); + } + }, + SyncEvent::PeerDisconnected(remote) => { + self.network.remove_peers_from_reserved_set( + self.protocol_name.clone(), + iter::once(remote).collect(), + ); + }, + } + } + + async fn handle_network_event(&mut self, event: Event) { + match event { + Event::Dht(_) => {}, + Event::NotificationStreamOpened { remote, protocol, role, .. } + if protocol == self.protocol_name => + { + let _was_in = self.peers.insert( + remote, + Peer { + known_statements: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_STATEMENTS).expect("Constant is nonzero"), + ), + role, + }, + ); + debug_assert!(_was_in.is_none()); + }, + Event::NotificationStreamClosed { remote, protocol } + if protocol == self.protocol_name => + { + let _peer = self.peers.remove(&remote); + debug_assert!(_peer.is_some()); + }, + + Event::NotificationsReceived { remote, messages } => { + for (protocol, message) in messages { + if protocol != self.protocol_name { + continue + } + // Accept statements only when node is not major syncing + if self.sync.is_major_syncing() { + trace!(target: "sync", "{remote}: Ignoring statements while major syncing"); + continue + } + if let Ok(statements) = + ::decode(&mut message.as_ref()) + { + self.on_statements(remote, statements); + } else { + debug!(target: "sub-libp2p", "Failed to decode statement list from {remote}"); + } + } + }, + + // Not our concern. + Event::NotificationStreamOpened { .. } | Event::NotificationStreamClosed { .. } => {}, + } + } + + /// Called when peer sends us new statements + fn on_statements(&mut self, who: PeerId, statements: Statements) { + trace!(target: "sync", "Received {} statements from {}", statements.len(), who); + if let Some(ref mut peer) = self.peers.get_mut(&who) { + for s in statements { + if self.pending_statements.len() > MAX_PENDING_STATEMENTS { + debug!( + target: "sync", + "Ignoring any further statements that exceed `MAX_PENDING_STATEMENTS`({}) limit", + MAX_PENDING_STATEMENTS, + ); + break + } + + let hash = s.hash(); + peer.known_statements.insert(hash.clone()); + + self.network.report_peer(who, rep::ANY_STATEMENT); + + match self.pending_statements_peers.entry(hash.clone()) { + Entry::Vacant(entry) => { + self.pending_statements.push(PendingStatement { + validation: self.statement_store.submit_async(s), + hash, + }); + entry.insert(vec![who]); + }, + Entry::Occupied(mut entry) => { + entry.get_mut().push(who); + }, + } + } + } + } + + fn on_handle_statement_import(&mut self, who: PeerId, import: &SubmitResult) { + match import { + SubmitResult::OkNew(_) => + self.network.report_peer(who, rep::ANY_STATEMENT_REFUND), + SubmitResult::OkKnown(_) => self.network.report_peer(who, rep::GOOD_STATEMENT), + SubmitResult::Bad(_) => self.network.report_peer(who, rep::BAD_STATEMENT), + SubmitResult::InternalError(_) => {}, + } + } + + /// Propagate one statement. 
+ pub fn propagate_statement(&mut self, hash: &Hash) { + // Accept statements only when node is not major syncing + if self.sync.is_major_syncing() { + return + } + + debug!(target: "sync", "Propagating statement [{:?}]", hash); + if let Ok(Some(statement)) = self.statement_store.statement(hash) { + self.do_propagate_statements(&[(hash.clone(), statement)]); + } + } + + fn do_propagate_statements( + &mut self, + statements: &[(Hash, Statement)], + ) { + let mut propagated_statements = 0; + + for (who, peer) in self.peers.iter_mut() { + // never send statements to the light node + if matches!(peer.role, ObservedRole::Light) { + continue + } + + let (hashes, to_send): (Vec<_>, Vec<_>) = statements + .iter() + .filter(|&(ref hash, _)| peer.known_statements.insert(hash.clone())) + .cloned() + .unzip(); + + propagated_statements += hashes.len(); + + if !to_send.is_empty() { + trace!(target: "sync", "Sending {} statements to {}", to_send.len(), who); + self.network + .write_notification(*who, self.protocol_name.clone(), to_send.encode()); + } + } + + if let Some(ref metrics) = self.metrics { + metrics.propagated_statements.inc_by(propagated_statements as _) + } + } + + /// Call when we must propagate ready statements to peers. + fn propagate_statements(&mut self) { + // Send out statements only when node is not major syncing + if self.sync.is_major_syncing() { + return + } + + debug!(target: "sync", "Propagating statements"); + if let Ok(statements) = self.statement_store.dump() { + self.do_propagate_statements(&statements); + } + } +} diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index bc76d029ab6bb..83054584370a1 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -32,4 +32,5 @@ pub mod child_state; pub mod dev; pub mod offchain; pub mod state; +pub mod statement; pub mod system; diff --git a/client/rpc-api/src/statement/error.rs b/client/rpc-api/src/statement/error.rs new file mode 100644 index 0000000000000..549b147115fb2 --- /dev/null +++ b/client/rpc-api/src/statement/error.rs @@ -0,0 +1,55 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Statement RPC errors. + +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; + +/// Statement RPC Result type. +pub type Result = std::result::Result; + +/// Statement RPC errors. +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Statement store internal error. + #[error("Statement store error")] + StatementStore(String), + /// Call to an unsafe RPC was denied. + #[error(transparent)] + UnsafeRpcCalled(#[from] crate::policy::UnsafeRpcError), +} + +/// Base error code for all statement errors. 
+const BASE_ERROR: i32 = 6000;
+
+impl From<Error> for JsonRpseeError {
+	fn from(e: Error) -> Self {
+		match e {
+			Error::StatementStore(message) => CallError::Custom(ErrorObject::owned(
+				BASE_ERROR + 1,
+				format!("Statement store error: {message}"),
+				None::<()>,
+			))
+			.into(),
+			Error::UnsafeRpcCalled(e) => e.into(),
+		}
+	}
+}
diff --git a/client/rpc-api/src/statement/mod.rs b/client/rpc-api/src/statement/mod.rs
new file mode 100644
index 0000000000000..0231e30db3a61
--- /dev/null
+++ b/client/rpc-api/src/statement/mod.rs
@@ -0,0 +1,48 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Substrate statement store RPC API.
+
+use jsonrpsee::{core::RpcResult, proc_macros::rpc};
+use sp_core::Bytes;
+
+pub mod error;
+
+/// Substrate statement RPC API.
+#[rpc(client, server)]
+pub trait StatementApi {
+	/// Return all statements, SCALE-encoded.
+	#[method(name = "statement_dump")]
+	fn dump(&self) -> RpcResult<Vec<Bytes>>;
+
+	/// Return the data of all known statements which include all topics and have no `DecryptionKey` field.
+	#[method(name = "statement_broadcasts")]
+	fn broadcasts(&self, match_all_topics: Vec<[u8; 32]>) -> RpcResult<Vec<Bytes>>;
+
+	/// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers).
+	#[method(name = "statement_posted")]
+	fn posted(&self, match_all_topics: Vec<[u8; 32]>, dest: [u8; 32]) -> RpcResult<Vec<Bytes>>;
+
+	/// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client.
+	#[method(name = "statement_postedClear")]
+	fn posted_clear(&self, match_all_topics: Vec<[u8; 32]>, dest: [u8; 32]) -> RpcResult<Vec<Bytes>>;
+
+	/// Submit a pre-encoded statement.
+ #[method(name = "statement_submit")] + fn submit(&self, encoded: Bytes) -> RpcResult<()>; +} diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index a22f657878812..5a6e3e1083923 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -35,6 +35,7 @@ sp-rpc = { version = "6.0.0", path = "../../primitives/rpc" } sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } sp-session = { version = "4.0.0-dev", path = "../../primitives/session" } sp-version = { version = "5.0.0", path = "../../primitives/version" } +sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } tokio = "1.22.0" diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index 6230ef6648e20..475fc77a9b5bd 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -36,6 +36,7 @@ pub mod chain; pub mod dev; pub mod offchain; pub mod state; +pub mod statement; pub mod system; #[cfg(any(test, feature = "test-helpers"))] diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs new file mode 100644 index 0000000000000..571ccf2a7dfc2 --- /dev/null +++ b/client/rpc/src/statement/mod.rs @@ -0,0 +1,91 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Substrate statement store API. + +#[cfg(test)] +mod tests; + +//use self::error::Error; +use jsonrpsee::core::{async_trait, RpcResult}; +/// Re-export the API for backward compatibility. +pub use sc_rpc_api::statement::{error::Error, StatementApiServer}; +use sc_rpc_api::DenyUnsafe; +use sp_core::Bytes; +use sp_statement_store::SubmitResult; +//use sp_statement_store::StatementStore; +use std::sync::Arc; + +/// Statement store API +pub struct StatementStore { + store: Arc, + deny_unsafe: DenyUnsafe, +} + +impl StatementStore { + /// Create new instance of Offchain API. + pub fn new(store: Arc, deny_unsafe: DenyUnsafe) -> Self { + StatementStore { store, deny_unsafe } + } +} + +#[async_trait] +impl StatementApiServer for StatementStore { + fn dump(&self, ) -> RpcResult> { + self.deny_unsafe.check_if_safe()?; + + let statements = self.store.dump_encoded(). + map_err(|e| Error::StatementStore(e.to_string()))?; + Ok(statements.into_iter().map(|(_, s)| s.into()).collect()) + } + + fn broadcasts(&self, match_all_topics: Vec<[u8; 32]>) -> RpcResult> { + Ok(self.store.broadcasts(&match_all_topics) + .map_err(|e| Error::StatementStore(e.to_string()))? + .into_iter() + .map(Into::into) + .collect() + ) + } + + fn posted(&self, match_all_topics: Vec<[u8; 32]>, dest: [u8; 32]) -> RpcResult> { + Ok(self.store.posted(&match_all_topics, dest) + .map_err(|e| Error::StatementStore(e.to_string()))? 
+ .into_iter() + .map(Into::into) + .collect() + ) + } + + fn posted_clear(&self, match_all_topics: Vec<[u8; 32]>, dest: [u8; 32]) -> RpcResult> { + Ok(self.store.posted_clear(&match_all_topics, dest) + .map_err(|e| Error::StatementStore(e.to_string()))? + .into_iter() + .map(Into::into) + .collect() + ) + } + + fn submit(&self, encoded: Bytes) -> RpcResult<()> { + match self.store.submit_encoded(&encoded) { + SubmitResult::OkNew(_) | SubmitResult::OkKnown(_) => Ok(()), + SubmitResult::Bad(e) => Err(Error::StatementStore(e).into()), + SubmitResult::InternalError(e) => Err(Error::StatementStore(e.to_string()).into()), + } + } +} diff --git a/client/rpc/src/statement/tests.rs b/client/rpc/src/statement/tests.rs new file mode 100644 index 0000000000000..ec691e40e8a8c --- /dev/null +++ b/client/rpc/src/statement/tests.rs @@ -0,0 +1,19 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + + diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index b4ce3bbbb7f1c..4739dbead3248 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -53,12 +53,14 @@ sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-light = { version = "0.10.0-dev", path = "../network/light" } sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } sc-network-transactions = { version = "0.10.0-dev", path = "../network/transactions" } +sc-network-statement = { version = "0.10.0-dev", path = "../network/statement" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } codec = { package = "parity-scale-codec", version = "3.2.2" } sc-executor = { version = "0.10.0-dev", path = "../executor" } +sc-statement-store = { version = "4.0.0-dev", path = "../statement-store" } sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index ea4b630003123..6e47f8bd757ab 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -57,12 +57,14 @@ use sc_rpc::{ chain::ChainApiServer, offchain::OffchainApiServer, state::{ChildStateApiServer, StateApiServer}, + statement::StatementApiServer, system::SystemApiServer, DenyUnsafe, SubscriptionTaskExecutor, }; use sc_rpc_spec_v2::{chain_head::ChainHeadApiServer, transaction::TransactionApiServer}; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; 
use sc_transaction_pool_api::MaintainedTransactionPool; +use sc_statement_store::Store as StatementStore; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; @@ -380,6 +382,8 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub keystore: SyncCryptoStorePtr, /// A shared transaction pool. pub transaction_pool: Arc, + /// Shared statement store. + pub statement_store: Arc, /// Builds additional [`RpcModule`]s that should be added to the server pub rpc_builder: Box Result, Error>>, @@ -465,6 +469,7 @@ where backend, keystore, transaction_pool, + statement_store, rpc_builder, network, system_rpc_tx, @@ -512,6 +517,13 @@ where ), ); + // Inform the statement store about finalized blocks. + spawn_handle.spawn( + "statement-store-notifications", + Some("statement-store"), + statement_store_notifications(client.clone(), statement_store.clone()), + ); + // Prometheus metrics. let metrics_service = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { @@ -549,6 +561,7 @@ where task_manager.spawn_handle(), client.clone(), transaction_pool.clone(), + Some(statement_store.clone()), keystore.clone(), system_rpc_tx.clone(), &config, @@ -605,6 +618,17 @@ async fn transaction_notifications( .await; } +async fn statement_store_notifications(client: Arc, store: Arc) +where + Block: BlockT, + Client: sc_client_api::BlockchainEvents, +{ + let finality_stream = client.finality_notification_stream().fuse(); + finality_stream + .for_each(|_evt| store.maintain()) + .await +} + fn init_telemetry( config: &mut Configuration, network: Network, @@ -648,6 +672,7 @@ fn gen_rpc_module( spawn_handle: SpawnTaskHandle, client: Arc, transaction_pool: Arc, + statement_store: Option>, keystore: SyncCryptoStorePtr, system_rpc_tx: TracingUnboundedSender>, config: &Configuration, @@ -735,6 +760,11 @@ where rpc_api.merge(offchain).map_err(|e| Error::Application(e.into()))?; } + if let Some(store) = statement_store { + let store = sc_rpc::statement::StatementStore::new(store, deny_unsafe).into_rpc(); + + rpc_api.merge(store).map_err(|e| Error::Application(e.into()))?; + } // Part of the RPC v2 spec. rpc_api.merge(transaction_v2).map_err(|e| Error::Application(e.into()))?; @@ -761,6 +791,8 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { pub client: Arc, /// A shared transaction pool. pub transaction_pool: Arc, + /// A shared statement store. + pub statement_store: Arc, /// A handle for spawning tasks. pub spawn_handle: SpawnTaskHandle, /// An import queue. 
@@ -779,6 +811,7 @@ pub fn build_network( Arc::Hash>>, TracingUnboundedSender>, sc_network_transactions::TransactionsHandlerController<::Hash>, + sc_network_statement::StatementHandlerController, NetworkStarter, Arc>, ), @@ -802,6 +835,7 @@ where config, client, transaction_pool, + statement_store, spawn_handle, import_queue, block_announce_validator_builder, @@ -951,6 +985,21 @@ where .extra_sets .insert(0, transactions_handler_proto.set_config()); + // crate statment protocol and add it to the list of supported protocols of `network_params` + let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + config.chain_spec.fork_id(), + ); + network_params + .network_config + .extra_sets + .insert(0, statement_handler_proto.set_config()); + let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); @@ -961,8 +1010,17 @@ where Arc::new(TransactionPoolAdapter { pool: transaction_pool, client: client.clone() }), config.prometheus_config.as_ref().map(|config| &config.registry), )?; - spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); + + // crate statement goissip protocol and add it to the list of supported protocols of `network_params` + let (statement_handler, statement_handler_controller) = statement_handler_proto.build( + network.clone(), + sync_service.clone(), + statement_store.clone(), + config.prometheus_config.as_ref().map(|config| &config.registry), + )?; + spawn_handle.spawn("network-statement-handler", Some("networking"), statement_handler.run()); + spawn_handle.spawn( "chain-sync-network-service-provider", Some("networking"), @@ -1030,6 +1088,7 @@ where network, system_rpc_tx, tx_handler_controller, + statement_handler_controller, NetworkStarter(network_start_tx), sync_service.clone(), )) diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 6b71e46b4e0ef..f454a14f689de 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -60,6 +60,9 @@ pub enum Error { #[error("Application")] Application(#[from] Box), + #[error(transparent)] + StatementStore(#[from] sc_statement_store::Error), + #[error("Other: {0}")] Other(String), } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 9dab81a5b4ed9..88eadd92b60ff 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -90,6 +90,7 @@ pub use sc_rpc::{ pub use sc_tracing::TracingReceiver; pub use sc_transaction_pool::Options as TransactionPoolOptions; pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool}; +pub use sc_statement_store::Store as StatementStore; #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; pub use task_manager::{SpawnTaskHandle, Task, TaskManager, TaskRegistry, DEFAULT_GROUP_NAME}; @@ -142,6 +143,8 @@ pub struct PartialComponents, + /// A shared statement store. + pub statement_store: Arc, /// Everything else that needs to be passed into the main build function. 
	pub other: Other,
 }
diff --git a/client/statement-store/Cargo.toml b/client/statement-store/Cargo.toml
new file mode 100644
index 0000000000000..7996a694eb93b
--- /dev/null
+++ b/client/statement-store/Cargo.toml
@@ -0,0 +1,42 @@
+[package]
+name = "sc-statement-store"
+version = "4.0.0-dev"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2021"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+homepage = "https://substrate.io"
+repository = "https://github.com/paritytech/substrate/"
+description = "Substrate statement store."
+readme = "README.md"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+async-trait = "0.1.57"
+codec = { package = "parity-scale-codec", version = "3.2.2" }
+futures = "0.3.21"
+futures-timer = "3.0.2"
+log = "0.4.17"
+parking_lot = "0.12.1"
+parity-db = "0.4.3"
+sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" }
+prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" }
+#sc-utils = { version = "4.0.0-dev", path = "../utils" }
+#sp-api = { version = "4.0.0-dev", path = "../../primitives/api" }
+#sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" }
+sp-core = { version = "7.0.0", path = "../../primitives/core" }
+#sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" }
+sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" }
+#sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" }
+
+[dev-dependencies]
+#array-bytes = "4.1"
+#assert_matches = "1.3.0"
+#criterion = "0.4.0"
+#sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" }
+#sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" }
+substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" }
+#substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" }
+#substrate-test-runtime-transaction-pool = { version = "2.0.0", path = "../../test-utils/runtime/transaction-pool" }
+
diff --git a/client/statement-store/README.md b/client/statement-store/README.md
new file mode 100644
index 0000000000000..54173e2c2616c
--- /dev/null
+++ b/client/statement-store/README.md
@@ -0,0 +1,4 @@
+Substrate statement store implementation.
+
+License: GPL-3.0-or-later WITH Classpath-exception-2.0
+
diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs
new file mode 100644
index 0000000000000..93e9bdc07559f
--- /dev/null
+++ b/client/statement-store/src/lib.rs
@@ -0,0 +1,42 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+//! Substrate statement store implementation.
+ +#![recursion_limit = "256"] +#![warn(missing_docs)] +#![warn(unused_extern_crates)] + +mod store; +mod metrics; + +pub use store::Store; +pub use sp_statement_store::{StatementStore, Error}; + +/* +/// Inform the transaction pool about imported and finalized blocks. +pub async fn notification_future(client: Arc, store: Arc) +where + Client: sc_client_api::BlockchainEvents, +{ + let finality_stream = client.finality_notification_stream().map(Into::into).fuse(); + finality_stream + .for_each(|_evt| pool.maintain()) + .await +} +*/ diff --git a/client/statement-store/src/metrics.rs b/client/statement-store/src/metrics.rs new file mode 100644 index 0000000000000..17756ae8a7282 --- /dev/null +++ b/client/statement-store/src/metrics.rs @@ -0,0 +1,121 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Statement store Prometheus metrics. + +use std::sync::Arc; + +use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; + +#[derive(Clone, Default)] +pub struct MetricsLink(Arc>); + +impl MetricsLink { + pub fn new(registry: Option<&Registry>) -> Self { + Self(Arc::new(registry.and_then(|registry| { + Metrics::register(registry) + .map_err(|err| { + log::warn!("Failed to register prometheus metrics: {}", err); + }) + .ok() + }))) + } + + pub fn report(&self, do_this: impl FnOnce(&Metrics)) { + if let Some(metrics) = self.0.as_ref() { + do_this(metrics); + } + } +} + +/// Statement store Prometheus metrics. +pub struct Metrics { + pub submitted_statements: Counter, + pub validations_invalid: Counter, + pub statements_pruned: Counter, +} + +impl Metrics { + pub fn register(registry: &Registry) -> Result { + Ok(Self { + submitted_statements: register( + Counter::new( + "substrate_sub_statement_store_submitted_statements", + "Total number of statements submitted", + )?, + registry, + )?, + validations_invalid: register( + Counter::new( + "substrate_sub_statement_store_validations_invalid", + "Total number of statements that were removed from the pool as invalid", + )?, + registry, + )?, + statements_pruned: register( + Counter::new( + "substrate_sub_statement_store_block_statements", + "Total number of statements that was requested to be pruned by block events", + )?, + registry, + )?, + }) + } +} + +/// Statement store api Prometheus metrics. +pub struct ApiMetrics { + pub validations_scheduled: Counter, + pub validations_finished: Counter, +} + +impl ApiMetrics { + /// Register the metrics at the given Prometheus registry. 
+ pub fn register(registry: &Registry) -> Result { + Ok(Self { + validations_scheduled: register( + Counter::new( + "substrate_sub_statement_store_validations_scheduled", + "Total number of statements scheduled for validation", + )?, + registry, + )?, + validations_finished: register( + Counter::new( + "substrate_sub_statement_store_validations_finished", + "Total number of statements that finished validation", + )?, + registry, + )?, + }) + } +} + +/// An extension trait for [`ApiMetrics`]. +pub trait ApiMetricsExt { + /// Report an event to the metrics. + fn report(&self, report: impl FnOnce(&ApiMetrics)); +} + +impl ApiMetricsExt for Option> { + fn report(&self, report: impl FnOnce(&ApiMetrics)) { + if let Some(metrics) = self.as_ref() { + report(metrics) + } + } +} diff --git a/client/statement-store/src/store.rs b/client/statement-store/src/store.rs new file mode 100644 index 0000000000000..3f262d9afa6db --- /dev/null +++ b/client/statement-store/src/store.rs @@ -0,0 +1,250 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Disk-backed statement store. + +use std::{collections::{HashSet, HashMap}, sync::Arc, future::Future}; + +use parking_lot::RwLock; +use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Error, Hash, SubmitResult}; +use sp_core::{Encode, Decode}; + +const KEY_VERSION: &[u8] = b"version".as_slice(); +const CURRENT_VERSION: u32 = 1; + +const LOG_TARGET: &str = "statement"; + +mod col { + pub const META: u8 = 0; + pub const STATEMENTS: u8 = 1; + pub const COUNT: u8 = 2; +} + +#[derive(Default)] +struct Index { + by_topic: HashMap>, + by_dec_key: HashMap>, + extended_topics: HashMap, +} + +/// Statement store. 
+pub struct Store { + db: parity_db::Db, + index: RwLock, +} + +impl Index { + fn insert(&mut self, hash: Hash, statement: Statement) { + let mut ext_topics = [Topic::default(); 3]; + let mut nt = 0; + while let Some(t) = statement.topic(nt) { + if nt == 0 { + self.by_topic.entry(t).or_default().insert(hash); + } else { + ext_topics[nt - 1] = t; + } + nt += 1; + } + self.extended_topics.insert(hash, ext_topics); + if let Some(key) = statement.decryption_key() { + self.by_dec_key.entry(key).or_default().insert(hash); + } + + } + + fn iter_topics(&self, key: Option, topics: &[Topic], mut f: impl FnMut(&Hash) -> Result<()>) -> Result<()> { + let mut sets: [Option<&HashSet>; 4] = Default::default(); + let mut num_sets = 0; + for t in topics { + sets[num_sets] = self.by_topic.get(t); + if sets[num_sets].is_some() { + num_sets += 1; + } + } + if num_sets == 0 && key.is_none() { + return Ok(()); + } + sets[0..num_sets].sort_by_key(|s| s.map_or(0, HashSet::len)); + if let Some(key) = key { + let key_set = if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; + for item in key_set { + if sets.iter().all(|set| set.unwrap().contains(item)) { + f(item)? + } + } + } else { + for item in sets[0].unwrap() { + if sets[1 .. num_sets].iter().all(|set| set.unwrap().contains(item)) { + f(item)? + } + } + } + Ok(()) + } +} + +impl Store { + /// Create a new shared store instance. There should only be one per process. + pub fn new(path: &std::path::Path) -> Result> { + let mut path: std::path::PathBuf = path.into(); + path.pop(); + path.push("statement"); + + let mut config = parity_db::Options::with_columns(&path, col::COUNT); + + let mut statement_col = &mut config.columns[col::STATEMENTS as usize]; + statement_col.ref_counted = false; + statement_col.preimage = true; + statement_col.uniform = true; + let db = parity_db::Db::open_or_create(&config).map_err(|e| Error::Db(e.to_string()))?; + match db.get(col::META, &KEY_VERSION).map_err(|e| Error::Db(e.to_string()))? { + Some(version) => { + let version = u32::from_le_bytes(version.try_into() + .map_err(|_| Error::Db("Error reading database version".into()))?); + if version != CURRENT_VERSION { + return Err(Error::Db(format!("Unsupported database version: {version}"))); + } + }, + None => { + db.commit( + [(col::META, KEY_VERSION.to_vec(), Some(CURRENT_VERSION.to_le_bytes().to_vec()))] + ).map_err(|e| Error::Db(e.to_string()))?; + } + } + + let mut index = Index::default(); + db.iter_column_while(col::STATEMENTS, |item| { + let statement = item.value; + let hash = sp_statement_store::hash_encoded(&statement); + if let Ok(statement) = Statement::decode(&mut statement.as_slice()) { + index.insert(hash, statement); + } + true + }).map_err(|e| Error::Db(e.to_string()))?; + + Ok(Arc::new(Store { db, index: RwLock::new(index) })) + } + + fn collect_statements(&self, key: Option, match_all_topics: &[Topic], mut f: impl FnMut(Statement) -> Option ) -> Result> { + let mut result = Vec::new(); + let index = self.index.read(); + index.iter_topics(key, match_all_topics, |hash| { + match self.db.get(col::STATEMENTS, hash).map_err(|e| Error::Db(e.to_string()))? 
{ + Some(statement) => { + if let Ok(statement) = Statement::decode(&mut statement.as_slice()) { + if let Some(data) = f(statement) { + result.push(data); + } + } else { + // DB inconsistency + log::warn!(target: LOG_TARGET, "Corrupt statement {:?}", hash); + } + + } + None => { + // DB inconsistency + log::warn!(target: LOG_TARGET, "Missing statement {:?}", hash); + } + } + Ok(()) + })?; + Ok(result) + } + + /// Perform periodic store maintenance + pub async fn maintain(&self) { + } +} + +impl sp_statement_store::StatementStore for Store { + fn dump_encoded(&self) -> Result)>> { + let mut result = Vec::new(); + self.db.iter_column_while(col::STATEMENTS, |item| { + result.push((sp_statement_store::hash_encoded(&item.value), item.value)); + true + }).map_err(|e| Error::Db(e.to_string()))?; + Ok(result) + } + + /// Return all statements. + fn dump(&self) -> Result> { + let mut result = Vec::new(); + self.db.iter_column_while(col::STATEMENTS, |item| { + if let Ok(statement) = Statement::decode(&mut item.value.as_slice()) { + result.push((statement.hash(), statement)); + } + true + }).map_err(|e| Error::Db(e.to_string()))?; + Ok(result) + } + + fn statement(&self, hash: &Hash) -> Result> { + Ok(match self.db.get(col::STATEMENTS, hash.as_slice()).map_err(|e| Error::Db(e.to_string()))? { + Some(statement) => { + Some(Statement::decode(&mut statement.as_slice()).map_err(|e| Error::Decode(e.to_string()))?) + } + None => None, + }) + } + + + /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. + fn broadcasts(&self, match_all_topics: &[Topic]) -> Result>> { + self.collect_statements(None, match_all_topics, |statement| statement.into_data()) + } + + /// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers). + fn posted(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>> { + self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) + } + + /// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client. + fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>> { + self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) + } + + /// Submit a statement. + fn submit(&self, statement: Statement) -> SubmitResult { + let encoded = statement.encode(); + let hash = sp_statement_store::hash_encoded(&encoded); + //commit to the db with locked index + let mut index = self.index.write(); + if let Err(e) = self.db.commit([(col::STATEMENTS, &hash, Some(encoded))]) { + return SubmitResult::InternalError(Error::Db(e.to_string())); + } + index.insert(hash, statement); + SubmitResult::OkNew(hash) + } + + /// Submit a SCALE-encoded statement. 
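To make the read path above concrete, here is a small usage sketch of the query side served by `collect_statements`: `broadcasts` returns the payload of every stored statement that carries all of the requested topics and has no decryption key. The topic values and the `print_matches` helper are invented for illustration.

```rust
use sp_statement_store::{Result, StatementStore, Topic};

/// Illustrative query helper: list the payloads of all public statements
/// tagged with both topics (the index intersects the per-topic hash sets).
fn print_matches(store: &dyn StatementStore) -> Result<()> {
    let topic_a: Topic = [1u8; 32];
    let topic_b: Topic = [2u8; 32];
    for data in store.broadcasts(&[topic_a, topic_b])? {
        println!("matched statement payload: {} bytes", data.len());
    }
    Ok(())
}
```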
+ fn submit_encoded(&self, mut statement: &[u8]) -> SubmitResult { + match Statement::decode(&mut statement) { + Ok(decoded) => self.submit(decoded), + Err(e) => SubmitResult::Bad(e.to_string()), + } + } + + fn submit_async(&self, statement: Statement) -> std::pin::Pin + Send>> { + Box::pin(std::future::ready(self.submit(statement))) + } +} + +#[cfg(test)] +mod tests { +} + diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index 41e5cda9c11c5..d7f7086388e7f 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -159,6 +159,9 @@ pub enum Error { #[error("State Database error: {0}")] StateDatabase(String), + #[error("Statement store error: {0}")] + StatementStore(String), + #[error("Failed to set the chain head to a block that's too old.")] SetHeadTooOld, diff --git a/primitives/statement-store/Cargo.toml b/primitives/statement-store/Cargo.toml new file mode 100644 index 0000000000000..58cc6d7eaaf3e --- /dev/null +++ b/primitives/statement-store/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "sp-statement-store" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "A crate which contains primitives related to the statement store" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"] } +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +sp-core = { version = "7.0.0", default-features = false, path = "../core" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../std" } +thiserror = "1.0" + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/primitives/statement-store/README.md b/primitives/statement-store/README.md new file mode 100644 index 0000000000000..1ac38dfef7260 --- /dev/null +++ b/primitives/statement-store/README.md @@ -0,0 +1,4 @@ +A crate which contains primitives related to the statement store. This mainly +includes the statement structure. + +License: Apache-2.0 diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs new file mode 100644 index 0000000000000..dc3d647d6fb2f --- /dev/null +++ b/primitives/statement-store/src/lib.rs @@ -0,0 +1,156 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] + +//! A crate which contains statement-store primitives. 
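As a quick orientation for the primitives defined below: a statement is a list of fields headed by an authenticity `Proof`, its identity is the blake2-256 hash of its SCALE encoding, and it is handed to a `StatementStore` implementation for persistence and gossip. The sketch below is illustrative only; the dummy signature bytes would not pass real verification, and `submit_to` is not an API defined in this crate.

```rust
use codec::Encode;
use sp_statement_store::{hash_encoded, Proof, Statement, StatementStore, SubmitResult};

/// Illustrative helper: submit a statement to any store implementation
/// (for example the disk-backed store in `client/statement-store`).
fn submit_to(store: &dyn StatementStore, statement: Statement) {
    let hash = hash_encoded(&statement.encode());
    match store.submit(statement) {
        SubmitResult::OkNew(h) => assert_eq!(h, hash), // newly inserted
        SubmitResult::OkKnown(_) => println!("already known"),
        SubmitResult::Bad(reason) => println!("rejected: {}", reason),
        SubmitResult::InternalError(e) => println!("store error: {}", e),
    }
}

fn example(store: &dyn StatementStore) {
    // Dummy proof bytes, for illustration only.
    let proof = Proof::Sr25519 { signature: [0u8; 64], signer: [0u8; 32] };
    submit_to(store, Statement::new(proof));
}
```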
+ +//use sp_runtime::{DispatchError, DispatchResult}; +use sp_std::vec::Vec; +use codec::{Decode, Encode}; + +pub type Topic = [u8; 32]; +pub type DecryptionKey = [u8; 32]; +pub type Hash = [u8; 32]; + +#[cfg(feature = "std")] +pub use api::{StatementStore, SubmitResult, Error, Result}; + +/// Returns blake2-256 hash for the encoded statement. +pub fn hash_encoded(data: &[u8]) -> [u8; 32] { + sp_core::hashing::blake2_256(data) +} + +#[derive(Encode, Decode, sp_runtime::RuntimeDebug, Clone)] +pub enum Proof { + Sr25519 { signature: [u8; 64], signer: [u8; 32] }, + Ed25519 { signature: [u8; 64], signer: [u8; 32] }, + Secp256k1Ecdsa { signature: [u8; 65], signer: [u8; 33] }, + OnChain { who: [u8; 32], block_hash: [u8; 32], event_index: u64 }, +} + +#[derive(Encode, Decode, sp_runtime::RuntimeDebug, Clone)] +pub enum Field { + AuthenticityProof(Proof), + DecryptionKey(DecryptionKey), + Priority(u32), + Topic0(Topic), + Topic1(Topic), + Topic2(Topic), + Topic3(Topic), + Data(Vec), +} + +#[derive(Encode, Decode, sp_runtime::RuntimeDebug, Clone)] +pub struct Statement { + fields: Vec, +} + +impl Statement { + pub fn new(proof: Proof) -> Statement { + Statement { + fields: vec![Field::AuthenticityProof(proof)], + } + } + + pub fn hash(&self) -> [u8; 32] { + hash_encoded(&self.encode()) + } + + pub fn topic(&self, index: usize) -> Option { + for field in &self.fields[1..] { + match (field, index) { + (Field::Topic0(t), 0) => return Some(*t), + (Field::Topic1(t), 1) => return Some(*t), + (Field::Topic2(t), 2) => return Some(*t), + (Field::Topic3(t), 3) => return Some(*t), + _ => {}, + } + } + None + } + + pub fn decryption_key(&self) -> Option { + if let Some(Field::DecryptionKey(key)) = self.fields.get(1) { + Some(*key) + } else { + None + } + } + + pub fn into_data(self) -> Option> { + for field in self.fields.into_iter() { + if let Field::Data(data) = field { + return Some(data); + } + } + None + } +} + +#[cfg(feature = "std")] +mod api { + use crate::{Statement, Topic, Hash}; + use std::future::Future; + + #[derive(Debug, thiserror::Error)] + pub enum Error { + /// Database error. + #[error("Database error: {0:?}")] + Db(String), + /// Error decoding statement structure. + #[error("Error decoding statement: {0:?}")] + Decode(String), + } + + pub enum SubmitResult { + OkNew(Hash), + OkKnown(Hash), + Bad(String), + InternalError(Error), + } + + pub type Result = std::result::Result; + + pub trait StatementStore: Send + Sync { + /// Return all statements, SCALE-encoded. + fn dump_encoded(&self) -> Result)>>; + + /// Return all statements. + fn dump(&self) -> Result>; + + /// Get statement by hash. + fn statement(&self, hash: &Hash) -> Result>; + + /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. + fn broadcasts(&self, match_all_topics: &[Topic]) -> Result>>; + + /// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers). + fn posted(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>>; + + /// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client. + fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>>; + + /// Submit a statement. + fn submit(&self, statement: Statement) -> SubmitResult; + + /// Submit a SCALE-encoded statement. 
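A companion sketch from the consumer side of the trait above: a statement fetched from the store can be inspected with the accessors defined earlier in this file (`topic`, `decryption_key`, `into_data`). The `describe` helper is made up for the example.

```rust
use sp_statement_store::Statement;

/// Illustrative consumer: inspect a statement pulled out of the store.
fn describe(statement: Statement) {
    if let Some(topic) = statement.topic(0) {
        println!("first topic: {:?}", topic);
    }
    if statement.decryption_key().is_some() {
        println!("payload is addressed to a decryption key");
    }
    match statement.into_data() {
        Some(data) => println!("plain data field: {} bytes", data.len()),
        None => println!("no data field"),
    }
}
```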
+ fn submit_encoded(&self, statement: &[u8]) -> SubmitResult; + + fn submit_async(&self, statement: Statement) -> std::pin::Pin + Send>>; + } +} From ff1edaa06849efc9cdcc85d970b9850d37d340b4 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 15 Mar 2023 11:30:48 +0100 Subject: [PATCH 02/78] Sync with networking changes in master --- Cargo.lock | 1 + client/network/statement/Cargo.toml | 1 + client/network/statement/src/lib.rs | 12 ++++++++---- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7ea719e1126d4..516d953bd7fb7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8955,6 +8955,7 @@ dependencies = [ "log", "parity-scale-codec", "pin-project", + "sc-network", "sc-network-common", "sc-peerset", "sc-utils", diff --git a/client/network/statement/Cargo.toml b/client/network/statement/Cargo.toml index e5230a9de4ab9..52d05b941420f 100644 --- a/client/network/statement/Cargo.toml +++ b/client/network/statement/Cargo.toml @@ -21,6 +21,7 @@ log = "0.4.17" pin-project = "1.0.12" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../../utils/prometheus" } sc-network-common = { version = "0.10.0-dev", path = "../common" } +sc-network = { version = "0.10.0-dev", path = "../" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } sc-utils = { version = "4.0.0-dev", path = "../../utils" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index a9a1d9d842df9..47239598f99bf 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -32,13 +32,17 @@ use futures::{prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; -use sc_network_common::{ +use sc_network::{ config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, error, - protocol::{event::Event, role::ObservedRole, ProtocolName}, - service::{NetworkEventStream, NetworkNotification, NetworkPeers}, - sync::{SyncEvent, SyncEventStream}, + event::Event, + types::ProtocolName, utils::{interval, LruHashSet}, + NetworkEventStream, NetworkNotification, NetworkPeers, +}; +use sc_network_common::{ + role::ObservedRole, + sync::{SyncEvent, SyncEventStream}, }; use sp_statement_store::{Hash, Statement, StatementStore, SubmitResult}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; From 8407e402af626a050cbc5fbedbd0a324f51d158d Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 15 Mar 2023 17:53:16 +0100 Subject: [PATCH 03/78] WIP statement pallet --- Cargo.lock | 23 +++++++++ Cargo.toml | 1 + bin/node-template/node/src/service.rs | 2 +- bin/node-template/runtime/Cargo.toml | 5 ++ bin/node-template/runtime/src/lib.rs | 13 +++++ bin/node/cli/src/service.rs | 2 +- bin/node/runtime/Cargo.toml | 5 ++ bin/node/runtime/src/lib.rs | 12 +++++ client/statement-store/Cargo.toml | 5 +- client/statement-store/src/lib.rs | 2 +- client/statement-store/src/store.rs | 59 ++++++++++++++++++++-- frame/statement/Cargo.toml | 39 +++++++++++++++ frame/statement/src/lib.rs | 72 +++++++++++++++++++++++++++ primitives/statement-store/Cargo.toml | 7 ++- primitives/statement-store/src/lib.rs | 52 ++++++++++++++++++- 15 files changed, 288 insertions(+), 11 deletions(-) create mode 100644 frame/statement/Cargo.toml create mode 100644 frame/statement/src/lib.rs diff --git 
a/Cargo.lock b/Cargo.lock index 516d953bd7fb7..bb8efeeb322bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3582,6 +3582,7 @@ dependencies = [ "pallet-staking-reward-curve", "pallet-staking-runtime-api", "pallet-state-trie-migration", + "pallet-statement", "pallet-sudo", "pallet-timestamp", "pallet-tips", @@ -3607,6 +3608,7 @@ dependencies = [ "sp-runtime", "sp-session", "sp-staking", + "sp-statement-store", "sp-std", "sp-transaction-pool", "sp-version", @@ -5117,6 +5119,7 @@ dependencies = [ "pallet-aura", "pallet-balances", "pallet-grandpa", + "pallet-statement", "pallet-sudo", "pallet-template", "pallet-timestamp", @@ -5133,6 +5136,7 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", + "sp-statement-store", "sp-std", "sp-transaction-pool", "sp-version", @@ -6705,6 +6709,21 @@ dependencies = [ "zstd", ] +[[package]] +name = "pallet-statement" +version = "4.0.0-dev" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-io", + "sp-runtime", + "sp-statement-store", + "sp-std", +] + [[package]] name = "pallet-sudo" version = "4.0.0-dev" @@ -9355,7 +9374,9 @@ dependencies = [ "parity-db", "parity-scale-codec", "parking_lot 0.12.1", + "sp-api", "sp-core", + "sp-runtime", "sp-statement-store", "sp-tracing", "substrate-prometheus-endpoint", @@ -10639,8 +10660,10 @@ dependencies = [ name = "sp-statement-store" version = "4.0.0-dev" dependencies = [ + "log", "parity-scale-codec", "scale-info", + "sp-api", "sp-core", "sp-runtime", "sp-std", diff --git a/Cargo.toml b/Cargo.toml index 0aae82478aa14..5d190809efaab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -151,6 +151,7 @@ members = [ "frame/sudo", "frame/root-offences", "frame/root-testing", + "frame/statement", "frame/support", "frame/support/procedural", "frame/support/procedural/tools", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index fc6ebc6214214..4c55ffb1df2d6 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -135,7 +135,7 @@ pub fn new_partial( compatibility_mode: Default::default(), })?; - let statement_store = sc_statement_store::Store::new(config.database.path().unwrap())?; + let statement_store = sc_statement_store::Store::new(config.database.path().unwrap(), client.clone())?; Ok(sc_service::PartialComponents { client, diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 90fa6269ebe8c..262be1ce0c331 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -25,6 +25,7 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../../ frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", optional = true } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../frame/timestamp" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } +pallet-statement = { version = "4.0.0-dev", default-features = false, path = "../../../frame/statement" } frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} @@ -37,6 +38,7 @@ sp-runtime = { version = "7.0.0", default-features = false, path = 
"../../../pri sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } +sp-statement-store = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/statement-store" } sp-version = { version = "5.0.0", default-features = false, path = "../../../primitives/version" } # Used for the node template's RPCs @@ -70,6 +72,7 @@ std = [ "pallet-balances/std", "pallet-grandpa/std", "pallet-sudo/std", + "pallet-statement/std", "pallet-template/std", "pallet-timestamp/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -85,6 +88,7 @@ std = [ "sp-session/std", "sp-std/std", "sp-transaction-pool/std", + "sp-statement-store/std", "sp-version/std", "substrate-wasm-builder", ] @@ -107,6 +111,7 @@ try-runtime = [ "pallet-aura/try-runtime", "pallet-balances/try-runtime", "pallet-grandpa/try-runtime", + "pallet-statement/try-runtime", "pallet-sudo/try-runtime", "pallet-template/try-runtime", "pallet-timestamp/try-runtime", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index ac01aa95f4f12..02279f5ea799a 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -263,6 +263,8 @@ impl pallet_sudo::Config for Runtime { type RuntimeCall = RuntimeCall; } +impl pallet_statement::Config for Runtime {} + /// Configure the pallet-template in pallets/template. impl pallet_template::Config for Runtime { type RuntimeEvent = RuntimeEvent; @@ -283,6 +285,7 @@ construct_runtime!( Balances: pallet_balances, TransactionPayment: pallet_transaction_payment, Sudo: pallet_sudo, + Statement: pallet_statement, // Include the custom logic from the pallet-template in the runtime. TemplateModule: pallet_template, } @@ -387,6 +390,16 @@ impl_runtime_apis! 
{ } } + impl sp_statement_store::runtime_api::ValidateStatement for Runtime { + fn validate_statement( + source: sp_statement_store::runtime_api::StatementSource, + statement: sp_statement_store::Statement, + ) -> Result { + Statement::validate_statement(source, statement) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { Executive::offchain_worker(header) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index e91ca5bf1efe5..fc4ba33ac896f 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -282,7 +282,7 @@ pub fn new_partial( (rpc_extensions_builder, shared_voter_state2) }; - let statement_store = sc_statement_store::Store::new(config.database.path().unwrap())?; + let statement_store = sc_statement_store::Store::new(config.database.path().unwrap(), client.clone())?; Ok(sc_service::PartialComponents { client, diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 16f1d0a4cb532..048ca1ce6669c 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -39,6 +39,7 @@ sp-runtime = { version = "7.0.0", default-features = false, path = "../../../pri sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/staking" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } +sp-statement-store = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/statement-store" } sp-version = { version = "5.0.0", default-features = false, path = "../../../primitives/version" } sp-io = { version = "7.0.0", default-features = false, path = "../../../primitives/io" } @@ -104,6 +105,7 @@ pallet-staking = { version = "4.0.0-dev", default-features = false, path = "../. 
pallet-staking-reward-curve = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking/reward-curve" } pallet-staking-runtime-api = { version = "4.0.0-dev", default-features = false, path = "../../../frame/staking/runtime-api" } pallet-state-trie-migration = { version = "4.0.0-dev", default-features = false, path = "../../../frame/state-trie-migration" } +pallet-statement = { version = "4.0.0-dev", default-features = false, path = "../../../frame/statement" } pallet-scheduler = { version = "4.0.0-dev", default-features = false, path = "../../../frame/scheduler" } pallet-society = { version = "4.0.0-dev", default-features = false, path = "../../../frame/society" } pallet-sudo = { version = "4.0.0-dev", default-features = false, path = "../../../frame/sudo" } @@ -186,6 +188,7 @@ std = [ "pallet-staking/std", "pallet-staking-runtime-api/std", "pallet-state-trie-migration/std", + "pallet-statement/std", "pallet-salary/std", "sp-session/std", "pallet-sudo/std", @@ -201,6 +204,7 @@ std = [ "pallet-transaction-payment/std", "pallet-transaction-storage/std", "pallet-treasury/std", + "sp-statement-store/std", "sp-transaction-pool/std", "pallet-utility/std", "sp-version/std", @@ -327,6 +331,7 @@ try-runtime = [ "pallet-session/try-runtime", "pallet-staking/try-runtime", "pallet-state-trie-migration/try-runtime", + "pallet-statement/try-runtime", "pallet-scheduler/try-runtime", "pallet-society/try-runtime", "pallet-sudo/try-runtime", diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index d8426d3b35e15..7d2b0c4bdb6e8 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1731,6 +1731,8 @@ impl frame_benchmarking_pallet_pov::Config for Runtime { type RuntimeEvent = RuntimeEvent; } +impl pallet_statement::Config for Runtime {} + construct_runtime!( pub struct Runtime where Block = Block, @@ -1801,6 +1803,7 @@ construct_runtime!( FastUnstake: pallet_fast_unstake, MessageQueue: pallet_message_queue, Pov: frame_benchmarking_pallet_pov, + Statement: pallet_statement, } ); @@ -1972,6 +1975,15 @@ impl_runtime_apis! 
{ } } + impl sp_statement_store::runtime_api::ValidateStatement for Runtime { + fn validate_statement( + source: sp_statement_store::runtime_api::StatementSource, + statement: sp_statement_store::Statement, + ) -> Result { + Statement::validate_statement(source, statement) + } + } + impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { Executive::offchain_worker(header) diff --git a/client/statement-store/Cargo.toml b/client/statement-store/Cargo.toml index 7996a694eb93b..08c74a6ea6db7 100644 --- a/client/statement-store/Cargo.toml +++ b/client/statement-store/Cargo.toml @@ -22,11 +22,12 @@ parking_lot = "0.12.1" parity-db = "0.4.3" sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } +#sc-client-api = { version = "4.0.0-dev", path = "../api" } #sc-utils = { version = "4.0.0-dev", path = "../utils" } -#sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } #sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-core = { version = "7.0.0", path = "../../primitives/core" } -#sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } #sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 93e9bdc07559f..17d504b1b53e7 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -23,7 +23,7 @@ #![warn(unused_extern_crates)] mod store; -mod metrics; +//mod metrics; pub use store::Store; pub use sp_statement_store::{StatementStore, Error}; diff --git a/client/statement-store/src/store.rs b/client/statement-store/src/store.rs index 3f262d9afa6db..f03adaf601e1b 100644 --- a/client/statement-store/src/store.rs +++ b/client/statement-store/src/store.rs @@ -21,8 +21,11 @@ use std::{collections::{HashSet, HashMap}, sync::Arc, future::Future}; use parking_lot::RwLock; -use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Error, Hash, SubmitResult}; +use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Error, Hash, BlockHash, SubmitResult}; +use sp_statement_store::runtime_api::{ValidateStatement, ValidStatement, InvalidStatement, StatementSource}; use sp_core::{Encode, Decode}; +use sp_api::ProvideRuntimeApi; +use sp_runtime::traits::Block as BlockT; const KEY_VERSION: &[u8] = b"version".as_slice(); const CURRENT_VERSION: u32 = 1; @@ -42,10 +45,43 @@ struct Index { extended_topics: HashMap, } +struct ClientWrapper { + client: Arc, + _block: std::marker::PhantomData, +} + +impl ClientWrapper + where + Block: BlockT, + Block::Hash: From, + Client: ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: ValidateStatement, +{ + fn validate_statement( + &self, + block: BlockHash, + source: StatementSource, + statement: Statement, + ) -> std::result::Result { + let api = self.client.runtime_api(); + let block = block.into(); + match api.validate_statement(block, source, statement) { + Ok(r) => r, + Err(_) => { + Err(InvalidStatement::InternalError) + } + } + } +} + /// Statement store. 
pub struct Store { db: parity_db::Db, index: RwLock, + validate_fn: Box std::result::Result + Send + Sync>, } impl Index { @@ -100,7 +136,16 @@ impl Index { impl Store { /// Create a new shared store instance. There should only be one per process. - pub fn new(path: &std::path::Path) -> Result> { + pub fn new(path: &std::path::Path, client: Arc) -> Result> + where + Block: BlockT, + Block::Hash: From, + Client: ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: ValidateStatement, + { let mut path: std::path::PathBuf = path.into(); path.pop(); path.push("statement"); @@ -137,7 +182,14 @@ impl Store { true }).map_err(|e| Error::Db(e.to_string()))?; - Ok(Arc::new(Store { db, index: RwLock::new(index) })) + let validator = ClientWrapper { client, _block: Default::default() }; + let validate_fn = Box::new(move |block, source, statement| validator.validate_statement(block, source, statement)); + + Ok(Arc::new(Store { + db, + index: RwLock::new(index), + validate_fn, + })) } fn collect_statements(&self, key: Option, match_all_topics: &[Topic], mut f: impl FnMut(Statement) -> Option ) -> Result> { @@ -222,6 +274,7 @@ impl sp_statement_store::StatementStore for Store { fn submit(&self, statement: Statement) -> SubmitResult { let encoded = statement.encode(); let hash = sp_statement_store::hash_encoded(&encoded); + let _ = (self.validate_fn)(Default::default(), StatementSource::Local, statement.clone()); //commit to the db with locked index let mut index = self.index.write(); if let Err(e) = self.db.commit([(col::STATEMENTS, &hash, Some(encoded))]) { diff --git a/frame/statement/Cargo.toml b/frame/statement/Cargo.toml new file mode 100644 index 0000000000000..88fb1b9b7ec3b --- /dev/null +++ b/frame/statement/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "pallet-statement" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2021" +license = "Apache-2.0" +homepage = "https://substrate.io" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for statement store" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { package = "parity-scale-codec", version = "3.2.2", default-features = false, features = ["derive"]} +scale-info = { version = "2.1.1", default-features = false, features = ["derive"] } +frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } +sp-statement-store = { version = "4.0.0-dev", default-features = false, path = "../../primitives/statement-store" } +sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primitives/api" } +sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } +sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } + +[features] +default = [ "std" ] +std = [ + "codec/std", + "scale-info/std", + "frame-support/std", + "frame-system/std", + "sp-api/std", + "sp-runtime/std", + "sp-std/std", + "sp-io/std", +] +try-runtime = [ + "frame-support/try-runtime", +] diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs new file mode 100644 index 0000000000000..cc9556d73c30e --- /dev/null +++ b/frame/statement/src/lib.rs @@ -0,0 +1,72 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Supporting pallet for the statement store. +//! +//! - [`Pallet`] +//! +//! ## Overview +//! +//! The Statement pallet provides means to create and validate statements for the statement store. +//! + +#![cfg_attr(not(feature = "std"), no_std)] + +//use codec::{Decode, Encode, MaxEncodedLen}; +use sp_statement_store::Statement; +use sp_statement_store::runtime_api::{StatementSource, ValidStatement, InvalidStatement}; +use frame_support::sp_tracing::{enter_span, within_span, Level}; + +//mod mock; +//mod tests; + +pub use pallet::*; + +//const LOG_TARGET: &str = "runtime::statement"; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + + #[pallet::config] + pub trait Config: frame_system::Config {} + + #[pallet::pallet] + pub struct Pallet(sp_std::marker::PhantomData); +} + +impl Pallet { + pub fn validate_statement( + _source: StatementSource, + _statement: Statement, + ) -> Result { + sp_io::init_tracing(); + + enter_span! { Level::TRACE, "validate_statement" }; + + + within_span! { + Level::TRACE, "validate"; + } + Ok(ValidStatement { + priority: 0, + propagate: true, + }) + } + +} + diff --git a/primitives/statement-store/Cargo.toml b/primitives/statement-store/Cargo.toml index 58cc6d7eaaf3e..e8b18b3e0f54a 100644 --- a/primitives/statement-store/Cargo.toml +++ b/primitives/statement-store/Cargo.toml @@ -18,7 +18,9 @@ scale-info = { version = "2.1.1", default-features = false, features = ["derive" sp-core = { version = "7.0.0", default-features = false, path = "../core" } sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../std" } -thiserror = "1.0" +sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } +thiserror = {version = "1.0", optional = true } +log = { version = "0.4.17", optional = true } [features] default = ["std"] @@ -28,4 +30,7 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-std/std", + "sp-api/std", + "thiserror", + "log", ] diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index dc3d647d6fb2f..c758a7c5477ea 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -26,11 +26,13 @@ use codec::{Decode, Encode}; pub type Topic = [u8; 32]; pub type DecryptionKey = [u8; 32]; pub type Hash = [u8; 32]; +pub type BlockHash = [u8; 32]; #[cfg(feature = "std")] pub use api::{StatementStore, SubmitResult, Error, Result}; /// Returns blake2-256 hash for the encoded statement. 
+#[cfg(feature = "std")] pub fn hash_encoded(data: &[u8]) -> [u8; 32] { sp_core::hashing::blake2_256(data) } @@ -40,7 +42,7 @@ pub enum Proof { Sr25519 { signature: [u8; 64], signer: [u8; 32] }, Ed25519 { signature: [u8; 64], signer: [u8; 32] }, Secp256k1Ecdsa { signature: [u8; 65], signer: [u8; 33] }, - OnChain { who: [u8; 32], block_hash: [u8; 32], event_index: u64 }, + OnChain { who: [u8; 32], block_hash: BlockHash, event_index: u64 }, } #[derive(Encode, Decode, sp_runtime::RuntimeDebug, Clone)] @@ -63,10 +65,11 @@ pub struct Statement { impl Statement { pub fn new(proof: Proof) -> Statement { Statement { - fields: vec![Field::AuthenticityProof(proof)], + fields: sp_std::vec![Field::AuthenticityProof(proof)], } } + #[cfg(feature = "std")] pub fn hash(&self) -> [u8; 32] { hash_encoded(&self.encode()) } @@ -154,3 +157,48 @@ mod api { fn submit_async(&self, statement: Statement) -> std::pin::Pin + Send>>; } } + +pub mod runtime_api { + use codec::{Decode, Encode}; + use scale_info::TypeInfo; + use sp_runtime::{RuntimeDebug}; + use crate::Statement; + + /// Information concerning a valid statement. + #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] + pub struct ValidStatement { + pub priority: u64, + pub propagate: bool, + } + + /// An invalid statement. + #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug, TypeInfo)] + pub enum InvalidStatement { + Payment, + BadProof, + Stale, + InternalError, + } + + /// The source of the statement. + /// + /// Depending on the source we might apply different validation schemes. + #[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] + pub enum StatementSource { + /// Statement is coming from a local source, such as the OCW. + Local, + /// Statement has been received externally (network or RPC). + External, + } + + sp_api::decl_runtime_apis! { + /// Runtime API trait for statement validation. + pub trait ValidateStatement { + /// Validate the statement. + fn validate_statement( + source: StatementSource, + statement: Statement, + ) -> Result; + } + } +} From 21c9aaf0ca408fc90e528cf6cc54ecb87e3ae5b8 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 16 Mar 2023 19:32:09 +0100 Subject: [PATCH 04/78] Statement validation --- Cargo.lock | 2 + bin/node-template/runtime/src/lib.rs | 11 ++- bin/node/runtime/src/lib.rs | 11 ++- frame/statement/Cargo.toml | 3 + frame/statement/src/lib.rs | 127 +++++++++++++++++++++++--- frame/system/src/lib.rs | 8 ++ primitives/statement-store/src/lib.rs | 45 +++++++-- 7 files changed, 184 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bb8efeeb322bc..65b725e4b5004 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6715,9 +6715,11 @@ version = "4.0.0-dev" dependencies = [ "frame-support", "frame-system", + "log", "parity-scale-codec", "scale-info", "sp-api", + "sp-core", "sp-io", "sp-runtime", "sp-statement-store", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 02279f5ea799a..1f65fb9f09661 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -263,7 +263,15 @@ impl pallet_sudo::Config for Runtime { type RuntimeCall = RuntimeCall; } -impl pallet_statement::Config for Runtime {} +parameter_types! { + pub StatementPriorityBalance: Balance = 1000; +} + +impl pallet_statement::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type PriorityBalance = StatementPriorityBalance; +} /// Configure the pallet-template in pallets/template. 
impl pallet_template::Config for Runtime { @@ -399,7 +407,6 @@ impl_runtime_apis! { } } - impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { Executive::offchain_worker(header) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 7d2b0c4bdb6e8..b9254d44995ee 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1731,7 +1731,16 @@ impl frame_benchmarking_pallet_pov::Config for Runtime { type RuntimeEvent = RuntimeEvent; } -impl pallet_statement::Config for Runtime {} + +parameter_types! { + pub StatementPriorityBalance: Balance = 10 * CENTS; +} + +impl pallet_statement::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type PriorityBalance = StatementPriorityBalance; +} construct_runtime!( pub struct Runtime where diff --git a/frame/statement/Cargo.toml b/frame/statement/Cargo.toml index 88fb1b9b7ec3b..c9b1b92c7e039 100644 --- a/frame/statement/Cargo.toml +++ b/frame/statement/Cargo.toml @@ -21,6 +21,8 @@ sp-api = { version = "4.0.0-dev", default-features = false, path = "../../primit sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } +log = { version = "0.4.17", default-features = false } [features] default = [ "std" ] @@ -33,6 +35,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-io/std", + "sp-core/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index cc9556d73c30e..2f41d5e15aa2b 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -27,46 +27,151 @@ #![cfg_attr(not(feature = "std"), no_std)] //use codec::{Decode, Encode, MaxEncodedLen}; -use sp_statement_store::Statement; +use sp_statement_store::{Proof, Statement}; use sp_statement_store::runtime_api::{StatementSource, ValidStatement, InvalidStatement}; -use frame_support::sp_tracing::{enter_span, within_span, Level}; +use frame_support::sp_tracing::{enter_span, Level}; +use frame_support::sp_runtime::traits::{Zero, Verify}; +use frame_support::sp_runtime::SaturatedConversion; +use frame_support::traits::Currency; +use frame_support::pallet_prelude::*; //mod mock; //mod tests; pub use pallet::*; -//const LOG_TARGET: &str = "runtime::statement"; +const LOG_TARGET: &str = "runtime::statement"; + #[frame_support::pallet] pub mod pallet { use super::*; #[pallet::config] - pub trait Config: frame_system::Config {} + pub trait Config: frame_system::Config + where + ::AccountId: From<[u8; 32]>, + { + /// The overarching event type. + type RuntimeEvent: From> + + IsType<::RuntimeEvent>; + /// Account balance. + type Currency: Currency<::AccountId>; + /// Min balance for priority statements. 
+ #[pallet::constant] + type PriorityBalance: Get>; + } #[pallet::pallet] pub struct Pallet(sp_std::marker::PhantomData); + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event + where + ::AccountId: From<[u8; 32]>, + { + /// A new statement is submitted + NewStatement { statement: Statement }, + } + pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; } -impl Pallet { +impl Pallet + where + ::AccountId: From<[u8; 32]>, + ::RuntimeEvent: From>, +{ + + /// Validate a statement against current state. This is supposed ti be called by the statement + /// store on the host side. pub fn validate_statement( _source: StatementSource, - _statement: Statement, + statement: Statement, ) -> Result { sp_io::init_tracing(); enter_span! { Level::TRACE, "validate_statement" }; + log::debug!(target: LOG_TARGET, "Validating statement {:?}", statement); + let account: Option = match statement.proof() { + None => None, + Some(Proof::Sr25519 { signature, signer }) => { + let to_sign = statement.signature_material(); + let signature = sp_core::sr25519::Signature(*signature); + let public = sp_core::sr25519::Public(*signer); + if !signature.verify(to_sign.as_slice(), &public) { + log::debug!(target: LOG_TARGET, "Bad Sr25519 signature."); + return Err(InvalidStatement::BadProof); + } + Some(signer.clone().into()) + }, + Some(Proof::Ed25519 { signature, signer }) => { + let to_sign = statement.signature_material(); + let signature = sp_core::ed25519::Signature(*signature); + let public = sp_core::ed25519::Public(*signer); + if !signature.verify(to_sign.as_slice(), &public) { + log::debug!(target: LOG_TARGET, "Bad Ed25519 signature."); + return Err(InvalidStatement::BadProof); + } + Some(signer.clone().into()) + }, + Some(Proof::Secp256k1Ecdsa { signature, signer }) => { + let to_sign = statement.signature_material(); + let signature = sp_core::ecdsa::Signature(*signature); + let public = sp_core::ecdsa::Public(*signer); + if !signature.verify(to_sign.as_slice(), &public) { + log::debug!(target: LOG_TARGET, "Bad ECDSA signature."); + return Err(InvalidStatement::BadProof); + } + Some(sp_io::hashing::blake2_256(signer).into()) + }, + Some(Proof::OnChain { who, block_hash, event_index }) => { + // block_hash and event_index should be checked by the host + if frame_system::Pallet::::parent_hash().as_ref() != block_hash.as_slice() { + log::debug!(target: LOG_TARGET, "Bad block hash."); + return Err(InvalidStatement::BadProof); + } + let account_id = Some(who.clone().into()); + match frame_system::Pallet::::event_no_consensus(*event_index as usize) { + Some(e) => { + if e != (Event::NewStatement { statement: statement.strip_proof() }).into() { + log::debug!(target: LOG_TARGET, "Event mismatch"); + return Err(InvalidStatement::BadProof); + } + }, + _ => { + log::debug!(target: LOG_TARGET, "Bad event index"); + return Err(InvalidStatement::BadProof); + } + } + account_id + } + }; + let priority: u64 = if let Some(account) = account { + let priority_cost = T::PriorityBalance::get(); + if priority_cost.is_zero() { + 0 + } + else { + let balance = T::Currency::free_balance(&account); + let priority = balance / priority_cost; + priority.saturated_into() + + } + } else { + 0 + }; - - within_span! 
{ - Level::TRACE, "validate"; - } Ok(ValidStatement { - priority: 0, + priority, propagate: true, }) } + pub fn submit_statement(statement: Statement) { + Self::deposit_event(Event::NewStatement { statement }); + } + } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index faa1ee5d6cf4f..40fb53b037993 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1442,6 +1442,14 @@ impl Pallet { Self::read_events_no_consensus().map(|e| *e).collect() } + /// Get a single event at specified index. + /// + /// Should only be called if you know what you are doing and outside of the runtime block + /// execution else it can have a large impact on the PoV size of a block. + pub fn event_no_consensus(index: usize) -> Option { + Events::::get().get(index).map(|e| e.event.clone()) + } + /// Get the current events deposited by the runtime. /// /// Should only be called if you know what you are doing and outside of the runtime block diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index c758a7c5477ea..24500a743741e 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -19,9 +19,9 @@ //! A crate which contains statement-store primitives. -//use sp_runtime::{DispatchError, DispatchResult}; use sp_std::vec::Vec; use codec::{Decode, Encode}; +use scale_info::TypeInfo; pub type Topic = [u8; 32]; pub type DecryptionKey = [u8; 32]; @@ -37,7 +37,7 @@ pub fn hash_encoded(data: &[u8]) -> [u8; 32] { sp_core::hashing::blake2_256(data) } -#[derive(Encode, Decode, sp_runtime::RuntimeDebug, Clone)] +#[derive(Encode, Decode, TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq)] pub enum Proof { Sr25519 { signature: [u8; 64], signer: [u8; 32] }, Ed25519 { signature: [u8; 64], signer: [u8; 32] }, @@ -45,7 +45,7 @@ pub enum Proof { OnChain { who: [u8; 32], block_hash: BlockHash, event_index: u64 }, } -#[derive(Encode, Decode, sp_runtime::RuntimeDebug, Clone)] +#[derive(Encode, Decode, TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq)] pub enum Field { AuthenticityProof(Proof), DecryptionKey(DecryptionKey), @@ -57,7 +57,7 @@ pub enum Field { Data(Vec), } -#[derive(Encode, Decode, sp_runtime::RuntimeDebug, Clone)] +#[derive(Encode, Decode, TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq)] pub struct Statement { fields: Vec, } @@ -75,7 +75,7 @@ impl Statement { } pub fn topic(&self, index: usize) -> Option { - for field in &self.fields[1..] { + for field in &self.fields { match (field, index) { (Field::Topic0(t), 0) => return Some(*t), (Field::Topic1(t), 1) => return Some(*t), @@ -88,11 +88,12 @@ impl Statement { } pub fn decryption_key(&self) -> Option { - if let Some(Field::DecryptionKey(key)) = self.fields.get(1) { - Some(*key) - } else { - None + for field in &self.fields { + if let Field::DecryptionKey(key) = field { + return Some(*key); + } } + None } pub fn into_data(self) -> Option> { @@ -103,6 +104,32 @@ impl Statement { } None } + + pub fn proof(&self) -> Option<&Proof> { + if let Some(Field::AuthenticityProof(p)) = self.fields.get(0) { + Some(p) + } else { + None + } + } + + /// Return encoded fields that can be signed to construct or verify a proof + pub fn signature_material(&self) -> Vec { + let mut out = Vec::new(); + let skip_fields = if let Some(Field::AuthenticityProof(_)) = self.fields.get(0) { 1 } else { 0 }; + for field in &self.fields[skip_fields..] 
{ + field.encode_to(&mut out) + } + out + } + + /// Return a copy of this statement with proof removed + pub fn strip_proof(&self) -> Statement { + if let Some(Field::AuthenticityProof(_)) = self.fields.get(0) { + return Statement { fields: self.fields[1..].iter().cloned().collect() } + } + self.clone() + } } #[cfg(feature = "std")] From c70609796cedf6d5329a0185758dade24ded86b2 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 16 Mar 2023 23:26:43 +0100 Subject: [PATCH 05/78] pallet tests --- Cargo.lock | 2 + frame/statement/Cargo.toml | 4 + frame/statement/src/lib.rs | 27 ++-- frame/statement/src/mock.rs | 109 ++++++++++++++++ frame/statement/src/tests.rs | 120 ++++++++++++++++++ primitives/application-crypto/src/lib.rs | 14 +++ primitives/core/src/crypto.rs | 2 + primitives/statement-store/Cargo.toml | 2 + primitives/statement-store/src/lib.rs | 150 ++++++++++++++++++++++- 9 files changed, 413 insertions(+), 17 deletions(-) create mode 100644 frame/statement/src/mock.rs create mode 100644 frame/statement/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 65b725e4b5004..7dd76720ac03e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6716,6 +6716,7 @@ dependencies = [ "frame-support", "frame-system", "log", + "pallet-balances", "parity-scale-codec", "scale-info", "sp-api", @@ -10666,6 +10667,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "sp-api", + "sp-application-crypto", "sp-core", "sp-runtime", "sp-std", diff --git a/frame/statement/Cargo.toml b/frame/statement/Cargo.toml index c9b1b92c7e039..8f9a6269573ec 100644 --- a/frame/statement/Cargo.toml +++ b/frame/statement/Cargo.toml @@ -24,6 +24,9 @@ sp-io = { version = "7.0.0", default-features = false, path = "../../primitives/ sp-core = { version = "7.0.0", default-features = false, path = "../../primitives/core" } log = { version = "0.4.17", default-features = false } +[dev-dependencies] +pallet-balances = { version = "4.0.0-dev", path = "../balances" } + [features] default = [ "std" ] std = [ @@ -36,6 +39,7 @@ std = [ "sp-std/std", "sp-io/std", "sp-core/std", + "sp-statement-store/std", ] try-runtime = [ "frame-support/try-runtime", diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 2f41d5e15aa2b..f6c3a49f2ee06 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -35,14 +35,15 @@ use frame_support::sp_runtime::SaturatedConversion; use frame_support::traits::Currency; use frame_support::pallet_prelude::*; -//mod mock; -//mod tests; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; pub use pallet::*; const LOG_TARGET: &str = "runtime::statement"; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -72,7 +73,7 @@ pub mod pallet { ::AccountId: From<[u8; 32]>, { /// A new statement is submitted - NewStatement { statement: Statement }, + NewStatement { account: T::AccountId, statement: Statement }, } pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -81,6 +82,7 @@ pub mod pallet { impl Pallet where ::AccountId: From<[u8; 32]>, + [u8; 32]: From<::AccountId>, ::RuntimeEvent: From>, { @@ -95,7 +97,9 @@ impl Pallet enter_span! 
{ Level::TRACE, "validate_statement" }; log::debug!(target: LOG_TARGET, "Validating statement {:?}", statement); let account: Option = match statement.proof() { - None => None, + None => { + return Err(InvalidStatement::NoProof) + }, Some(Proof::Sr25519 { signature, signer }) => { let to_sign = statement.signature_material(); let signature = sp_core::sr25519::Signature(*signature); @@ -132,10 +136,10 @@ impl Pallet log::debug!(target: LOG_TARGET, "Bad block hash."); return Err(InvalidStatement::BadProof); } - let account_id = Some(who.clone().into()); + let account: T::AccountId = who.clone().into(); match frame_system::Pallet::::event_no_consensus(*event_index as usize) { Some(e) => { - if e != (Event::NewStatement { statement: statement.strip_proof() }).into() { + if e != (Event::NewStatement { account: account.clone(), statement: statement.strip_proof() }).into() { log::debug!(target: LOG_TARGET, "Event mismatch"); return Err(InvalidStatement::BadProof); } @@ -145,7 +149,7 @@ impl Pallet return Err(InvalidStatement::BadProof); } } - account_id + Some(account) } }; let priority: u64 = if let Some(account) = account { @@ -165,13 +169,10 @@ impl Pallet Ok(ValidStatement { priority, - propagate: true, }) } - pub fn submit_statement(statement: Statement) { - Self::deposit_event(Event::NewStatement { statement }); + pub fn submit_statement(account: T::AccountId, statement: Statement) { + Self::deposit_event(Event::NewStatement { account, statement }); } - } - diff --git a/frame/statement/src/mock.rs b/frame/statement/src/mock.rs new file mode 100644 index 0000000000000..c88daa8c13ad6 --- /dev/null +++ b/frame/statement/src/mock.rs @@ -0,0 +1,109 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Preimage test environment. 
+ +use super::*; + +use crate as pallet_statement; +use frame_support::{ + ord_parameter_types, + traits::{ConstU32, ConstU64, Everything}, + weights::constants::RocksDbWeight, +}; +use sp_core::{Pair, H256}; +use sp_runtime::{ + AccountId32, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, +}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system, + Balances: pallet_balances, + Statement: pallet_statement, + } +); + +impl frame_system::Config for Test { + type BaseCallFilter = Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = RocksDbWeight; + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId32; + type Lookup = IdentityLookup; + type Header = Header; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); + type OnSetCode = (); + type MaxConsumers = ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type Balance = u64; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<5>; + type AccountStore = System; + type WeightInfo = (); + type MaxLocks = (); + type MaxReserves = ConstU32<50>; + type ReserveIdentifier = [u8; 8]; +} + +ord_parameter_types! { + pub const One: u64 = 1; +} + +impl Config for Test { + type RuntimeEvent = RuntimeEvent; + type Currency = Balances; + type PriorityBalance = ConstU64<10>; +} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let balances = pallet_balances::GenesisConfig:: { + balances: vec![ + (sp_core::sr25519::Pair::from_string("//Alice", None).unwrap().public().into(), 200) + ], + }; + balances.assimilate_storage(&mut t).unwrap(); + t.into() +} + diff --git a/frame/statement/src/tests.rs b/frame/statement/src/tests.rs new file mode 100644 index 0000000000000..c0e1df8ee14a7 --- /dev/null +++ b/frame/statement/src/tests.rs @@ -0,0 +1,120 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Statement runtime support tests. 
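Before the test cases below, a quick worked example of the numbers they assert: the pallet computes priority as `free_balance / PriorityBalance` (saturated into `u64`), and the mock above funds Alice with 200 while setting `PriorityBalance` to `ConstU64<10>`, so Alice's statements validate with priority 20 and accounts with no balance get priority 0. A minimal illustrative check:

```rust
#[test]
fn priority_formula_illustration() {
    // priority = free_balance / PriorityBalance, as computed in validate_statement.
    let free_balance: u64 = 200; // Alice's balance in the mock genesis
    let priority_cost: u64 = 10; // PriorityBalance = ConstU64<10> in the mock
    assert_eq!(free_balance / priority_cost, 20);
}
```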
+ +#![cfg(test)] + +use super::*; +use crate::mock::*; +use sp_statement_store::runtime_api::{ValidStatement, InvalidStatement, StatementSource}; +use sp_statement_store::{Proof, Statement}; +use sp_core::Pair; +use sp_runtime::AccountId32; + +#[test] +fn sign_and_validate_no_balance() { + new_test_ext().execute_with(|| { + let pair = sp_core::sr25519::Pair::from_string("//Bob", None).unwrap(); + let mut statement = Statement::new(); + statement.sign_sr25519_private(&pair); + let result = Pallet::::validate_statement(StatementSource::Local, statement); + assert_eq!(Ok(ValidStatement { priority: 0 }), result); + + let pair = sp_core::ed25519::Pair::from_string("//Bob", None).unwrap(); + let mut statement = Statement::new(); + statement.sign_ed25519_private(&pair); + let result = Pallet::::validate_statement(StatementSource::Local, statement); + assert_eq!(Ok(ValidStatement { priority: 0 }), result); + + let pair = sp_core::ecdsa::Pair::from_string("//Bob", None).unwrap(); + let mut statement = Statement::new(); + statement.sign_ecdsa_private(&pair); + let result = Pallet::::validate_statement(StatementSource::Local, statement); + assert_eq!(Ok(ValidStatement { priority: 0 }), result); + }); +} + +#[test] +fn validate_with_balance() { + new_test_ext().execute_with(|| { + let pair = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); + let mut statement = Statement::new(); + statement.sign_sr25519_private(&pair); + let result = Pallet::::validate_statement(StatementSource::Local, statement); + assert_eq!(Ok(ValidStatement { priority: 20 }), result); + }); +} + +#[test] +fn validate_no_proof_fails() { + new_test_ext().execute_with(|| { + let statement = Statement::new(); + let result = Pallet::::validate_statement(StatementSource::Local, statement); + assert_eq!(Err(InvalidStatement::NoProof), result); + }); +} + +#[test] +fn validate_bad_signature_fails() { + new_test_ext().execute_with(|| { + let statement = Statement::new_with_proof(Proof::Sr25519 { signature: [0u8; 64], signer: Default::default() }); + let result = Pallet::::validate_statement(StatementSource::Local, statement); + assert_eq!(Err(InvalidStatement::BadProof), result); + }); +} + +#[test] +fn validate_event() { + new_test_ext().execute_with(|| { + let parent_hash = sp_core::H256::random(); + System::reset_events(); + System::initialize(&1, &parent_hash, &Default::default()); + let mut statement = Statement::new(); + let pair = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); + let account: AccountId32 = pair.public().into(); + Pallet::::submit_statement(account.clone(), statement.clone()); + statement.set_proof(Proof::OnChain { who: account.clone().into(), event_index: 0, block_hash: parent_hash.into() }); + let result = Pallet::::validate_statement(StatementSource::Local, statement.clone()); + assert_eq!(Ok(ValidStatement { priority: 20 }), result); + + // Use wrong event index + statement.set_proof(Proof::OnChain { who: account.clone().into(), event_index: 1, block_hash: parent_hash.into() }); + let result = Pallet::::validate_statement(StatementSource::Local, statement.clone()); + assert_eq!(Err(InvalidStatement::BadProof), result); + + // Use wrong block hash + statement.set_proof(Proof::OnChain { who: account.clone().into(), event_index: 0, block_hash: sp_core::H256::random().into() }); + let result = Pallet::::validate_statement(StatementSource::Local, statement.clone()); + assert_eq!(Err(InvalidStatement::BadProof), result); + }); +} + +#[test] +fn validate_no_event_fails() { + 
new_test_ext().execute_with(|| { + let parent_hash = sp_core::H256::random(); + System::reset_events(); + System::initialize(&1, &parent_hash, &Default::default()); + let mut statement = Statement::new(); + let pair = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); + let account: AccountId32 = pair.public().into(); + statement.set_proof(Proof::OnChain { who: account.into(), event_index: 0, block_hash: parent_hash.into() }); + let result = Pallet::::validate_statement(StatementSource::Local, statement); + assert_eq!(Err(InvalidStatement::BadProof), result); + }); +} diff --git a/primitives/application-crypto/src/lib.rs b/primitives/application-crypto/src/lib.rs index 992ecd1d05621..03dae0e9f0768 100644 --- a/primitives/application-crypto/src/lib.rs +++ b/primitives/application-crypto/src/lib.rs @@ -370,6 +370,13 @@ macro_rules! app_crypto_public_common { <$public>::try_from(data).map(Into::into) } } + + impl Public { + /// Convert into wrapped generic public key type. + pub fn into_generic(self) -> $public { + self.0 + } + } }; } @@ -533,6 +540,13 @@ macro_rules! app_crypto_signature_common { Self::try_from(&data[..]) } } + + impl Signature { + /// Convert into wrapped generic signature type. + pub fn into_generic(self) -> $sig { + self.0 + } + } }; } diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 16af3d06963ab..3419e03bb6069 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -1128,6 +1128,8 @@ pub mod key_types { pub const AUTHORITY_DISCOVERY: KeyTypeId = KeyTypeId(*b"audi"); /// Key type for staking, built-in. Identified as `stak`. pub const STAKING: KeyTypeId = KeyTypeId(*b"stak"); + /// A key type for signing statements + pub const STATEMENT: KeyTypeId = KeyTypeId(*b"stmt"); /// A key type ID useful for tests. pub const DUMMY: KeyTypeId = KeyTypeId(*b"dumy"); } diff --git a/primitives/statement-store/Cargo.toml b/primitives/statement-store/Cargo.toml index e8b18b3e0f54a..b21dc5d9bb81e 100644 --- a/primitives/statement-store/Cargo.toml +++ b/primitives/statement-store/Cargo.toml @@ -19,6 +19,7 @@ sp-core = { version = "7.0.0", default-features = false, path = "../core" } sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../std" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } +sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } thiserror = {version = "1.0", optional = true } log = { version = "0.4.17", optional = true } @@ -31,6 +32,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-api/std", + "sp-application-crypto/std", "thiserror", "log", ] diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 24500a743741e..8619a5c024576 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -22,6 +22,9 @@ use sp_std::vec::Vec; use codec::{Decode, Encode}; use scale_info::TypeInfo; +use sp_application_crypto::RuntimeAppPublic; +#[cfg(feature = "std")] +use sp_core::Pair; pub type Topic = [u8; 32]; pub type DecryptionKey = [u8; 32]; @@ -31,6 +34,48 @@ pub type BlockHash = [u8; 32]; #[cfg(feature = "std")] pub use api::{StatementStore, SubmitResult, Error, Result}; +pub mod sr25519 { + mod app_sr25519 { + use sp_application_crypto::{app_crypto, key_types::STATEMENT, sr25519}; + app_crypto!(sr25519, STATEMENT); + } + + sp_application_crypto::with_pair! 
{ + pub type Pair = app_sr25519::Pair; + } + + pub type Signature = app_sr25519::Signature; + pub type Public = app_sr25519::Public; +} + +pub mod ed25519 { + mod app_ed25519 { + use sp_application_crypto::{app_crypto, ed25519, key_types::STATEMENT}; + app_crypto!(ed25519, STATEMENT); + } + + sp_application_crypto::with_pair! { + pub type Pair = app_ed25519::Pair; + } + + pub type Signature = app_ed25519::Signature; + pub type Public = app_ed25519::Public; +} + +pub mod ecdsa { + mod app_ecdsa { + use sp_application_crypto::{app_crypto, ecdsa, key_types::STATEMENT}; + app_crypto!(ecdsa, STATEMENT); + } + + sp_application_crypto::with_pair! { + pub type Pair = app_ecdsa::Pair; + } + + pub type Signature = app_ecdsa::Signature; + pub type Public = app_ecdsa::Public; +} + /// Returns blake2-256 hash for the encoded statement. #[cfg(feature = "std")] pub fn hash_encoded(data: &[u8]) -> [u8; 32] { @@ -62,13 +107,102 @@ pub struct Statement { fields: Vec, } +#[derive(Clone, Copy, PartialEq, Eq)] +pub enum SignatureVerificationResult { + Valid, + Invalid, + NoSignature, +} + impl Statement { - pub fn new(proof: Proof) -> Statement { + pub fn new() -> Statement { Statement { - fields: sp_std::vec![Field::AuthenticityProof(proof)], + fields: Vec::new(), + } + } + + pub fn new_with_proof(proof: Proof) -> Statement { + Statement { + fields: vec![Field::AuthenticityProof(proof)], + } + } + + pub fn sign_sr25519_public(&mut self, key: &sr25519::Public) -> bool { + let to_sign = self.signature_material(); + if let Some(signature) = key.sign(&to_sign) { + let proof = Proof::Sr25519 { + signature: signature.into_generic().into(), + signer: key.clone().into_generic().into(), + }; + self.set_proof(proof); + true + } else { + false + } + } + + #[cfg(feature = "std")] + pub fn sign_sr25519_private(&mut self, key: &sp_core::sr25519::Pair) { + let to_sign = self.signature_material(); + let proof = Proof::Sr25519 { + signature: key.sign(&to_sign).into(), + signer: key.public().into(), + }; + self.set_proof(proof); + } + + pub fn sign_ed25519_public(&mut self, key: &ed25519::Public) -> bool { + let to_sign = self.signature_material(); + if let Some(signature) = key.sign(&to_sign) { + let proof = Proof::Ed25519 { + signature: signature.into_generic().into(), + signer: key.clone().into_generic().into(), + }; + self.set_proof(proof); + true + } else { + false } } + #[cfg(feature = "std")] + pub fn sign_ed25519_private(&mut self, key: &sp_core::ed25519::Pair) { + let to_sign = self.signature_material(); + let proof = Proof::Ed25519 { + signature: key.sign(&to_sign).into(), + signer: key.public().into(), + }; + self.set_proof(proof); + } + + pub fn sign_ecdsa_public(&mut self, key: &ecdsa::Public) -> bool { + let to_sign = self.signature_material(); + if let Some(signature) = key.sign(&to_sign) { + let proof = Proof::Secp256k1Ecdsa { + signature: signature.into_generic().into(), + signer: key.clone().into_generic().0, + }; + self.set_proof(proof); + true + } else { + false + } + } + + #[cfg(feature = "std")] + pub fn sign_ecdsa_private(&mut self, key: &sp_core::ecdsa::Pair) { + let to_sign = self.signature_material(); + let proof = Proof::Secp256k1Ecdsa { + signature: key.sign(&to_sign).into(), + signer: key.public().0, + }; + self.set_proof(proof); + } + + pub fn verify_signature(&self) -> SignatureVerificationResult { + SignatureVerificationResult::Valid + } + #[cfg(feature = "std")] pub fn hash(&self) -> [u8; 32] { hash_encoded(&self.encode()) @@ -130,6 +264,15 @@ impl Statement { } self.clone() } + + pub fn 
set_proof(&mut self, proof: Proof) { + if let Some(Field::AuthenticityProof(_)) = self.fields.get(0) { + self.fields[0] = Field::AuthenticityProof(proof); + } else { + self.fields.insert(0, Field::AuthenticityProof(proof)); + } + } + } #[cfg(feature = "std")] @@ -195,14 +338,13 @@ pub mod runtime_api { #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ValidStatement { pub priority: u64, - pub propagate: bool, } /// An invalid statement. #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug, TypeInfo)] pub enum InvalidStatement { - Payment, BadProof, + NoProof, Stale, InternalError, } From e066abe7162f43a05544d86bd51f36e561acbe46 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 17 Mar 2023 16:40:30 +0100 Subject: [PATCH 06/78] Validation queue --- client/network/statement/src/lib.rs | 83 +++++++++++++++++++-------- client/rpc/src/statement/mod.rs | 4 +- client/service/src/builder.rs | 7 +++ client/statement-store/src/store.rs | 43 +++++++++----- primitives/statement-store/src/lib.rs | 32 +++++++---- 5 files changed, 117 insertions(+), 52 deletions(-) diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index 47239598f99bf..00396d416976f 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -28,9 +28,8 @@ use crate::config::*; use codec::{Decode, Encode}; -use futures::{prelude::*, stream::FuturesUnordered}; +use futures::{prelude::*, stream::FuturesUnordered, channel::oneshot}; use libp2p::{multiaddr, PeerId}; -use log::{debug, trace, warn}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, @@ -44,7 +43,7 @@ use sc_network_common::{ role::ObservedRole, sync::{SyncEvent, SyncEventStream}, }; -use sp_statement_store::{Hash, Statement, StatementStore, SubmitResult}; +use sp_statement_store::{Hash, Statement, StatementStore, SubmitResult, NetworkPriority}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ collections::{hash_map::Entry, HashMap}, @@ -60,7 +59,7 @@ pub mod config; /// A set of statements. pub type Statements = Vec; /// Future resolving to statement import result. -pub type StatementImportFuture = Pin + Send>>; +pub type StatementImportFuture = oneshot::Receiver; mod rep { use sc_peerset::ReputationChange as Rep; @@ -75,8 +74,12 @@ mod rep { pub const GOOD_STATEMENT: Rep = Rep::new(1 << 7, "Good statement"); /// Reputation change when a peer sends us a bad statement. 
pub const BAD_STATEMENT: Rep = Rep::new(-(1 << 12), "Bad statement"); + /// Reputation change when a peer sends us particularly useful statement + pub const EXCELLENT_STATEMENT: Rep = Rep::new(1 << 8, "High priority statement"); } +const LOG_TARGET: &str = "statement-gossip"; + struct Metrics { propagated_statements: Counter, } @@ -103,13 +106,13 @@ struct PendingStatement { } impl Future for PendingStatement { - type Output = (Hash, SubmitResult); + type Output = (Hash, Option); fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { let mut this = self.project(); if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { - return Poll::Ready((this.hash.clone(), import_result)) + return Poll::Ready((this.hash.clone(), import_result.ok())) } Poll::Pending @@ -173,10 +176,31 @@ impl StatementHandlerPrototype { sync: S, statement_store: Arc, metrics_registry: Option<&Registry>, + executor: Box + Send>>) + Send>, ) -> error::Result<(StatementHandler, StatementHandlerController)> { let net_event_stream = network.event_stream("statement-handler-net"); let sync_event_stream = sync.event_stream("statement-handler-sync"); let (to_handler, from_controller) = tracing_unbounded("mpsc_statement_handler", 100_000); + let (queue_sender, mut queue_receiver) = tracing_unbounded("mpsc_statement_validator", 100_000); + + let store = statement_store.clone(); + executor( + async move { + loop { + let task: Option<(Statement, oneshot::Sender)> = queue_receiver.next().await; + match task { + None => return, + Some((statement, completion)) => { + let result = store.submit(statement); + if let Err(_) = completion.send(result) { + log::debug!(target: LOG_TARGET, "Error sending validation completion"); + } + } + } + } + } + .boxed(), + ); let handler = StatementHandler { protocol_name: self.protocol_name, @@ -192,6 +216,7 @@ impl StatementHandlerPrototype { peers: HashMap::new(), statement_store, from_controller, + queue_sender, metrics: if let Some(r) = metrics_registry { Some(Metrics::register(r)?) } else { @@ -258,6 +283,7 @@ pub struct StatementHandler< peers: HashMap, statement_store: Arc, from_controller: TracingUnboundedReceiver, + queue_sender: TracingUnboundedSender<(Statement, oneshot::Sender)>, /// Prometheus metrics. 
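The `executor` argument added to `build` above lets the caller decide where the statement validation loop runs; all it has to do is accept a boxed future and spawn it somewhere. A sketch of satisfying that parameter outside of the node's task manager, assuming a tokio runtime; the trait bounds here are reconstructed from how the closure is used in the validation task above:

    use std::{future::Future, pin::Pin};

    // Any spawner works; the node itself passes a task-manager spawn handle instead.
    type TaskExecutor = Box<dyn Fn(Pin<Box<dyn Future<Output = ()> + Send>>) + Send>;

    #[tokio::main]
    async fn main() {
        let executor: TaskExecutor = Box::new(|fut| {
            tokio::spawn(fut);
        });
        executor(Box::pin(async { println!("validation loop would run here"); }));
        // Give the detached task a moment to run before the runtime shuts down.
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    }
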
metrics: Option, } @@ -285,9 +311,11 @@ where }, (hash, result) = self.pending_statements.select_next_some() => { if let Some(peers) = self.pending_statements_peers.remove(&hash) { - peers.into_iter().for_each(|p| self.on_handle_statement_import(p, &result)); + if let Some(result) = result { + peers.into_iter().for_each(|p| self.on_handle_statement_import(p, &result)); + } } else { - warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending statement!"); + log::warn!(target: LOG_TARGET, "Inconsistent state, no peers for pending statement!"); } }, network_event = self.net_event_stream.next() => { @@ -326,7 +354,7 @@ where iter::once(addr).collect(), ); if let Err(err) = result { - log::error!(target: "sync", "Add reserved peer failed: {}", err); + log::error!(target: LOG_TARGET, "Add reserved peer failed: {}", err); } }, SyncEvent::PeerDisconnected(remote) => { @@ -369,7 +397,7 @@ where } // Accept statements only when node is not major syncing if self.sync.is_major_syncing() { - trace!(target: "sync", "{remote}: Ignoring statements while major syncing"); + log::trace!(target: LOG_TARGET, "{remote}: Ignoring statements while major syncing"); continue } if let Ok(statements) = @@ -377,7 +405,7 @@ where { self.on_statements(remote, statements); } else { - debug!(target: "sub-libp2p", "Failed to decode statement list from {remote}"); + log::debug!(target: LOG_TARGET, "Failed to decode statement list from {remote}"); } } }, @@ -389,12 +417,12 @@ where /// Called when peer sends us new statements fn on_statements(&mut self, who: PeerId, statements: Statements) { - trace!(target: "sync", "Received {} statements from {}", statements.len(), who); + log::trace!(target: LOG_TARGET, "Received {} statements from {}", statements.len(), who); if let Some(ref mut peer) = self.peers.get_mut(&who) { for s in statements { if self.pending_statements.len() > MAX_PENDING_STATEMENTS { - debug!( - target: "sync", + log::debug!( + target: LOG_TARGET, "Ignoring any further statements that exceed `MAX_PENDING_STATEMENTS`({}) limit", MAX_PENDING_STATEMENTS, ); @@ -408,11 +436,14 @@ where match self.pending_statements_peers.entry(hash.clone()) { Entry::Vacant(entry) => { - self.pending_statements.push(PendingStatement { - validation: self.statement_store.submit_async(s), - hash, - }); - entry.insert(vec![who]); + let (completion_sender, completion_receiver) = oneshot::channel(); + if let Ok(()) = self.queue_sender.unbounded_send((s, completion_sender)) { + self.pending_statements.push(PendingStatement { + validation: completion_receiver, + hash, + }); + entry.insert(vec![who]); + } }, Entry::Occupied(mut entry) => { entry.get_mut().push(who); @@ -424,9 +455,11 @@ where fn on_handle_statement_import(&mut self, who: PeerId, import: &SubmitResult) { match import { - SubmitResult::OkNew(_) => - self.network.report_peer(who, rep::ANY_STATEMENT_REFUND), - SubmitResult::OkKnown(_) => self.network.report_peer(who, rep::GOOD_STATEMENT), + SubmitResult::OkNew(NetworkPriority::High) => + self.network.report_peer(who, rep::EXCELLENT_STATEMENT), + SubmitResult::OkNew(NetworkPriority::Low) => + self.network.report_peer(who, rep::GOOD_STATEMENT), + SubmitResult::OkKnown => self.network.report_peer(who, rep::ANY_STATEMENT_REFUND), SubmitResult::Bad(_) => self.network.report_peer(who, rep::BAD_STATEMENT), SubmitResult::InternalError(_) => {}, } @@ -439,7 +472,7 @@ where return } - debug!(target: "sync", "Propagating statement [{:?}]", hash); + log::debug!(target: LOG_TARGET, "Propagating statement [{:?}]", hash); if let 
Ok(Some(statement)) = self.statement_store.statement(hash) { self.do_propagate_statements(&[(hash.clone(), statement)]); } @@ -466,7 +499,7 @@ where propagated_statements += hashes.len(); if !to_send.is_empty() { - trace!(target: "sync", "Sending {} statements to {}", to_send.len(), who); + log::trace!(target: LOG_TARGET, "Sending {} statements to {}", to_send.len(), who); self.network .write_notification(*who, self.protocol_name.clone(), to_send.encode()); } @@ -484,7 +517,7 @@ where return } - debug!(target: "sync", "Propagating statements"); + log::debug!(target: LOG_TARGET, "Propagating statements"); if let Ok(statements) = self.statement_store.dump() { self.do_propagate_statements(&statements); } diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index 571ccf2a7dfc2..4e0e75db694ea 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -83,8 +83,8 @@ impl StatementApiServer for StatementStore { fn submit(&self, encoded: Bytes) -> RpcResult<()> { match self.store.submit_encoded(&encoded) { - SubmitResult::OkNew(_) | SubmitResult::OkKnown(_) => Ok(()), - SubmitResult::Bad(e) => Err(Error::StatementStore(e).into()), + SubmitResult::OkNew(_) | SubmitResult::OkKnown => Ok(()), + SubmitResult::Bad(e) => Err(Error::StatementStore(e.into()).into()), SubmitResult::InternalError(e) => Err(Error::StatementStore(e.to_string()).into()), } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index d152ef50ec618..341afa96c00c8 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -1009,12 +1009,19 @@ where )?; spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); + let statement_protocol_executor = { + let spawn_handle = Clone::clone(&spawn_handle); + Box::new(move |fut| { + spawn_handle.spawn("network-statement-validator", Some("networking"), fut); + }) + }; // crate statement goissip protocol and add it to the list of supported protocols of `network_params` let (statement_handler, statement_handler_controller) = statement_handler_proto.build( network.clone(), sync_service.clone(), statement_store.clone(), config.prometheus_config.as_ref().map(|config| &config.registry), + statement_protocol_executor, )?; spawn_handle.spawn("network-statement-handler", Some("networking"), statement_handler.run()); diff --git a/client/statement-store/src/store.rs b/client/statement-store/src/store.rs index f03adaf601e1b..39b6a375236a6 100644 --- a/client/statement-store/src/store.rs +++ b/client/statement-store/src/store.rs @@ -18,10 +18,10 @@ //! Disk-backed statement store. 
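The handler above never blocks its own event loop on validation: `on_statements` pushes `(statement, oneshot sender)` pairs onto `queue_sender`, the task spawned by `executor` calls `store.submit` and answers through the oneshot channel, and `PendingStatement` resolves once the answer (or a cancellation) arrives. A reduced, self-contained model of that request/response pattern, using plain `futures` channels and a toy worker in place of the store:

    use futures::{channel::{mpsc, oneshot}, executor::block_on, StreamExt};

    fn main() {
        block_on(async {
            // Requests carry a oneshot sender so the worker can report back per item.
            let (tx, mut rx) = mpsc::unbounded::<(u64, oneshot::Sender<u64>)>();

            // Worker: stands in for the task that calls `store.submit(statement, source)`.
            let worker = async move {
                while let Some((value, completion)) = rx.next().await {
                    let _ = completion.send(value * 2);
                }
            };

            // Client: stands in for `on_statements` queueing work and awaiting the outcome.
            let client = async move {
                let (completion_tx, completion_rx) = oneshot::channel();
                let _ = tx.unbounded_send((21, completion_tx));
                assert_eq!(completion_rx.await, Ok(42));
                // Dropping the sender closes the queue and ends the worker loop.
                drop(tx);
            };

            futures::join!(worker, client);
        });
    }
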
-use std::{collections::{HashSet, HashMap}, sync::Arc, future::Future}; +use std::{collections::{HashSet, HashMap}, sync::Arc}; use parking_lot::RwLock; -use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Error, Hash, BlockHash, SubmitResult}; +use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Error, Hash, BlockHash, SubmitResult, NetworkPriority}; use sp_statement_store::runtime_api::{ValidateStatement, ValidStatement, InvalidStatement, StatementSource}; use sp_core::{Encode, Decode}; use sp_api::ProvideRuntimeApi; @@ -100,7 +100,6 @@ impl Index { if let Some(key) = statement.decryption_key() { self.by_dec_key.entry(key).or_default().insert(hash); } - } fn iter_topics(&self, key: Option, topics: &[Topic], mut f: impl FnMut(&Hash) -> Result<()>) -> Result<()> { @@ -274,27 +273,41 @@ impl sp_statement_store::StatementStore for Store { fn submit(&self, statement: Statement) -> SubmitResult { let encoded = statement.encode(); let hash = sp_statement_store::hash_encoded(&encoded); - let _ = (self.validate_fn)(Default::default(), StatementSource::Local, statement.clone()); - //commit to the db with locked index - let mut index = self.index.write(); - if let Err(e) = self.db.commit([(col::STATEMENTS, &hash, Some(encoded))]) { - return SubmitResult::InternalError(Error::Db(e.to_string())); + let validation_result = (self.validate_fn)(Default::default(), StatementSource::Local, statement.clone()); + match validation_result { + Ok(ValidStatement { priority }) => { + //commit to the db with locked index + let mut index = self.index.write(); + if let Err(e) = self.db.commit([(col::STATEMENTS, &hash, Some(encoded))]) { + log::debug!(target: LOG_TARGET, "Statement validation failed: database error {}, {:?}", e, statement); + return SubmitResult::InternalError(Error::Db(e.to_string())); + } + index.insert(hash, statement); + let network_priority = if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; + SubmitResult::OkNew(network_priority) + } + Err(InvalidStatement::BadProof) => { + log::debug!(target: LOG_TARGET, "Statement validation failed: BadProof, {:?}", statement); + SubmitResult::Bad("Bad statement proof") + }, + Err(InvalidStatement::NoProof) =>{ + log::debug!(target: LOG_TARGET, "Statement validation failed: NoProof, {:?}", statement); + SubmitResult::Bad("Missing statement proof") + }, + Err(InvalidStatement::InternalError) => SubmitResult::InternalError(Error::Runtime), } - index.insert(hash, statement); - SubmitResult::OkNew(hash) } /// Submit a SCALE-encoded statement. fn submit_encoded(&self, mut statement: &[u8]) -> SubmitResult { match Statement::decode(&mut statement) { Ok(decoded) => self.submit(decoded), - Err(e) => SubmitResult::Bad(e.to_string()), + Err(e) => { + log::debug!(target: LOG_TARGET, "Error decoding submitted statement. 
Failed with: {}", e); + SubmitResult::Bad("Bad SCALE encoding") + } } } - - fn submit_async(&self, statement: Statement) -> std::pin::Pin + Send>> { - Box::pin(std::future::ready(self.submit(statement))) - } } #[cfg(test)] diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 8619a5c024576..32588b826af83 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -32,7 +32,7 @@ pub type Hash = [u8; 32]; pub type BlockHash = [u8; 32]; #[cfg(feature = "std")] -pub use api::{StatementStore, SubmitResult, Error, Result}; +pub use api::{StatementStore, SubmitResult, Error, Result, NetworkPriority}; pub mod sr25519 { mod app_sr25519 { @@ -123,7 +123,7 @@ impl Statement { pub fn new_with_proof(proof: Proof) -> Statement { Statement { - fields: vec![Field::AuthenticityProof(proof)], + fields: [Field::AuthenticityProof(proof)].to_vec(), } } @@ -278,7 +278,7 @@ impl Statement { #[cfg(feature = "std")] mod api { use crate::{Statement, Topic, Hash}; - use std::future::Future; + pub use crate::runtime_api::StatementSource; #[derive(Debug, thiserror::Error)] pub enum Error { @@ -288,12 +288,27 @@ mod api { /// Error decoding statement structure. #[error("Error decoding statement: {0:?}")] Decode(String), + /// Error making runtime call. + #[error("Error calling into the runtime")] + Runtime, } + #[derive(Debug, PartialEq, Eq)] + pub enum NetworkPriority { + High, + Low, + } + + /// Statement submission outcome + #[derive(Debug)] pub enum SubmitResult { - OkNew(Hash), - OkKnown(Hash), - Bad(String), + /// Accepted as new with given score + OkNew(NetworkPriority), + /// Known statement + OkKnown, + /// Statement failed validation. + Bad(&'static str), + /// Internal store error. InternalError(Error), } @@ -323,15 +338,13 @@ mod api { /// Submit a SCALE-encoded statement. fn submit_encoded(&self, statement: &[u8]) -> SubmitResult; - - fn submit_async(&self, statement: Statement) -> std::pin::Pin + Send>>; } } pub mod runtime_api { use codec::{Decode, Encode}; use scale_info::TypeInfo; - use sp_runtime::{RuntimeDebug}; + use sp_runtime::RuntimeDebug; use crate::Statement; /// Information concerning a valid statement. 
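With the richer `SubmitResult` above, callers other than the gossip handler mostly care about "accepted or not"; the network priority only matters for peer reputation. A small sketch of collapsing the variants for such a caller, where the helper function is illustrative and only the enum comes from this crate:

    use sp_statement_store::{NetworkPriority, SubmitResult};

    // Accepted statements yield the gossip priority of a *new* entry, `None` if already known;
    // everything else is surfaced as an error string.
    fn into_outcome(result: SubmitResult) -> Result<Option<NetworkPriority>, String> {
        match result {
            SubmitResult::OkNew(priority) => Ok(Some(priority)),
            SubmitResult::OkKnown => Ok(None),
            SubmitResult::Bad(reason) => Err(reason.to_string()),
            SubmitResult::InternalError(e) => Err(e.to_string()),
        }
    }
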
@@ -345,7 +358,6 @@ pub mod runtime_api { pub enum InvalidStatement { BadProof, NoProof, - Stale, InternalError, } From bfbbc5bd389cbe41209bed7530a5299c9dd7c1ed Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 20 Mar 2023 12:29:58 +0100 Subject: [PATCH 07/78] Store maintenance --- client/network/statement/src/lib.rs | 11 +- client/rpc/src/statement/mod.rs | 9 +- client/service/src/builder.rs | 22 +- client/statement-store/src/store.rs | 363 ++++++++++++++++++++------ primitives/statement-store/src/lib.rs | 33 ++- 5 files changed, 332 insertions(+), 106 deletions(-) diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index 00396d416976f..9cdaae33075ae 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -43,7 +43,7 @@ use sc_network_common::{ role::ObservedRole, sync::{SyncEvent, SyncEventStream}, }; -use sp_statement_store::{Hash, Statement, StatementStore, SubmitResult, NetworkPriority}; +use sp_statement_store::{Hash, Statement, StatementSource, StatementStore, SubmitResult, NetworkPriority}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ collections::{hash_map::Entry, HashMap}, @@ -191,7 +191,7 @@ impl StatementHandlerPrototype { match task { None => return, Some((statement, completion)) => { - let result = store.submit(statement); + let result = store.submit(statement, StatementSource::Network); if let Err(_) = completion.send(result) { log::debug!(target: LOG_TARGET, "Error sending validation completion"); } @@ -455,11 +455,12 @@ where fn on_handle_statement_import(&mut self, who: PeerId, import: &SubmitResult) { match import { - SubmitResult::OkNew(NetworkPriority::High) => + SubmitResult::New(NetworkPriority::High) => self.network.report_peer(who, rep::EXCELLENT_STATEMENT), - SubmitResult::OkNew(NetworkPriority::Low) => + SubmitResult::New(NetworkPriority::Low) => self.network.report_peer(who, rep::GOOD_STATEMENT), - SubmitResult::OkKnown => self.network.report_peer(who, rep::ANY_STATEMENT_REFUND), + SubmitResult::Known => self.network.report_peer(who, rep::ANY_STATEMENT_REFUND), + SubmitResult::KnownExpired => {}, SubmitResult::Bad(_) => self.network.report_peer(who, rep::BAD_STATEMENT), SubmitResult::InternalError(_) => {}, } diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index 4e0e75db694ea..af296ec28b1d9 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -27,8 +27,7 @@ use jsonrpsee::core::{async_trait, RpcResult}; pub use sc_rpc_api::statement::{error::Error, StatementApiServer}; use sc_rpc_api::DenyUnsafe; use sp_core::Bytes; -use sp_statement_store::SubmitResult; -//use sp_statement_store::StatementStore; +use sp_statement_store::{SubmitResult, StatementSource}; use std::sync::Arc; /// Statement store API @@ -82,8 +81,10 @@ impl StatementApiServer for StatementStore { } fn submit(&self, encoded: Bytes) -> RpcResult<()> { - match self.store.submit_encoded(&encoded) { - SubmitResult::OkNew(_) | SubmitResult::OkKnown => Ok(()), + match self.store.submit_encoded(&encoded, StatementSource::Rpc) { + SubmitResult::New(_) | SubmitResult::Known => Ok(()), + // `KnownExpired` should not happen. Expired statements submitted with `StatementSource::Rpc` should be renewed. 
+ SubmitResult::KnownExpired => Err(Error::StatementStore("Submitted an expired statement".into()).into()), SubmitResult::Bad(e) => Err(Error::StatementStore(e.into()).into()), SubmitResult::InternalError(e) => Err(Error::StatementStore(e.to_string()).into()), } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 341afa96c00c8..e210511f7a1a7 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -514,11 +514,18 @@ where ), ); - // Inform the statement store about finalized blocks. + // Perform periodic statement store maintenance + let store = statement_store.clone(); spawn_handle.spawn( "statement-store-notifications", Some("statement-store"), - statement_store_notifications(client.clone(), statement_store.clone()), + async move { + let mut interval = tokio::time::interval(std::time::Duration::from_millis(10)); + loop { + interval.tick().await; + store.maintain(); + } + } ); // Prometheus metrics. @@ -615,17 +622,6 @@ async fn transaction_notifications( .await; } -async fn statement_store_notifications(client: Arc, store: Arc) -where - Block: BlockT, - Client: sc_client_api::BlockchainEvents, -{ - let finality_stream = client.finality_notification_stream().fuse(); - finality_stream - .for_each(|_evt| store.maintain()) - .await -} - fn init_telemetry( config: &mut Configuration, network: Network, diff --git a/client/statement-store/src/store.rs b/client/statement-store/src/store.rs index 39b6a375236a6..fbb56ed15e6a0 100644 --- a/client/statement-store/src/store.rs +++ b/client/statement-store/src/store.rs @@ -18,8 +18,7 @@ //! Disk-backed statement store. -use std::{collections::{HashSet, HashMap}, sync::Arc}; - +use std::{collections::{HashSet, HashMap, BinaryHeap}, sync::Arc}; use parking_lot::RwLock; use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Error, Hash, BlockHash, SubmitResult, NetworkPriority}; use sp_statement_store::runtime_api::{ValidateStatement, ValidStatement, InvalidStatement, StatementSource}; @@ -32,17 +31,47 @@ const CURRENT_VERSION: u32 = 1; const LOG_TARGET: &str = "statement"; +const EXPIRE_AFTER: u64 = 24 * 60 * 60; //24h +const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h + +#[allow(dead_code)] +pub const MAINTENANCE_PERIOD: std::time::Duration = std::time::Duration::from_secs(30); + mod col { pub const META: u8 = 0; pub const STATEMENTS: u8 = 1; + pub const COUNT: u8 = 2; } +#[derive(PartialEq, Eq)] +struct EvictionPriority { + hash: Hash, + priority: u64, + timestamp: u64, +} + +impl PartialOrd for EvictionPriority { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.priority.cmp(&other.priority).then_with(|| self.timestamp.cmp(&other.timestamp)).reverse()) + } +} + +impl Ord for EvictionPriority { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.priority.cmp(&other.priority).then_with(|| self.timestamp.cmp(&other.timestamp)).reverse() + } +} + #[derive(Default)] struct Index { by_topic: HashMap>, by_dec_key: HashMap>, - extended_topics: HashMap, + all_topics: HashMap; 4], Option)>, + by_priority: BinaryHeap, + entries: HashMap, + expired: HashMap, + max_entries: usize, } struct ClientWrapper { @@ -82,27 +111,73 @@ pub struct Store { db: parity_db::Db, index: RwLock, validate_fn: Box std::result::Result + Send + Sync>, + time_overrite: Option, +} + +#[derive(Encode, Decode, Clone)] +pub struct StatementMeta { + priority: u64, + timestamp: u64, +} + +#[derive(Encode, Decode)] +pub struct StatementWithMeta { + meta: StatementMeta, + statement: Statement, +} + +enum 
IndexQuery { + Unknown, + Exists(u64), + Expired(u64), } impl Index { - fn insert(&mut self, hash: Hash, statement: Statement) { - let mut ext_topics = [Topic::default(); 3]; + fn insert_with_meta(&mut self, hash: Hash, statement: StatementWithMeta) { + let mut all_topics = [None; 4]; let mut nt = 0; + let StatementWithMeta { statement, meta } = statement; while let Some(t) = statement.topic(nt) { - if nt == 0 { - self.by_topic.entry(t).or_default().insert(hash); - } else { - ext_topics[nt - 1] = t; - } + self.by_topic.entry(t).or_default().insert(hash); + all_topics[nt] = Some(t); nt += 1; } - self.extended_topics.insert(hash, ext_topics); - if let Some(key) = statement.decryption_key() { - self.by_dec_key.entry(key).or_default().insert(hash); + let key = statement.decryption_key(); + if let Some(k) = &key { + self.by_dec_key.entry(k.clone()).or_default().insert(hash); + } + if nt > 0 || key.is_some() { + self.all_topics.insert(hash, (all_topics, key)); + } + self.expired.remove(&hash); + if self.entries.insert(hash, meta.clone()).is_none() { + self.by_priority.push(EvictionPriority { + hash, + priority: meta.priority, + timestamp: meta.timestamp, + }); } } - fn iter_topics(&self, key: Option, topics: &[Topic], mut f: impl FnMut(&Hash) -> Result<()>) -> Result<()> { + fn query(&self, hash: &Hash) -> IndexQuery { + if let Some(meta) = self.entries.get(hash) { + return IndexQuery::Exists(meta.priority); + } + if let Some(meta) = self.expired.get(hash) { + return IndexQuery::Expired(meta.priority); + } + IndexQuery::Unknown + } + + fn insert_expired(&mut self, hash: Hash, meta: StatementMeta) { + self.expired.insert(hash, meta); + } + + fn is_expired(&self, hash: &Hash) -> bool { + self.expired.contains_key(hash) + } + + fn iter(&self, key: Option, topics: &[Topic], mut f: impl FnMut(&Hash) -> Result<()>) -> Result<()> { let mut sets: [Option<&HashSet>; 4] = Default::default(); let mut num_sets = 0; for t in topics { @@ -112,25 +187,105 @@ impl Index { } } if num_sets == 0 && key.is_none() { - return Ok(()); - } - sets[0..num_sets].sort_by_key(|s| s.map_or(0, HashSet::len)); - if let Some(key) = key { - let key_set = if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; - for item in key_set { - if sets.iter().all(|set| set.unwrap().contains(item)) { - f(item)? - } + // Iterate all entries + for h in self.entries.keys() { + f(h)? } } else { - for item in sets[0].unwrap() { - if sets[1 .. num_sets].iter().all(|set| set.unwrap().contains(item)) { - f(item)? + // Start with the smallest topic set or the key set. + sets[0..num_sets].sort_by_key(|s| s.map_or(0, HashSet::len)); + if let Some(key) = key { + let key_set = if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; + for item in key_set { + if sets.iter().all(|set| set.unwrap().contains(item)) { + f(item)? + } + } + } else { + for item in sets[0].unwrap() { + if sets[1 .. num_sets].iter().all(|set| set.unwrap().contains(item)) { + f(item)? + } } } } Ok(()) } + + fn maintain(&mut self, current_time: u64) -> Vec<(parity_db::ColId, Vec, Option>)> { + // Purge previously expired messages. + let mut purged = Vec::new(); + self.expired.retain(|hash, meta| { + if meta.timestamp + PURGE_AFTER > current_time { + purged.push((col::STATEMENTS, hash.to_vec(), None)); + false + } else { + true + } + }); + + // Expire messages. 
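Two constants drive this maintenance pass: a statement stops being served and gossiped once it has been in the store longer than `EXPIRE_AFTER`, and its record (kept so that late copies arriving from the network can still be recognised) is dropped entirely once it is older than `PURGE_AFTER`. A standalone sketch of that intended timeline, assuming timestamps are Unix seconds as above:

    const EXPIRE_AFTER: u64 = 24 * 60 * 60;     // 24h, matches the constant above
    const PURGE_AFTER: u64 = 2 * 24 * 60 * 60;  // 48h, matches the constant above

    fn is_expired(inserted_at: u64, now: u64) -> bool {
        now.saturating_sub(inserted_at) > EXPIRE_AFTER
    }

    fn is_purged(inserted_at: u64, now: u64) -> bool {
        now.saturating_sub(inserted_at) > PURGE_AFTER
    }

    fn main() {
        let inserted_at = 1_000_000;
        assert!(!is_expired(inserted_at, inserted_at + 3_600));        // 1h later: still live
        assert!(is_expired(inserted_at, inserted_at + 30 * 60 * 60));  // 30h later: expired, still indexed
        assert!(is_purged(inserted_at, inserted_at + 50 * 60 * 60));   // 50h later: record removed
    }
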
+ let mut num_expired = 0; + self.entries.retain(|hash, meta| { + if meta.timestamp + EXPIRE_AFTER > current_time { + if let Some((topics, key)) = self.all_topics.remove(hash) { + for t in topics { + if let Some(t) = t { + if let Some(set) = self.by_topic.get_mut(&t) { + set.remove(hash); + } + } + } + if let Some(k) = key { + if let Some(set) = self.by_dec_key.get_mut(&k) { + set.remove(hash); + } + } + } + self.expired.insert(hash.clone(), meta.clone()); + num_expired += 1; + false + } else { + true + } + }); + if num_expired > 0 { + // Rebuild the priority queue + self.by_priority = self.entries.iter().map(|(hash, meta)| EvictionPriority { + hash: hash.clone(), + priority: meta.priority, + timestamp: meta.timestamp, + }).collect(); + } + purged + } + + fn evict(&mut self) -> Vec<(parity_db::ColId, Vec, Option>)> { + let mut evicted_set = Vec::new(); + while self.by_priority.len() > self.max_entries { + if let Some(evicted) = self.by_priority.pop() { + self.entries.remove(&evicted.hash); + if let Some((topics, key)) = self.all_topics.remove(&evicted.hash) { + for t in topics { + if let Some(t) = t { + if let Some(set) = self.by_topic.get_mut(&t) { + set.remove(&evicted.hash); + } + } + } + if let Some(k) = key { + if let Some(set) = self.by_dec_key.get_mut(&k) { + set.remove(&evicted.hash); + } + } + } + evicted_set.push((col::STATEMENTS, evicted.hash.to_vec(), None)); + } else { + break; + } + } + evicted_set + } } impl Store { @@ -171,34 +326,47 @@ impl Store { } } - let mut index = Index::default(); - db.iter_column_while(col::STATEMENTS, |item| { - let statement = item.value; - let hash = sp_statement_store::hash_encoded(&statement); - if let Ok(statement) = Statement::decode(&mut statement.as_slice()) { - index.insert(hash, statement); - } - true - }).map_err(|e| Error::Db(e.to_string()))?; - + let index = Index::default(); let validator = ClientWrapper { client, _block: Default::default() }; let validate_fn = Box::new(move |block, source, statement| validator.validate_statement(block, source, statement)); - Ok(Arc::new(Store { + let store = Arc::new(Store { db, index: RwLock::new(index), validate_fn, - })) + time_overrite: None, + }); + store.populate()?; + Ok(store) + } + + fn populate(&self) -> Result<()> { + let current_time = self.timestamp(); + let mut index = self.index.write(); + self.db.iter_column_while(col::STATEMENTS, |item| { + let statement = item.value; + if let Ok(statement_with_meta) = StatementWithMeta::decode(&mut statement.as_slice()) { + let hash = statement_with_meta.statement.hash(); + if statement_with_meta.meta.timestamp + EXPIRE_AFTER > current_time { + index.insert_expired(hash, statement_with_meta.meta); + } else { + index.insert_with_meta(hash, statement_with_meta); + } + } + true + }).map_err(|e| Error::Db(e.to_string()))?; + + Ok(()) } fn collect_statements(&self, key: Option, match_all_topics: &[Topic], mut f: impl FnMut(Statement) -> Option ) -> Result> { let mut result = Vec::new(); let index = self.index.read(); - index.iter_topics(key, match_all_topics, |hash| { + index.iter(key, match_all_topics, |hash| { match self.db.get(col::STATEMENTS, hash).map_err(|e| Error::Db(e.to_string()))? 
{ - Some(statement) => { - if let Ok(statement) = Statement::decode(&mut statement.as_slice()) { - if let Some(data) = f(statement) { + Some(entry) => { + if let Ok(statement) = StatementWithMeta::decode(&mut entry.as_slice()) { + if let Some(data) = f(statement.statement) { result.push(data); } } else { @@ -218,7 +386,16 @@ impl Store { } /// Perform periodic store maintenance - pub async fn maintain(&self) { + pub fn maintain(&self) { + let deleted = self.index.write().maintain(self.timestamp()); + if let Err(e) = self.db.commit(deleted) { + log::warn!(target: LOG_TARGET, "Error writing to the statement database: {:?}", e); + } + } + + fn timestamp(&self) -> u64 { + self.time_overrite.unwrap_or_else(|| + std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs()) } } @@ -226,7 +403,14 @@ impl sp_statement_store::StatementStore for Store { fn dump_encoded(&self) -> Result)>> { let mut result = Vec::new(); self.db.iter_column_while(col::STATEMENTS, |item| { - result.push((sp_statement_store::hash_encoded(&item.value), item.value)); + if let Ok(entry) = StatementWithMeta::decode(&mut item.value.as_slice()) { + entry.statement.using_encoded(|statement| { + let hash = sp_statement_store::hash_encoded(statement); + if !self.index.read().is_expired(&hash) { + result.push((hash, entry.statement.encode())); + } + }); + } true }).map_err(|e| Error::Db(e.to_string()))?; Ok(result) @@ -236,24 +420,27 @@ impl sp_statement_store::StatementStore for Store { fn dump(&self) -> Result> { let mut result = Vec::new(); self.db.iter_column_while(col::STATEMENTS, |item| { - if let Ok(statement) = Statement::decode(&mut item.value.as_slice()) { - result.push((statement.hash(), statement)); + if let Ok(entry) = StatementWithMeta::decode(&mut item.value.as_slice()) { + let hash = entry.statement.hash(); + if !self.index.read().is_expired(&hash) { + result.push((hash, entry.statement)); + } } true }).map_err(|e| Error::Db(e.to_string()))?; Ok(result) } + /// Returns a statement by hash. fn statement(&self, hash: &Hash) -> Result> { Ok(match self.db.get(col::STATEMENTS, hash.as_slice()).map_err(|e| Error::Db(e.to_string()))? { - Some(statement) => { - Some(Statement::decode(&mut statement.as_slice()).map_err(|e| Error::Decode(e.to_string()))?) + Some(entry) => { + Some(StatementWithMeta::decode(&mut entry.as_slice()).map_err(|e| Error::Decode(e.to_string()))?.statement) } None => None, }) } - /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. fn broadcasts(&self, match_all_topics: &[Topic]) -> Result>> { self.collect_statements(None, match_all_topics, |statement| statement.into_data()) @@ -269,39 +456,67 @@ impl sp_statement_store::StatementStore for Store { self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) } - /// Submit a statement. 
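Eviction above pops from `by_priority` until the store is back under `max_entries`. `BinaryHeap` is a max-heap, so `EvictionPriority` reverses its ordering to make the lowest-priority entry, oldest first among ties, come out on top. A standalone model of that trick with illustrative names:

    use std::{cmp::Ordering, collections::BinaryHeap};

    #[derive(PartialEq, Eq)]
    struct Candidate { priority: u64, timestamp: u64 }

    impl Ord for Candidate {
        fn cmp(&self, other: &Self) -> Ordering {
            // Same ordering as `EvictionPriority`: compare (priority, timestamp), then reverse,
            // so the max-heap yields the least valuable entry first.
            self.priority
                .cmp(&other.priority)
                .then_with(|| self.timestamp.cmp(&other.timestamp))
                .reverse()
        }
    }

    impl PartialOrd for Candidate {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
    }

    fn main() {
        let mut heap: BinaryHeap<Candidate> = BinaryHeap::new();
        heap.push(Candidate { priority: 10, timestamp: 1 });
        heap.push(Candidate { priority: 1, timestamp: 5 });
        heap.push(Candidate { priority: 1, timestamp: 2 });
        let first = heap.pop().unwrap();
        assert_eq!((first.priority, first.timestamp), (1, 2));
    }
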
- fn submit(&self, statement: Statement) -> SubmitResult { - let encoded = statement.encode(); - let hash = sp_statement_store::hash_encoded(&encoded); - let validation_result = (self.validate_fn)(Default::default(), StatementSource::Local, statement.clone()); - match validation_result { - Ok(ValidStatement { priority }) => { - //commit to the db with locked index - let mut index = self.index.write(); - if let Err(e) = self.db.commit([(col::STATEMENTS, &hash, Some(encoded))]) { - log::debug!(target: LOG_TARGET, "Statement validation failed: database error {}, {:?}", e, statement); - return SubmitResult::InternalError(Error::Db(e.to_string())); + /// Submit a statement to the store. Validates the statement and returns validation result. + fn submit(&self, statement: Statement, source: StatementSource) -> SubmitResult { + let hash = statement.hash(); + let priority = match self.index.read().query(&hash) { + IndexQuery::Expired(priority) => { + if !source.can_be_resubmitted() { + return SubmitResult::KnownExpired; } - index.insert(hash, statement); - let network_priority = if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; - SubmitResult::OkNew(network_priority) + priority } - Err(InvalidStatement::BadProof) => { - log::debug!(target: LOG_TARGET, "Statement validation failed: BadProof, {:?}", statement); - SubmitResult::Bad("Bad statement proof") - }, - Err(InvalidStatement::NoProof) =>{ - log::debug!(target: LOG_TARGET, "Statement validation failed: NoProof, {:?}", statement); - SubmitResult::Bad("Missing statement proof") + IndexQuery::Exists(priority) => { + if !source.can_be_resubmitted() { + return SubmitResult::Known; + } + priority + } + IndexQuery::Unknown => { + // Validate. + let validation_result = (self.validate_fn)(Default::default(), source, statement.clone()); + match validation_result { + Ok(ValidStatement { priority }) => priority, + Err(InvalidStatement::BadProof) => { + log::debug!(target: LOG_TARGET, "Statement validation failed: BadProof, {:?}", statement); + return SubmitResult::Bad("Bad statement proof") + }, + Err(InvalidStatement::NoProof) =>{ + log::debug!(target: LOG_TARGET, "Statement validation failed: NoProof, {:?}", statement); + return SubmitResult::Bad("Missing statement proof") + }, + Err(InvalidStatement::InternalError) => { + return SubmitResult::InternalError(Error::Runtime) + }, + } + } + }; + + // Commit to the db prior to locking the index. + let statement_with_meta = StatementWithMeta { + meta: StatementMeta { + priority, + timestamp: self.timestamp(), }, - Err(InvalidStatement::InternalError) => SubmitResult::InternalError(Error::Runtime), + statement, + }; + + let mut commit = self.index.write().evict(); + commit.push((col::STATEMENTS, hash.to_vec(), Some(statement_with_meta.encode()))); + if let Err(e) = self.db.commit(commit) { + log::debug!(target: LOG_TARGET, "Statement validation failed: database error {}, {:?}", e, statement_with_meta.statement); + return SubmitResult::InternalError(Error::Db(e.to_string())); } + let mut index = self.index.write(); + index.insert_with_meta(hash, statement_with_meta); + let network_priority = if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; + SubmitResult::New(network_priority) } /// Submit a SCALE-encoded statement. 
- fn submit_encoded(&self, mut statement: &[u8]) -> SubmitResult { + fn submit_encoded(&self, mut statement: &[u8], source: StatementSource) -> SubmitResult { match Statement::decode(&mut statement) { - Ok(decoded) => self.submit(decoded), + Ok(decoded) => self.submit(decoded, source), Err(e) => { log::debug!(target: LOG_TARGET, "Error decoding submitted statement. Failed with: {}", e); SubmitResult::Bad("Bad SCALE encoding") diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 32588b826af83..89655cd0c1cd8 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -32,7 +32,7 @@ pub type Hash = [u8; 32]; pub type BlockHash = [u8; 32]; #[cfg(feature = "std")] -pub use api::{StatementStore, SubmitResult, Error, Result, NetworkPriority}; +pub use api::{StatementStore, SubmitResult, Error, Result, NetworkPriority, StatementSource}; pub mod sr25519 { mod app_sr25519 { @@ -205,7 +205,7 @@ impl Statement { #[cfg(feature = "std")] pub fn hash(&self) -> [u8; 32] { - hash_encoded(&self.encode()) + self.using_encoded(hash_encoded) } pub fn topic(&self, index: usize) -> Option { @@ -303,9 +303,11 @@ mod api { #[derive(Debug)] pub enum SubmitResult { /// Accepted as new with given score - OkNew(NetworkPriority), + New(NetworkPriority), /// Known statement - OkKnown, + Known, + /// Known statement that's already expired. + KnownExpired, /// Statement failed validation. Bad(&'static str), /// Internal store error. @@ -334,10 +336,10 @@ mod api { fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>>; /// Submit a statement. - fn submit(&self, statement: Statement) -> SubmitResult; + fn submit(&self, statement: Statement, source: StatementSource) -> SubmitResult; /// Submit a SCALE-encoded statement. - fn submit_encoded(&self, statement: &[u8]) -> SubmitResult; + fn submit_encoded(&self, statement: &[u8], source: StatementSource) -> SubmitResult; } } @@ -366,10 +368,21 @@ pub mod runtime_api { /// Depending on the source we might apply different validation schemes. #[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum StatementSource { - /// Statement is coming from a local source, such as the OCW. - Local, - /// Statement has been received externally (network or RPC). - External, + /// Statement is coming from the on-chain worker. + Chain, + /// Statement has been received from the gossip network. + Network, + /// Statement has been submitted over the RPC api. + Rpc, + } + + impl StatementSource { + pub fn can_be_resubmitted(&self) -> bool { + match self { + StatementSource::Chain | StatementSource::Rpc => true, + StatementSource::Network => false, + } + } } sp_api::decl_runtime_apis! 
{ From d610e62125909d4cd3614daa14991de8c6f30ab4 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 21 Mar 2023 19:44:41 +0100 Subject: [PATCH 08/78] Basic statement refactoring + tests + docs --- Cargo.lock | 1 + client/service/src/builder.rs | 2 +- client/statement-store/Cargo.toml | 2 +- client/statement-store/src/lib.rs | 2 +- client/statement-store/src/store.rs | 1 + frame/statement/src/lib.rs | 68 +-- primitives/statement-store/Cargo.toml | 2 + primitives/statement-store/src/lib.rs | 464 +++++++++++------- primitives/statement-store/src/runtime_api.rs | 69 +++ primitives/statement-store/src/store_api.rs | 81 +++ 10 files changed, 454 insertions(+), 238 deletions(-) create mode 100644 primitives/statement-store/src/runtime_api.rs create mode 100644 primitives/statement-store/src/store_api.rs diff --git a/Cargo.lock b/Cargo.lock index 589f3333e7471..3c07fbba013e1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10793,6 +10793,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", + "sp-io", "sp-runtime", "sp-std", "thiserror", diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 8ec1027d28565..96f1e93ba71a3 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -467,7 +467,7 @@ where "statement-store-notifications", Some("statement-store"), async move { - let mut interval = tokio::time::interval(std::time::Duration::from_millis(10)); + let mut interval = tokio::time::interval(sc_statement_store::MAINTENANCE_PERIOD); loop { interval.tick().await; store.maintain(); diff --git a/client/statement-store/Cargo.toml b/client/statement-store/Cargo.toml index 08c74a6ea6db7..6bbbc8338f83f 100644 --- a/client/statement-store/Cargo.toml +++ b/client/statement-store/Cargo.toml @@ -19,7 +19,7 @@ futures = "0.3.21" futures-timer = "3.0.2" log = "0.4.17" parking_lot = "0.12.1" -parity-db = "0.4.3" +parity-db = "0.4.6" sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } #sc-client-api = { version = "4.0.0-dev", path = "../api" } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 17d504b1b53e7..586cc0c3d79ef 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -25,7 +25,7 @@ mod store; //mod metrics; -pub use store::Store; +pub use store::{Store, MAINTENANCE_PERIOD}; pub use sp_statement_store::{StatementStore, Error}; /* diff --git a/client/statement-store/src/store.rs b/client/statement-store/src/store.rs index fbb56ed15e6a0..9aad7cb753fa3 100644 --- a/client/statement-store/src/store.rs +++ b/client/statement-store/src/store.rs @@ -34,6 +34,7 @@ const LOG_TARGET: &str = "statement"; const EXPIRE_AFTER: u64 = 24 * 60 * 60; //24h const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h +/// Suggested maintenance period. A good value to call `Store::maintain` with. 
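With `MAINTENANCE_PERIOD` now exported, a node that does not go through `sc-service` can drive expiry itself. A sketch of such a periodic task, assuming a tokio runtime and an `Arc<Store>`; it mirrors what the service builder spawns above:

    use std::sync::Arc;

    async fn run_statement_store_maintenance(store: Arc<sc_statement_store::Store>) {
        let mut interval = tokio::time::interval(sc_statement_store::MAINTENANCE_PERIOD);
        loop {
            interval.tick().await;
            // Expires statements past EXPIRE_AFTER and purges records past PURGE_AFTER.
            store.maintain();
        }
    }
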
#[allow(dead_code)] pub const MAINTENANCE_PERIOD: std::time::Duration = std::time::Duration::from_secs(30); diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index f6c3a49f2ee06..b1af1a088f23f 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -27,10 +27,10 @@ #![cfg_attr(not(feature = "std"), no_std)] //use codec::{Decode, Encode, MaxEncodedLen}; -use sp_statement_store::{Proof, Statement}; +use sp_statement_store::{Proof, Statement, SignatureVerificationResult}; use sp_statement_store::runtime_api::{StatementSource, ValidStatement, InvalidStatement}; use frame_support::sp_tracing::{enter_span, Level}; -use frame_support::sp_runtime::traits::{Zero, Verify}; +use frame_support::sp_runtime::traits::Zero; use frame_support::sp_runtime::SaturatedConversion; use frame_support::traits::Currency; use frame_support::pallet_prelude::*; @@ -96,40 +96,7 @@ impl Pallet enter_span! { Level::TRACE, "validate_statement" }; log::debug!(target: LOG_TARGET, "Validating statement {:?}", statement); - let account: Option = match statement.proof() { - None => { - return Err(InvalidStatement::NoProof) - }, - Some(Proof::Sr25519 { signature, signer }) => { - let to_sign = statement.signature_material(); - let signature = sp_core::sr25519::Signature(*signature); - let public = sp_core::sr25519::Public(*signer); - if !signature.verify(to_sign.as_slice(), &public) { - log::debug!(target: LOG_TARGET, "Bad Sr25519 signature."); - return Err(InvalidStatement::BadProof); - } - Some(signer.clone().into()) - }, - Some(Proof::Ed25519 { signature, signer }) => { - let to_sign = statement.signature_material(); - let signature = sp_core::ed25519::Signature(*signature); - let public = sp_core::ed25519::Public(*signer); - if !signature.verify(to_sign.as_slice(), &public) { - log::debug!(target: LOG_TARGET, "Bad Ed25519 signature."); - return Err(InvalidStatement::BadProof); - } - Some(signer.clone().into()) - }, - Some(Proof::Secp256k1Ecdsa { signature, signer }) => { - let to_sign = statement.signature_material(); - let signature = sp_core::ecdsa::Signature(*signature); - let public = sp_core::ecdsa::Public(*signer); - if !signature.verify(to_sign.as_slice(), &public) { - log::debug!(target: LOG_TARGET, "Bad ECDSA signature."); - return Err(InvalidStatement::BadProof); - } - Some(sp_io::hashing::blake2_256(signer).into()) - }, + let account: T::AccountId = match statement.proof() { Some(Proof::OnChain { who, block_hash, event_index }) => { // block_hash and event_index should be checked by the host if frame_system::Pallet::::parent_hash().as_ref() != block_hash.as_slice() { @@ -149,22 +116,27 @@ impl Pallet return Err(InvalidStatement::BadProof); } } - Some(account) + account + } + _ => match statement.verify_signature() { + SignatureVerificationResult::Valid(account) => account.into(), + SignatureVerificationResult::Invalid => { + log::debug!(target: LOG_TARGET, "Bad statement signature."); + return Err(InvalidStatement::BadProof); + }, + SignatureVerificationResult::NoSignature => { + log::debug!(target: LOG_TARGET, "Missing statement signature."); + return Err(InvalidStatement::NoProof); + } } }; - let priority: u64 = if let Some(account) = account { - let priority_cost = T::PriorityBalance::get(); - if priority_cost.is_zero() { + let priority_cost = T::PriorityBalance::get(); + let priority: u64 = if priority_cost.is_zero() { 0 - } - else { - let balance = T::Currency::free_balance(&account); - let priority = balance / priority_cost; - priority.saturated_into() - - } } else { - 
0 + let balance = T::Currency::free_balance(&account); + let priority = balance / priority_cost; + priority.saturated_into() }; Ok(ValidStatement { diff --git a/primitives/statement-store/Cargo.toml b/primitives/statement-store/Cargo.toml index b21dc5d9bb81e..e84d227fe78c9 100644 --- a/primitives/statement-store/Cargo.toml +++ b/primitives/statement-store/Cargo.toml @@ -19,6 +19,7 @@ sp-core = { version = "7.0.0", default-features = false, path = "../core" } sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../std" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } +sp-io = { version = "7.0.0", default-features = false, path = "../io" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } thiserror = {version = "1.0", optional = true } log = { version = "0.4.17", optional = true } @@ -32,6 +33,7 @@ std = [ "sp-runtime/std", "sp-std/std", "sp-api/std", + "sp-io/std", "sp-application-crypto/std", "thiserror", "log", diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 89655cd0c1cd8..78013c3f46098 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -26,53 +26,43 @@ use sp_application_crypto::RuntimeAppPublic; #[cfg(feature = "std")] use sp_core::Pair; +/// Statement topic. pub type Topic = [u8; 32]; +/// Decryption key identifier. pub type DecryptionKey = [u8; 32]; +/// Statement hash. pub type Hash = [u8; 32]; +/// Block hash. pub type BlockHash = [u8; 32]; #[cfg(feature = "std")] -pub use api::{StatementStore, SubmitResult, Error, Result, NetworkPriority, StatementSource}; +pub use store_api::{StatementStore, SubmitResult, Error, Result, NetworkPriority, StatementSource}; -pub mod sr25519 { +pub mod runtime_api; +#[cfg(feature = "std")] +mod store_api; + +mod sr25519 { mod app_sr25519 { - use sp_application_crypto::{app_crypto, key_types::STATEMENT, sr25519}; + use sp_application_crypto::{app_crypto, sr25519, key_types::STATEMENT}; app_crypto!(sr25519, STATEMENT); } - - sp_application_crypto::with_pair! { - pub type Pair = app_sr25519::Pair; - } - - pub type Signature = app_sr25519::Signature; pub type Public = app_sr25519::Public; } -pub mod ed25519 { +mod ed25519 { mod app_ed25519 { use sp_application_crypto::{app_crypto, ed25519, key_types::STATEMENT}; app_crypto!(ed25519, STATEMENT); } - - sp_application_crypto::with_pair! { - pub type Pair = app_ed25519::Pair; - } - - pub type Signature = app_ed25519::Signature; pub type Public = app_ed25519::Public; } -pub mod ecdsa { +mod ecdsa { mod app_ecdsa { use sp_application_crypto::{app_crypto, ecdsa, key_types::STATEMENT}; app_crypto!(ecdsa, STATEMENT); } - - sp_application_crypto::with_pair! { - pub type Pair = app_ecdsa::Pair; - } - - pub type Signature = app_ecdsa::Signature; pub type Public = app_ecdsa::Public; } @@ -82,51 +72,125 @@ pub fn hash_encoded(data: &[u8]) -> [u8; 32] { sp_core::hashing::blake2_256(data) } +/// Statement proof. #[derive(Encode, Decode, TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq)] pub enum Proof { - Sr25519 { signature: [u8; 64], signer: [u8; 32] }, - Ed25519 { signature: [u8; 64], signer: [u8; 32] }, - Secp256k1Ecdsa { signature: [u8; 65], signer: [u8; 33] }, - OnChain { who: [u8; 32], block_hash: BlockHash, event_index: u64 }, + /// Sr25519 Signature. + Sr25519 { + /// Signature. + signature: [u8; 64], + /// Public key. 
+ signer: [u8; 32] + }, + /// Ed25519 Signature. + Ed25519 { + /// Signature. + signature: [u8; 64], + /// Public key. + signer: [u8; 32] + }, + /// Secp256k1 Signature. + Secp256k1Ecdsa { + /// Signature. + signature: [u8; 65], + /// Public key. + signer: [u8; 33] + }, + /// On-chain event proof. + OnChain { + /// Account identifier associated with the event. + who: [u8; 32], + /// Hash of block that contains the event. + block_hash: BlockHash, + /// Index of the event in the event list. + event_index: u64 + }, } #[derive(Encode, Decode, TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq)] +/// Statement attributes. Each statement is a list of 0 or more fields. Fields may only appear in +/// the order declared here. +#[repr(u8)] pub enum Field { - AuthenticityProof(Proof), - DecryptionKey(DecryptionKey), - Priority(u32), - Topic0(Topic), - Topic1(Topic), - Topic2(Topic), - Topic3(Topic), - Data(Vec), + /// Statement proof. + AuthenticityProof(Proof) = 0, + /// An identifier for the key that `Data` field may be decrypted with. + DecryptionKey(DecryptionKey) = 1, + /// First statement topic. + Topic1(Topic) = 2, + /// Second statement topic. + Topic2(Topic) = 3, + /// Third statement topic. + Topic3(Topic) = 4, + /// Fourth statement topic. + Topic4(Topic) = 5, + /// Additional data. + Data(Vec) = 6, } -#[derive(Encode, Decode, TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq)] +#[derive(TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq, Default)] +/// Statement structure. pub struct Statement { - fields: Vec, + proof: Option, + decryption_key: Option, + num_topics: u8, + topics: [Topic; 4], + data: Option>, } -#[derive(Clone, Copy, PartialEq, Eq)] +impl Decode for Statement { + fn decode(input: &mut I) -> core::result::Result { + // Encoding matches that of Vec. Basically this just means accepting that there + // will be a prefix of vector length. + let num_fields: codec::Compact = Decode::decode(input)?; + let mut statement = Statement::new(); + for _ in 0 .. num_fields.into() { + let field: Field = Decode::decode(input)?; + match field { + Field::AuthenticityProof(p) => statement.set_proof(p), + Field::DecryptionKey(key) => statement.set_decryption_key(key), + Field::Topic1(t) => statement.set_topic(0, t), + Field::Topic2(t) => statement.set_topic(1, t), + Field::Topic3(t) => statement.set_topic(2, t), + Field::Topic4(t) => statement.set_topic(3, t), + Field::Data(data) => statement.set_plain_data(data), + } + } + Ok(statement) + } +} + +impl Encode for Statement { + fn encode(&self) -> Vec { + self.encoded(true) + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +/// Result returned by `Statement::verify_signature` pub enum SignatureVerificationResult { - Valid, + /// Signature is valid and matches this account id. + Valid([u8; 32]), + /// Signature has failed verification. Invalid, + /// No signature in the proof or no proof. NoSignature, } impl Statement { + /// Create a new empty statement with no proof. pub fn new() -> Statement { - Statement { - fields: Vec::new(), - } + Default::default() } + /// Create a new statement with a proof. pub fn new_with_proof(proof: Proof) -> Statement { - Statement { - fields: [Field::AuthenticityProof(proof)].to_vec(), - } + let mut statement = Self::new(); + statement.set_proof(proof); + statement } + /// Sign with a key that matches given public key in the keystore. 
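The hand-written `Encode`/`Decode` above preserves the wire format of the earlier `Vec<Field>` representation: a compact field count followed by each field in declaration order, with the proof first when present. A small check of that framing using SCALE primitives only; the single-variant `Field` here is a stand-in, so its variant index differs from the real enum:

    use codec::{Compact, Encode};

    #[derive(Encode)]
    enum Field {
        Data(Vec<u8>),
    }

    fn main() {
        // SCALE encodes a Vec as a compact length prefix followed by its items,
        // which is exactly what the custom `Statement` codec reproduces by hand.
        let as_vec = vec![Field::Data(b"x".to_vec())].encode();
        let by_hand = (Compact(1u32), Field::Data(b"x".to_vec())).encode();
        assert_eq!(as_vec, by_hand);
    }
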
pub fn sign_sr25519_public(&mut self, key: &sr25519::Public) -> bool { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { @@ -141,6 +205,7 @@ impl Statement { } } + /// Sign with a given private key and add the signature proof field. #[cfg(feature = "std")] pub fn sign_sr25519_private(&mut self, key: &sp_core::sr25519::Pair) { let to_sign = self.signature_material(); @@ -151,6 +216,7 @@ impl Statement { self.set_proof(proof); } + /// Sign with a key that matches given public key in the keystore. pub fn sign_ed25519_public(&mut self, key: &ed25519::Public) -> bool { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { @@ -165,6 +231,7 @@ impl Statement { } } + /// Sign with a given private key and add the signature proof field. #[cfg(feature = "std")] pub fn sign_ed25519_private(&mut self, key: &sp_core::ed25519::Pair) { let to_sign = self.signature_material(); @@ -175,6 +242,7 @@ impl Statement { self.set_proof(proof); } + /// Sign with a key that matches given public key in the keystore. pub fn sign_ecdsa_public(&mut self, key: &ecdsa::Public) -> bool { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { @@ -189,6 +257,7 @@ impl Statement { } } + /// Sign with a given private key and add the signature proof field. #[cfg(feature = "std")] pub fn sign_ecdsa_private(&mut self, key: &sp_core::ecdsa::Pair) { let to_sign = self.signature_material(); @@ -199,200 +268,221 @@ impl Statement { self.set_proof(proof); } + /// Check proof signature, if any. pub fn verify_signature(&self) -> SignatureVerificationResult { - SignatureVerificationResult::Valid + use sp_runtime::traits::Verify; + + match self.proof() { + Some(Proof::OnChain{..}) | None => { + SignatureVerificationResult::NoSignature + }, + Some(Proof::Sr25519 { signature, signer }) => { + let to_sign = self.signature_material(); + let signature = sp_core::sr25519::Signature(*signature); + let public = sp_core::sr25519::Public(*signer); + if signature.verify(to_sign.as_slice(), &public) { + SignatureVerificationResult::Valid(signer.clone()) + } else { + SignatureVerificationResult::Invalid + } + }, + Some(Proof::Ed25519 { signature, signer }) => { + let to_sign = self.signature_material(); + let signature = sp_core::ed25519::Signature(*signature); + let public = sp_core::ed25519::Public(*signer); + if signature.verify(to_sign.as_slice(), &public) { + SignatureVerificationResult::Valid(signer.clone()) + } else { + SignatureVerificationResult::Invalid + } + }, + Some(Proof::Secp256k1Ecdsa { signature, signer }) => { + let to_sign = self.signature_material(); + let signature = sp_core::ecdsa::Signature(*signature); + let public = sp_core::ecdsa::Public(*signer); + if signature.verify(to_sign.as_slice(), &public) { + SignatureVerificationResult::Valid(sp_io::hashing::blake2_256(signer)) + } else { + SignatureVerificationResult::Invalid + } + } + } } + /// Calculate statement hash. #[cfg(feature = "std")] pub fn hash(&self) -> [u8; 32] { self.using_encoded(hash_encoded) } + /// Returns a topic by topic index. pub fn topic(&self, index: usize) -> Option { - for field in &self.fields { - match (field, index) { - (Field::Topic0(t), 0) => return Some(*t), - (Field::Topic1(t), 1) => return Some(*t), - (Field::Topic2(t), 2) => return Some(*t), - (Field::Topic3(t), 3) => return Some(*t), - _ => {}, - } + if index < self.num_topics as usize { + Some(self.topics[index].clone()) + } else { + None } - None } + /// Returns decryption key if any. 
pub fn decryption_key(&self) -> Option { - for field in &self.fields { - if let Field::DecryptionKey(key) = field { - return Some(*key); - } - } - None + self.decryption_key.clone() } + /// Convert to internal data. pub fn into_data(self) -> Option> { - for field in self.fields.into_iter() { - if let Field::Data(data) = field { - return Some(data); - } - } - None + self.data } + /// Get a reference to the statement proof, if any. pub fn proof(&self) -> Option<&Proof> { - if let Some(Field::AuthenticityProof(p)) = self.fields.get(0) { - Some(p) - } else { - None - } + self.proof.as_ref() } /// Return encoded fields that can be signed to construct or verify a proof - pub fn signature_material(&self) -> Vec { - let mut out = Vec::new(); - let skip_fields = if let Some(Field::AuthenticityProof(_)) = self.fields.get(0) { 1 } else { 0 }; - for field in &self.fields[skip_fields..] { - field.encode_to(&mut out) - } - out + fn signature_material(&self) -> Vec { + self.encoded(false) } /// Return a copy of this statement with proof removed pub fn strip_proof(&self) -> Statement { - if let Some(Field::AuthenticityProof(_)) = self.fields.get(0) { - return Statement { fields: self.fields[1..].iter().cloned().collect() } + Statement { + proof: None, + decryption_key: self.decryption_key.clone(), + topics: self.topics.clone(), + num_topics: self.num_topics, + data: self.data.clone(), } - self.clone() } + /// Set statement proof. Any existing proof is overwritten. pub fn set_proof(&mut self, proof: Proof) { - if let Some(Field::AuthenticityProof(_)) = self.fields.get(0) { - self.fields[0] = Field::AuthenticityProof(proof); - } else { - self.fields.insert(0, Field::AuthenticityProof(proof)); - } + self.proof = Some(proof) } -} - -#[cfg(feature = "std")] -mod api { - use crate::{Statement, Topic, Hash}; - pub use crate::runtime_api::StatementSource; - - #[derive(Debug, thiserror::Error)] - pub enum Error { - /// Database error. - #[error("Database error: {0:?}")] - Db(String), - /// Error decoding statement structure. - #[error("Error decoding statement: {0:?}")] - Decode(String), - /// Error making runtime call. - #[error("Error calling into the runtime")] - Runtime, + /// Set topic by index. + pub fn set_topic(&mut self, index: usize, topic: Topic) { + if index <= 4 { + self.topics[index] = topic; + self.num_topics = self.num_topics.max(index as u8 + 1); + } } - #[derive(Debug, PartialEq, Eq)] - pub enum NetworkPriority { - High, - Low, + /// Set decryption key. + pub fn set_decryption_key(&mut self, key: DecryptionKey) { + self.decryption_key = Some(key); } - /// Statement submission outcome - #[derive(Debug)] - pub enum SubmitResult { - /// Accepted as new with given score - New(NetworkPriority), - /// Known statement - Known, - /// Known statement that's already expired. - KnownExpired, - /// Statement failed validation. - Bad(&'static str), - /// Internal store error. - InternalError(Error), + /// Set unencrypted statement data. + pub fn set_plain_data(&mut self, data: Vec) { + self.data = Some(data) } - pub type Result = std::result::Result; + fn encoded(&self, with_proof: bool) -> Vec { + // Encoding matches that of Vec. Basically this just means accepting that there + // will be a prefix of vector length. + let num_fields = + if with_proof && self.proof.is_some() { 1 } else { 0 } + + if self.decryption_key.is_some() { 1 } else { 0 } + + if self.data.is_some() { 1 } else { 0 } + + self.num_topics as u32; - pub trait StatementStore: Send + Sync { - /// Return all statements, SCALE-encoded. 
- fn dump_encoded(&self) -> Result)>>; + let mut output = Vec::new(); + let compact_len = codec::Compact::(num_fields); + compact_len.encode_to(&mut output); - /// Return all statements. - fn dump(&self) -> Result>; + if with_proof { + if let Some(proof) = &self.proof { + 0u8.encode_to(&mut output); + proof.encode_to(&mut output); + } + } + if let Some(decryption_key) = &self.decryption_key { + 1u8.encode_to(&mut output); + decryption_key.encode_to(&mut output); + } + for t in 0 .. self.num_topics { + (2u8 + t).encode_to(&mut output); + self.topics[t as usize].encode_to(&mut output); + } + if let Some(data) = &self.data { + 6u8.encode_to(&mut output); + data.encode_to(&mut output); + } + output + } +} - /// Get statement by hash. - fn statement(&self, hash: &Hash) -> Result>; +#[cfg(test)] +mod test { + use crate::{Statement, Proof, Field, SignatureVerificationResult, hash_encoded}; + use codec::{Encode, Decode}; + use sp_application_crypto::Pair; + + #[test] + fn statement_encoding_matches_vec() { + let mut statement = Statement::new(); + assert!(statement.proof().is_none()); + let proof = Proof::OnChain { + who: [42u8; 32], + block_hash: [24u8; 32], + event_index: 66, + }; - /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. - fn broadcasts(&self, match_all_topics: &[Topic]) -> Result>>; + let decryption_key = [0xde; 32]; + let topic1 = [0x01; 32]; + let topic2 = [0x02; 32]; + let data = vec![55,99]; - /// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers). - fn posted(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>>; + statement.set_proof(proof.clone()); + statement.set_decryption_key(decryption_key.clone()); + statement.set_topic(0, topic1.clone()); + statement.set_topic(1, topic2.clone()); + statement.set_plain_data(data.clone()); - /// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client. - fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>>; + let fields = vec![ + Field::AuthenticityProof(proof.clone()), + Field::DecryptionKey(decryption_key.clone()), + Field::Topic1(topic1.clone()), + Field::Topic2(topic2.clone()), + Field::Data(data.clone()), + ]; - /// Submit a statement. - fn submit(&self, statement: Statement, source: StatementSource) -> SubmitResult; + let encoded = statement.encode(); + assert_eq!(statement.hash(), hash_encoded(&encoded)); + assert_eq!(encoded, fields.encode()); - /// Submit a SCALE-encoded statement. - fn submit_encoded(&self, statement: &[u8], source: StatementSource) -> SubmitResult; + let decoded = Statement::decode(&mut encoded.as_slice()).unwrap(); + assert_eq!(decoded, statement); } -} -pub mod runtime_api { - use codec::{Decode, Encode}; - use scale_info::TypeInfo; - use sp_runtime::RuntimeDebug; - use crate::Statement; + #[test] + fn sign_and_verify() { + let mut statement = Statement::new(); + statement.set_plain_data(vec![42]); - /// Information concerning a valid statement. 
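Aside (not part of the patch): the manual `encoded()` routine and `Decode` impl above reproduce the SCALE encoding of a `Vec<Field>` — a compact field count, then one variant-index byte per field followed by the field payload. A minimal sketch of the resulting byte layout, assuming the conventional `codec` alias for `parity-scale-codec` and a statement carrying only a `Data(vec![9])` field (values are illustrative):

use codec::Encode;
use sp_statement_store::Statement;

fn main() {
	let mut statement = Statement::new();
	statement.set_plain_data(vec![9]);
	// 0x04        -> Compact(1): one field follows
	// 0x06        -> Field::Data variant index
	// 0x04, 0x09  -> SCALE-encoded Vec<u8> containing the single byte 9
	assert_eq!(statement.encode(), vec![0x04, 0x06, 0x04, 0x09]);
}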
- #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] - pub struct ValidStatement { - pub priority: u64, - } + let sr25519_kp = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); + let ed25519_kp = sp_core::ed25519::Pair::from_string("//Alice", None).unwrap(); + let secp256k1_kp = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); - /// An invalid statement. - #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug, TypeInfo)] - pub enum InvalidStatement { - BadProof, - NoProof, - InternalError, - } + statement.sign_sr25519_private(&sr25519_kp); + assert_eq!(statement.verify_signature(), SignatureVerificationResult::Valid(sr25519_kp.public().0)); - /// The source of the statement. - /// - /// Depending on the source we might apply different validation schemes. - #[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] - pub enum StatementSource { - /// Statement is coming from the on-chain worker. - Chain, - /// Statement has been received from the gossip network. - Network, - /// Statement has been submitted over the RPC api. - Rpc, - } + statement.sign_ed25519_private(&ed25519_kp); + assert_eq!(statement.verify_signature(), SignatureVerificationResult::Valid(ed25519_kp.public().0)); - impl StatementSource { - pub fn can_be_resubmitted(&self) -> bool { - match self { - StatementSource::Chain | StatementSource::Rpc => true, - StatementSource::Network => false, - } - } - } + statement.sign_ecdsa_private(&secp256k1_kp); + assert_eq!(statement.verify_signature(), SignatureVerificationResult::Valid(sp_core::hashing::blake2_256(&secp256k1_kp.public().0))); - sp_api::decl_runtime_apis! { - /// Runtime API trait for statement validation. - pub trait ValidateStatement { - /// Validate the statement. - fn validate_statement( - source: StatementSource, - statement: Statement, - ) -> Result; - } + // set an invalid signature + statement.set_proof(Proof::Sr25519 { + signature: [0u8; 64], + signer: [0u8; 32], + }); + assert_eq!(statement.verify_signature(), SignatureVerificationResult::Invalid); + + statement = statement.strip_proof(); + assert_eq!(statement.verify_signature(), SignatureVerificationResult::NoSignature); } } + diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs new file mode 100644 index 0000000000000..238e1fc085645 --- /dev/null +++ b/primitives/statement-store/src/runtime_api.rs @@ -0,0 +1,69 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; +use sp_runtime::RuntimeDebug; +use crate::Statement; + +/// Information concerning a valid statement. +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub struct ValidStatement { + pub priority: u64, +} + +/// An invalid statement. 
+#[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug, TypeInfo)] +pub enum InvalidStatement { + BadProof, + NoProof, + InternalError, +} + +/// The source of the statement. +/// +/// Depending on the source we might apply different validation schemes. +#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +pub enum StatementSource { + /// Statement is coming from the on-chain worker. + Chain, + /// Statement has been received from the gossip network. + Network, + /// Statement has been submitted over the RPC api. + Rpc, +} + +impl StatementSource { + pub fn can_be_resubmitted(&self) -> bool { + match self { + StatementSource::Chain | StatementSource::Rpc => true, + StatementSource::Network => false, + } + } +} + +sp_api::decl_runtime_apis! { + /// Runtime API trait for statement validation. + pub trait ValidateStatement { + /// Validate the statement. + fn validate_statement( + source: StatementSource, + statement: Statement, + ) -> Result; + } +} + diff --git a/primitives/statement-store/src/store_api.rs b/primitives/statement-store/src/store_api.rs new file mode 100644 index 0000000000000..eaefec08b8805 --- /dev/null +++ b/primitives/statement-store/src/store_api.rs @@ -0,0 +1,81 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::{Statement, Topic, Hash}; +pub use crate::runtime_api::StatementSource; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + /// Database error. + #[error("Database error: {0:?}")] + Db(String), + /// Error decoding statement structure. + #[error("Error decoding statement: {0:?}")] + Decode(String), + /// Error making runtime call. + #[error("Error calling into the runtime")] + Runtime, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum NetworkPriority { + High, + Low, +} + +/// Statement submission outcome +#[derive(Debug)] +pub enum SubmitResult { + /// Accepted as new with given score + New(NetworkPriority), + /// Known statement + Known, + /// Known statement that's already expired. + KnownExpired, + /// Statement failed validation. + Bad(&'static str), + /// Internal store error. + InternalError(Error), +} + +pub type Result = std::result::Result; + +pub trait StatementStore: Send + Sync { + /// Return all statements, SCALE-encoded. + fn dump_encoded(&self) -> Result)>>; + + /// Return all statements. + fn dump(&self) -> Result>; + + /// Get statement by hash. + fn statement(&self, hash: &Hash) -> Result>; + + /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. + fn broadcasts(&self, match_all_topics: &[Topic]) -> Result>>; + + /// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers). 
+ fn posted(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>>; + + /// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client. + fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>>; + + /// Submit a statement. + fn submit(&self, statement: Statement, source: StatementSource) -> SubmitResult; + + /// Submit a SCALE-encoded statement. + fn submit_encoded(&self, statement: &[u8], source: StatementSource) -> SubmitResult; +} From 0b73266090854e5f8acbce7c8cc05da4aad47506 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 22 Mar 2023 14:40:43 +0100 Subject: [PATCH 09/78] Store metrics --- bin/node-template/node/src/service.rs | 2 +- bin/node/cli/src/service.rs | 2 +- client/statement-store/src/lib.rs | 544 +++++++++++++++++- client/statement-store/src/metrics.rs | 41 -- client/statement-store/src/store.rs | 532 ----------------- primitives/statement-store/src/lib.rs | 1 + primitives/statement-store/src/runtime_api.rs | 11 +- primitives/statement-store/src/store_api.rs | 6 + 8 files changed, 548 insertions(+), 591 deletions(-) delete mode 100644 client/statement-store/src/store.rs diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 59c46a115ddf2..818e0895dd264 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -130,7 +130,7 @@ pub fn new_partial( compatibility_mode: Default::default(), })?; - let statement_store = sc_statement_store::Store::new(config.database.path().unwrap(), client.clone())?; + let statement_store = sc_statement_store::Store::new(config.database.path().unwrap(), client.clone(), config.prometheus_registry())?; Ok(sc_service::PartialComponents { client, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 74e785fbebcf6..5f2a6e799a548 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -282,7 +282,7 @@ pub fn new_partial( (rpc_extensions_builder, shared_voter_state2) }; - let statement_store = sc_statement_store::Store::new(config.database.path().unwrap(), client.clone())?; + let statement_store = sc_statement_store::Store::new(config.database.path().unwrap(), client.clone(), config.prometheus_registry())?; Ok(sc_service::PartialComponents { client, diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 586cc0c3d79ef..53ae199b5b123 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -16,27 +16,541 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Substrate transaction pool implementation. +//! Disk-backed statement store. -#![recursion_limit = "256"] #![warn(missing_docs)] #![warn(unused_extern_crates)] -mod store; -//mod metrics; +mod metrics; -pub use store::{Store, MAINTENANCE_PERIOD}; pub use sp_statement_store::{StatementStore, Error}; -/* -/// Inform the transaction pool about imported and finalized blocks. 
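Aside (not part of the patch): the `StatementStore` trait introduced in store_api.rs above is the object-safe API the rest of the client programs against. A minimal usage sketch, assuming a `&dyn StatementStore` handle, placeholder topic and payload values, and the `log` macros being available:

use sp_statement_store::{Statement, StatementSource, StatementStore, SubmitResult, Topic};

fn submit_and_read(store: &dyn StatementStore) {
	let topic: Topic = [1u8; 32];
	let mut statement = Statement::new();
	statement.set_topic(0, topic);
	statement.set_plain_data(b"hello".to_vec());

	// Submitting via the RPC source allows the statement to be resubmitted later.
	match store.submit(statement, StatementSource::Rpc) {
		SubmitResult::New(priority) => log::info!("accepted, network priority {:?}", priority),
		SubmitResult::Known | SubmitResult::KnownExpired => log::info!("already known"),
		SubmitResult::Bad(reason) => log::warn!("rejected: {}", reason),
		SubmitResult::InternalError(e) => log::error!("store error: {:?}", e),
	}

	// Data of all statements that carry `topic` and have no `DecryptionKey` field.
	let _payloads = store.broadcasts(&[topic]);
}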
-pub async fn notification_future(client: Arc, store: Arc) -where - Client: sc_client_api::BlockchainEvents, +use std::{collections::{HashSet, HashMap, BinaryHeap}, sync::Arc}; +use parking_lot::RwLock; +use metrics::MetricsLink as PrometheusMetrics; +use prometheus_endpoint::Registry as PrometheusRegistry; +use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Hash, BlockHash, SubmitResult, NetworkPriority}; +use sp_statement_store::runtime_api::{ValidateStatement, ValidStatement, InvalidStatement, StatementSource}; +use sp_core::{Encode, Decode}; +use sp_api::ProvideRuntimeApi; +use sp_runtime::traits::Block as BlockT; + + +const KEY_VERSION: &[u8] = b"version".as_slice(); +const CURRENT_VERSION: u32 = 1; + +const LOG_TARGET: &str = "statement"; + +const EXPIRE_AFTER: u64 = 24 * 60 * 60; //24h +const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h + +/// Suggested maintenance period. A good value to call `Store::maintain` with. +#[allow(dead_code)] +pub const MAINTENANCE_PERIOD: std::time::Duration = std::time::Duration::from_secs(30); + +mod col { + pub const META: u8 = 0; + pub const STATEMENTS: u8 = 1; + + pub const COUNT: u8 = 2; +} + +#[derive(PartialEq, Eq)] +struct EvictionPriority { + hash: Hash, + priority: u64, + timestamp: u64, +} + +impl PartialOrd for EvictionPriority { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.priority.cmp(&other.priority).then_with(|| self.timestamp.cmp(&other.timestamp)).reverse()) + } +} + +impl Ord for EvictionPriority { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.priority.cmp(&other.priority).then_with(|| self.timestamp.cmp(&other.timestamp)).reverse() + } +} + +#[derive(Default)] +struct Index { + by_topic: HashMap>, + by_dec_key: HashMap>, + all_topics: HashMap; 4], Option)>, + by_priority: BinaryHeap, + entries: HashMap, + expired: HashMap, + max_entries: usize, +} + +struct ClientWrapper { + client: Arc, + _block: std::marker::PhantomData, +} + +impl ClientWrapper + where + Block: BlockT, + Block::Hash: From, + Client: ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: ValidateStatement, { - let finality_stream = client.finality_notification_stream().map(Into::into).fuse(); - finality_stream - .for_each(|_evt| pool.maintain()) - .await + fn validate_statement( + &self, + block: BlockHash, + source: StatementSource, + statement: Statement, + ) -> std::result::Result { + let api = self.client.runtime_api(); + let block = block.into(); + match api.validate_statement(block, source, statement) { + Ok(r) => r, + Err(_) => { + Err(InvalidStatement::InternalError) + } + } + } +} + +/// Statement store. 
+pub struct Store { + db: parity_db::Db, + index: RwLock, + validate_fn: Box std::result::Result + Send + Sync>, + time_overrite: Option, + metrics: PrometheusMetrics, +} + +#[derive(Encode, Decode, Clone)] +struct StatementMeta { + priority: u64, + timestamp: u64, +} + +#[derive(Encode, Decode)] +struct StatementWithMeta { + meta: StatementMeta, + statement: Statement, +} + +enum IndexQuery { + Unknown, + Exists(u64), + Expired(u64), +} + +impl Index { + fn insert_with_meta(&mut self, hash: Hash, statement: StatementWithMeta) { + let mut all_topics = [None; 4]; + let mut nt = 0; + let StatementWithMeta { statement, meta } = statement; + while let Some(t) = statement.topic(nt) { + self.by_topic.entry(t).or_default().insert(hash); + all_topics[nt] = Some(t); + nt += 1; + } + let key = statement.decryption_key(); + if let Some(k) = &key { + self.by_dec_key.entry(k.clone()).or_default().insert(hash); + } + if nt > 0 || key.is_some() { + self.all_topics.insert(hash, (all_topics, key)); + } + self.expired.remove(&hash); + if self.entries.insert(hash, meta.clone()).is_none() { + self.by_priority.push(EvictionPriority { + hash, + priority: meta.priority, + timestamp: meta.timestamp, + }); + } + } + + fn query(&self, hash: &Hash) -> IndexQuery { + if let Some(meta) = self.entries.get(hash) { + return IndexQuery::Exists(meta.priority); + } + if let Some(meta) = self.expired.get(hash) { + return IndexQuery::Expired(meta.priority); + } + IndexQuery::Unknown + } + + fn insert_expired(&mut self, hash: Hash, meta: StatementMeta) { + self.expired.insert(hash, meta); + } + + fn is_expired(&self, hash: &Hash) -> bool { + self.expired.contains_key(hash) + } + + fn iter(&self, key: Option, topics: &[Topic], mut f: impl FnMut(&Hash) -> Result<()>) -> Result<()> { + let mut sets: [Option<&HashSet>; 4] = Default::default(); + let mut num_sets = 0; + for t in topics { + sets[num_sets] = self.by_topic.get(t); + if sets[num_sets].is_some() { + num_sets += 1; + } + } + if num_sets == 0 && key.is_none() { + // Iterate all entries + for h in self.entries.keys() { + f(h)? + } + } else { + // Start with the smallest topic set or the key set. + sets[0..num_sets].sort_by_key(|s| s.map_or(0, HashSet::len)); + if let Some(key) = key { + let key_set = if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; + for item in key_set { + if sets.iter().all(|set| set.unwrap().contains(item)) { + f(item)? + } + } + } else { + for item in sets[0].unwrap() { + if sets[1 .. num_sets].iter().all(|set| set.unwrap().contains(item)) { + f(item)? + } + } + } + } + Ok(()) + } + + fn maintain(&mut self, current_time: u64) -> Vec<(parity_db::ColId, Vec, Option>)> { + // Purge previously expired messages. + let mut purged = Vec::new(); + self.expired.retain(|hash, meta| { + if meta.timestamp + PURGE_AFTER > current_time { + purged.push((col::STATEMENTS, hash.to_vec(), None)); + false + } else { + true + } + }); + + // Expire messages. 
+ let mut num_expired = 0; + self.entries.retain(|hash, meta| { + if meta.timestamp + EXPIRE_AFTER > current_time { + if let Some((topics, key)) = self.all_topics.remove(hash) { + for t in topics { + if let Some(t) = t { + if let Some(set) = self.by_topic.get_mut(&t) { + set.remove(hash); + } + } + } + if let Some(k) = key { + if let Some(set) = self.by_dec_key.get_mut(&k) { + set.remove(hash); + } + } + } + self.expired.insert(hash.clone(), meta.clone()); + num_expired += 1; + false + } else { + true + } + }); + if num_expired > 0 { + // Rebuild the priority queue + self.by_priority = self.entries.iter().map(|(hash, meta)| EvictionPriority { + hash: hash.clone(), + priority: meta.priority, + timestamp: meta.timestamp, + }).collect(); + } + purged + } + + fn evict(&mut self) -> Vec<(parity_db::ColId, Vec, Option>)> { + let mut evicted_set = Vec::new(); + while self.by_priority.len() > self.max_entries { + if let Some(evicted) = self.by_priority.pop() { + self.entries.remove(&evicted.hash); + if let Some((topics, key)) = self.all_topics.remove(&evicted.hash) { + for t in topics { + if let Some(t) = t { + if let Some(set) = self.by_topic.get_mut(&t) { + set.remove(&evicted.hash); + } + } + } + if let Some(k) = key { + if let Some(set) = self.by_dec_key.get_mut(&k) { + set.remove(&evicted.hash); + } + } + } + evicted_set.push((col::STATEMENTS, evicted.hash.to_vec(), None)); + } else { + break; + } + } + evicted_set + } +} + +impl Store { + /// Create a new shared store instance. There should only be one per process. + pub fn new( + path: &std::path::Path, + client: Arc, + prometheus: Option<&PrometheusRegistry>, + ) -> Result> + where + Block: BlockT, + Block::Hash: From, + Client: ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: ValidateStatement, + { + let mut path: std::path::PathBuf = path.into(); + path.pop(); + path.push("statement"); + + let mut config = parity_db::Options::with_columns(&path, col::COUNT); + + let mut statement_col = &mut config.columns[col::STATEMENTS as usize]; + statement_col.ref_counted = false; + statement_col.preimage = true; + statement_col.uniform = true; + let db = parity_db::Db::open_or_create(&config).map_err(|e| Error::Db(e.to_string()))?; + match db.get(col::META, &KEY_VERSION).map_err(|e| Error::Db(e.to_string()))? 
{ + Some(version) => { + let version = u32::from_le_bytes(version.try_into() + .map_err(|_| Error::Db("Error reading database version".into()))?); + if version != CURRENT_VERSION { + return Err(Error::Db(format!("Unsupported database version: {version}"))); + } + }, + None => { + db.commit( + [(col::META, KEY_VERSION.to_vec(), Some(CURRENT_VERSION.to_le_bytes().to_vec()))] + ).map_err(|e| Error::Db(e.to_string()))?; + } + } + + let index = Index::default(); + let validator = ClientWrapper { client, _block: Default::default() }; + let validate_fn = Box::new(move |block, source, statement| validator.validate_statement(block, source, statement)); + + let store = Arc::new(Store { + db, + index: RwLock::new(index), + validate_fn, + time_overrite: None, + metrics: PrometheusMetrics::new(prometheus), + }); + store.populate()?; + Ok(store) + } + + fn populate(&self) -> Result<()> { + let current_time = self.timestamp(); + let mut index = self.index.write(); + self.db.iter_column_while(col::STATEMENTS, |item| { + let statement = item.value; + if let Ok(statement_with_meta) = StatementWithMeta::decode(&mut statement.as_slice()) { + let hash = statement_with_meta.statement.hash(); + if statement_with_meta.meta.timestamp + EXPIRE_AFTER > current_time { + index.insert_expired(hash, statement_with_meta.meta); + } else { + index.insert_with_meta(hash, statement_with_meta); + } + } + true + }).map_err(|e| Error::Db(e.to_string()))?; + + Ok(()) + } + + fn collect_statements(&self, key: Option, match_all_topics: &[Topic], mut f: impl FnMut(Statement) -> Option ) -> Result> { + let mut result = Vec::new(); + let index = self.index.read(); + index.iter(key, match_all_topics, |hash| { + match self.db.get(col::STATEMENTS, hash).map_err(|e| Error::Db(e.to_string()))? { + Some(entry) => { + if let Ok(statement) = StatementWithMeta::decode(&mut entry.as_slice()) { + if let Some(data) = f(statement.statement) { + result.push(data); + } + } else { + // DB inconsistency + log::warn!(target: LOG_TARGET, "Corrupt statement {:?}", hash); + } + + } + None => { + // DB inconsistency + log::warn!(target: LOG_TARGET, "Missing statement {:?}", hash); + } + } + Ok(()) + })?; + Ok(result) + } + + /// Perform periodic store maintenance + pub fn maintain(&self) { + let deleted = self.index.write().maintain(self.timestamp()); + let count = deleted.len() as u64; + if let Err(e) = self.db.commit(deleted) { + log::warn!(target: LOG_TARGET, "Error writing to the statement database: {:?}", e); + } else { + self.metrics.report(|metrics| metrics.statements_pruned.inc_by(count)); + } + } + + fn timestamp(&self) -> u64 { + self.time_overrite.unwrap_or_else(|| + std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs()) + } } -*/ + +impl StatementStore for Store { + fn dump_encoded(&self) -> Result)>> { + let mut result = Vec::new(); + self.db.iter_column_while(col::STATEMENTS, |item| { + if let Ok(entry) = StatementWithMeta::decode(&mut item.value.as_slice()) { + entry.statement.using_encoded(|statement| { + let hash = sp_statement_store::hash_encoded(statement); + if !self.index.read().is_expired(&hash) { + result.push((hash, entry.statement.encode())); + } + }); + } + true + }).map_err(|e| Error::Db(e.to_string()))?; + Ok(result) + } + + /// Return all statements. 
+ fn dump(&self) -> Result> { + let mut result = Vec::new(); + self.db.iter_column_while(col::STATEMENTS, |item| { + if let Ok(entry) = StatementWithMeta::decode(&mut item.value.as_slice()) { + let hash = entry.statement.hash(); + if !self.index.read().is_expired(&hash) { + result.push((hash, entry.statement)); + } + } + true + }).map_err(|e| Error::Db(e.to_string()))?; + Ok(result) + } + + /// Returns a statement by hash. + fn statement(&self, hash: &Hash) -> Result> { + Ok(match self.db.get(col::STATEMENTS, hash.as_slice()).map_err(|e| Error::Db(e.to_string()))? { + Some(entry) => { + Some(StatementWithMeta::decode(&mut entry.as_slice()).map_err(|e| Error::Decode(e.to_string()))?.statement) + } + None => None, + }) + } + + /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. + fn broadcasts(&self, match_all_topics: &[Topic]) -> Result>> { + self.collect_statements(None, match_all_topics, |statement| statement.into_data()) + } + + /// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers). + fn posted(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>> { + self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) + } + + /// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client. + fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>> { + self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) + } + + /// Submit a statement to the store. Validates the statement and returns validation result. + fn submit(&self, statement: Statement, source: StatementSource) -> SubmitResult { + let hash = statement.hash(); + let priority = match self.index.read().query(&hash) { + IndexQuery::Expired(priority) => { + if !source.can_be_resubmitted() { + return SubmitResult::KnownExpired; + } + priority + } + IndexQuery::Exists(priority) => { + if !source.can_be_resubmitted() { + return SubmitResult::Known; + } + priority + } + IndexQuery::Unknown => { + // Validate. + let validation_result = (self.validate_fn)(Default::default(), source, statement.clone()); + match validation_result { + Ok(ValidStatement { priority }) => priority, + Err(InvalidStatement::BadProof) => { + log::debug!(target: LOG_TARGET, "Statement validation failed: BadProof, {:?}", statement); + self.metrics.report(|metrics| metrics.validations_invalid.inc()); + return SubmitResult::Bad("Bad statement proof") + }, + Err(InvalidStatement::NoProof) =>{ + log::debug!(target: LOG_TARGET, "Statement validation failed: NoProof, {:?}", statement); + self.metrics.report(|metrics| metrics.validations_invalid.inc()); + return SubmitResult::Bad("Missing statement proof") + }, + Err(InvalidStatement::InternalError) => { + return SubmitResult::InternalError(Error::Runtime) + }, + } + } + }; + + // Commit to the db prior to locking the index. 
+ let statement_with_meta = StatementWithMeta { + meta: StatementMeta { + priority, + timestamp: self.timestamp(), + }, + statement, + }; + + let mut commit = self.index.write().evict(); + commit.push((col::STATEMENTS, hash.to_vec(), Some(statement_with_meta.encode()))); + if let Err(e) = self.db.commit(commit) { + log::debug!(target: LOG_TARGET, "Statement validation failed: database error {}, {:?}", e, statement_with_meta.statement); + return SubmitResult::InternalError(Error::Db(e.to_string())); + } + self.metrics.report(|metrics| metrics.submitted_statements.inc()); + let mut index = self.index.write(); + index.insert_with_meta(hash, statement_with_meta); + let network_priority = if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; + SubmitResult::New(network_priority) + } + + /// Submit a SCALE-encoded statement. + fn submit_encoded(&self, mut statement: &[u8], source: StatementSource) -> SubmitResult { + match Statement::decode(&mut statement) { + Ok(decoded) => self.submit(decoded, source), + Err(e) => { + log::debug!(target: LOG_TARGET, "Error decoding submitted statement. Failed with: {}", e); + SubmitResult::Bad("Bad SCALE encoding") + } + } + } +} + +#[cfg(test)] +mod tests { +} + + + diff --git a/client/statement-store/src/metrics.rs b/client/statement-store/src/metrics.rs index 17756ae8a7282..db24e1cd4a7f0 100644 --- a/client/statement-store/src/metrics.rs +++ b/client/statement-store/src/metrics.rs @@ -78,44 +78,3 @@ impl Metrics { } } -/// Statement store api Prometheus metrics. -pub struct ApiMetrics { - pub validations_scheduled: Counter, - pub validations_finished: Counter, -} - -impl ApiMetrics { - /// Register the metrics at the given Prometheus registry. - pub fn register(registry: &Registry) -> Result { - Ok(Self { - validations_scheduled: register( - Counter::new( - "substrate_sub_statement_store_validations_scheduled", - "Total number of statements scheduled for validation", - )?, - registry, - )?, - validations_finished: register( - Counter::new( - "substrate_sub_statement_store_validations_finished", - "Total number of statements that finished validation", - )?, - registry, - )?, - }) - } -} - -/// An extension trait for [`ApiMetrics`]. -pub trait ApiMetricsExt { - /// Report an event to the metrics. - fn report(&self, report: impl FnOnce(&ApiMetrics)); -} - -impl ApiMetricsExt for Option> { - fn report(&self, report: impl FnOnce(&ApiMetrics)) { - if let Some(metrics) = self.as_ref() { - report(metrics) - } - } -} diff --git a/client/statement-store/src/store.rs b/client/statement-store/src/store.rs deleted file mode 100644 index 9aad7cb753fa3..0000000000000 --- a/client/statement-store/src/store.rs +++ /dev/null @@ -1,532 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Disk-backed statement store. 
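Aside (not part of the patch): the `EvictionPriority` `Ord` impl in the new client lib.rs above reverses the comparison, turning Rust's max-heap `BinaryHeap` into a min-heap, so `evict()` always pops the statement with the lowest priority, oldest first among equals. A standalone illustration of the same ordering trick, here expressed with `Reverse` over `(priority, timestamp)` tuples:

use std::{cmp::Reverse, collections::BinaryHeap};

fn main() {
	let mut heap = BinaryHeap::new();
	for (priority, timestamp) in [(10u64, 5u64), (1, 7), (1, 3), (50, 1)] {
		heap.push(Reverse((priority, timestamp)));
	}
	// Popped (evicted) first: lowest priority, then oldest timestamp on ties.
	assert_eq!(heap.pop(), Some(Reverse((1, 3))));
	assert_eq!(heap.pop(), Some(Reverse((1, 7))));
	assert_eq!(heap.pop(), Some(Reverse((10, 5))));
}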
- -use std::{collections::{HashSet, HashMap, BinaryHeap}, sync::Arc}; -use parking_lot::RwLock; -use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Error, Hash, BlockHash, SubmitResult, NetworkPriority}; -use sp_statement_store::runtime_api::{ValidateStatement, ValidStatement, InvalidStatement, StatementSource}; -use sp_core::{Encode, Decode}; -use sp_api::ProvideRuntimeApi; -use sp_runtime::traits::Block as BlockT; - -const KEY_VERSION: &[u8] = b"version".as_slice(); -const CURRENT_VERSION: u32 = 1; - -const LOG_TARGET: &str = "statement"; - -const EXPIRE_AFTER: u64 = 24 * 60 * 60; //24h -const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h - -/// Suggested maintenance period. A good value to call `Store::maintain` with. -#[allow(dead_code)] -pub const MAINTENANCE_PERIOD: std::time::Duration = std::time::Duration::from_secs(30); - -mod col { - pub const META: u8 = 0; - pub const STATEMENTS: u8 = 1; - - pub const COUNT: u8 = 2; -} - -#[derive(PartialEq, Eq)] -struct EvictionPriority { - hash: Hash, - priority: u64, - timestamp: u64, -} - -impl PartialOrd for EvictionPriority { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.priority.cmp(&other.priority).then_with(|| self.timestamp.cmp(&other.timestamp)).reverse()) - } -} - -impl Ord for EvictionPriority { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.priority.cmp(&other.priority).then_with(|| self.timestamp.cmp(&other.timestamp)).reverse() - } -} - -#[derive(Default)] -struct Index { - by_topic: HashMap>, - by_dec_key: HashMap>, - all_topics: HashMap; 4], Option)>, - by_priority: BinaryHeap, - entries: HashMap, - expired: HashMap, - max_entries: usize, -} - -struct ClientWrapper { - client: Arc, - _block: std::marker::PhantomData, -} - -impl ClientWrapper - where - Block: BlockT, - Block::Hash: From, - Client: ProvideRuntimeApi - + Send - + Sync - + 'static, - Client::Api: ValidateStatement, -{ - fn validate_statement( - &self, - block: BlockHash, - source: StatementSource, - statement: Statement, - ) -> std::result::Result { - let api = self.client.runtime_api(); - let block = block.into(); - match api.validate_statement(block, source, statement) { - Ok(r) => r, - Err(_) => { - Err(InvalidStatement::InternalError) - } - } - } -} - -/// Statement store. 
-pub struct Store { - db: parity_db::Db, - index: RwLock, - validate_fn: Box std::result::Result + Send + Sync>, - time_overrite: Option, -} - -#[derive(Encode, Decode, Clone)] -pub struct StatementMeta { - priority: u64, - timestamp: u64, -} - -#[derive(Encode, Decode)] -pub struct StatementWithMeta { - meta: StatementMeta, - statement: Statement, -} - -enum IndexQuery { - Unknown, - Exists(u64), - Expired(u64), -} - -impl Index { - fn insert_with_meta(&mut self, hash: Hash, statement: StatementWithMeta) { - let mut all_topics = [None; 4]; - let mut nt = 0; - let StatementWithMeta { statement, meta } = statement; - while let Some(t) = statement.topic(nt) { - self.by_topic.entry(t).or_default().insert(hash); - all_topics[nt] = Some(t); - nt += 1; - } - let key = statement.decryption_key(); - if let Some(k) = &key { - self.by_dec_key.entry(k.clone()).or_default().insert(hash); - } - if nt > 0 || key.is_some() { - self.all_topics.insert(hash, (all_topics, key)); - } - self.expired.remove(&hash); - if self.entries.insert(hash, meta.clone()).is_none() { - self.by_priority.push(EvictionPriority { - hash, - priority: meta.priority, - timestamp: meta.timestamp, - }); - } - } - - fn query(&self, hash: &Hash) -> IndexQuery { - if let Some(meta) = self.entries.get(hash) { - return IndexQuery::Exists(meta.priority); - } - if let Some(meta) = self.expired.get(hash) { - return IndexQuery::Expired(meta.priority); - } - IndexQuery::Unknown - } - - fn insert_expired(&mut self, hash: Hash, meta: StatementMeta) { - self.expired.insert(hash, meta); - } - - fn is_expired(&self, hash: &Hash) -> bool { - self.expired.contains_key(hash) - } - - fn iter(&self, key: Option, topics: &[Topic], mut f: impl FnMut(&Hash) -> Result<()>) -> Result<()> { - let mut sets: [Option<&HashSet>; 4] = Default::default(); - let mut num_sets = 0; - for t in topics { - sets[num_sets] = self.by_topic.get(t); - if sets[num_sets].is_some() { - num_sets += 1; - } - } - if num_sets == 0 && key.is_none() { - // Iterate all entries - for h in self.entries.keys() { - f(h)? - } - } else { - // Start with the smallest topic set or the key set. - sets[0..num_sets].sort_by_key(|s| s.map_or(0, HashSet::len)); - if let Some(key) = key { - let key_set = if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; - for item in key_set { - if sets.iter().all(|set| set.unwrap().contains(item)) { - f(item)? - } - } - } else { - for item in sets[0].unwrap() { - if sets[1 .. num_sets].iter().all(|set| set.unwrap().contains(item)) { - f(item)? - } - } - } - } - Ok(()) - } - - fn maintain(&mut self, current_time: u64) -> Vec<(parity_db::ColId, Vec, Option>)> { - // Purge previously expired messages. - let mut purged = Vec::new(); - self.expired.retain(|hash, meta| { - if meta.timestamp + PURGE_AFTER > current_time { - purged.push((col::STATEMENTS, hash.to_vec(), None)); - false - } else { - true - } - }); - - // Expire messages. 
- let mut num_expired = 0; - self.entries.retain(|hash, meta| { - if meta.timestamp + EXPIRE_AFTER > current_time { - if let Some((topics, key)) = self.all_topics.remove(hash) { - for t in topics { - if let Some(t) = t { - if let Some(set) = self.by_topic.get_mut(&t) { - set.remove(hash); - } - } - } - if let Some(k) = key { - if let Some(set) = self.by_dec_key.get_mut(&k) { - set.remove(hash); - } - } - } - self.expired.insert(hash.clone(), meta.clone()); - num_expired += 1; - false - } else { - true - } - }); - if num_expired > 0 { - // Rebuild the priority queue - self.by_priority = self.entries.iter().map(|(hash, meta)| EvictionPriority { - hash: hash.clone(), - priority: meta.priority, - timestamp: meta.timestamp, - }).collect(); - } - purged - } - - fn evict(&mut self) -> Vec<(parity_db::ColId, Vec, Option>)> { - let mut evicted_set = Vec::new(); - while self.by_priority.len() > self.max_entries { - if let Some(evicted) = self.by_priority.pop() { - self.entries.remove(&evicted.hash); - if let Some((topics, key)) = self.all_topics.remove(&evicted.hash) { - for t in topics { - if let Some(t) = t { - if let Some(set) = self.by_topic.get_mut(&t) { - set.remove(&evicted.hash); - } - } - } - if let Some(k) = key { - if let Some(set) = self.by_dec_key.get_mut(&k) { - set.remove(&evicted.hash); - } - } - } - evicted_set.push((col::STATEMENTS, evicted.hash.to_vec(), None)); - } else { - break; - } - } - evicted_set - } -} - -impl Store { - /// Create a new shared store instance. There should only be one per process. - pub fn new(path: &std::path::Path, client: Arc) -> Result> - where - Block: BlockT, - Block::Hash: From, - Client: ProvideRuntimeApi - + Send - + Sync - + 'static, - Client::Api: ValidateStatement, - { - let mut path: std::path::PathBuf = path.into(); - path.pop(); - path.push("statement"); - - let mut config = parity_db::Options::with_columns(&path, col::COUNT); - - let mut statement_col = &mut config.columns[col::STATEMENTS as usize]; - statement_col.ref_counted = false; - statement_col.preimage = true; - statement_col.uniform = true; - let db = parity_db::Db::open_or_create(&config).map_err(|e| Error::Db(e.to_string()))?; - match db.get(col::META, &KEY_VERSION).map_err(|e| Error::Db(e.to_string()))? 
{ - Some(version) => { - let version = u32::from_le_bytes(version.try_into() - .map_err(|_| Error::Db("Error reading database version".into()))?); - if version != CURRENT_VERSION { - return Err(Error::Db(format!("Unsupported database version: {version}"))); - } - }, - None => { - db.commit( - [(col::META, KEY_VERSION.to_vec(), Some(CURRENT_VERSION.to_le_bytes().to_vec()))] - ).map_err(|e| Error::Db(e.to_string()))?; - } - } - - let index = Index::default(); - let validator = ClientWrapper { client, _block: Default::default() }; - let validate_fn = Box::new(move |block, source, statement| validator.validate_statement(block, source, statement)); - - let store = Arc::new(Store { - db, - index: RwLock::new(index), - validate_fn, - time_overrite: None, - }); - store.populate()?; - Ok(store) - } - - fn populate(&self) -> Result<()> { - let current_time = self.timestamp(); - let mut index = self.index.write(); - self.db.iter_column_while(col::STATEMENTS, |item| { - let statement = item.value; - if let Ok(statement_with_meta) = StatementWithMeta::decode(&mut statement.as_slice()) { - let hash = statement_with_meta.statement.hash(); - if statement_with_meta.meta.timestamp + EXPIRE_AFTER > current_time { - index.insert_expired(hash, statement_with_meta.meta); - } else { - index.insert_with_meta(hash, statement_with_meta); - } - } - true - }).map_err(|e| Error::Db(e.to_string()))?; - - Ok(()) - } - - fn collect_statements(&self, key: Option, match_all_topics: &[Topic], mut f: impl FnMut(Statement) -> Option ) -> Result> { - let mut result = Vec::new(); - let index = self.index.read(); - index.iter(key, match_all_topics, |hash| { - match self.db.get(col::STATEMENTS, hash).map_err(|e| Error::Db(e.to_string()))? { - Some(entry) => { - if let Ok(statement) = StatementWithMeta::decode(&mut entry.as_slice()) { - if let Some(data) = f(statement.statement) { - result.push(data); - } - } else { - // DB inconsistency - log::warn!(target: LOG_TARGET, "Corrupt statement {:?}", hash); - } - - } - None => { - // DB inconsistency - log::warn!(target: LOG_TARGET, "Missing statement {:?}", hash); - } - } - Ok(()) - })?; - Ok(result) - } - - /// Perform periodic store maintenance - pub fn maintain(&self) { - let deleted = self.index.write().maintain(self.timestamp()); - if let Err(e) = self.db.commit(deleted) { - log::warn!(target: LOG_TARGET, "Error writing to the statement database: {:?}", e); - } - } - - fn timestamp(&self) -> u64 { - self.time_overrite.unwrap_or_else(|| - std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs()) - } -} - -impl sp_statement_store::StatementStore for Store { - fn dump_encoded(&self) -> Result)>> { - let mut result = Vec::new(); - self.db.iter_column_while(col::STATEMENTS, |item| { - if let Ok(entry) = StatementWithMeta::decode(&mut item.value.as_slice()) { - entry.statement.using_encoded(|statement| { - let hash = sp_statement_store::hash_encoded(statement); - if !self.index.read().is_expired(&hash) { - result.push((hash, entry.statement.encode())); - } - }); - } - true - }).map_err(|e| Error::Db(e.to_string()))?; - Ok(result) - } - - /// Return all statements. 
- fn dump(&self) -> Result> { - let mut result = Vec::new(); - self.db.iter_column_while(col::STATEMENTS, |item| { - if let Ok(entry) = StatementWithMeta::decode(&mut item.value.as_slice()) { - let hash = entry.statement.hash(); - if !self.index.read().is_expired(&hash) { - result.push((hash, entry.statement)); - } - } - true - }).map_err(|e| Error::Db(e.to_string()))?; - Ok(result) - } - - /// Returns a statement by hash. - fn statement(&self, hash: &Hash) -> Result> { - Ok(match self.db.get(col::STATEMENTS, hash.as_slice()).map_err(|e| Error::Db(e.to_string()))? { - Some(entry) => { - Some(StatementWithMeta::decode(&mut entry.as_slice()).map_err(|e| Error::Decode(e.to_string()))?.statement) - } - None => None, - }) - } - - /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. - fn broadcasts(&self, match_all_topics: &[Topic]) -> Result>> { - self.collect_statements(None, match_all_topics, |statement| statement.into_data()) - } - - /// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers). - fn posted(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>> { - self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) - } - - /// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client. - fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>> { - self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) - } - - /// Submit a statement to the store. Validates the statement and returns validation result. - fn submit(&self, statement: Statement, source: StatementSource) -> SubmitResult { - let hash = statement.hash(); - let priority = match self.index.read().query(&hash) { - IndexQuery::Expired(priority) => { - if !source.can_be_resubmitted() { - return SubmitResult::KnownExpired; - } - priority - } - IndexQuery::Exists(priority) => { - if !source.can_be_resubmitted() { - return SubmitResult::Known; - } - priority - } - IndexQuery::Unknown => { - // Validate. - let validation_result = (self.validate_fn)(Default::default(), source, statement.clone()); - match validation_result { - Ok(ValidStatement { priority }) => priority, - Err(InvalidStatement::BadProof) => { - log::debug!(target: LOG_TARGET, "Statement validation failed: BadProof, {:?}", statement); - return SubmitResult::Bad("Bad statement proof") - }, - Err(InvalidStatement::NoProof) =>{ - log::debug!(target: LOG_TARGET, "Statement validation failed: NoProof, {:?}", statement); - return SubmitResult::Bad("Missing statement proof") - }, - Err(InvalidStatement::InternalError) => { - return SubmitResult::InternalError(Error::Runtime) - }, - } - } - }; - - // Commit to the db prior to locking the index. 
- let statement_with_meta = StatementWithMeta { - meta: StatementMeta { - priority, - timestamp: self.timestamp(), - }, - statement, - }; - - let mut commit = self.index.write().evict(); - commit.push((col::STATEMENTS, hash.to_vec(), Some(statement_with_meta.encode()))); - if let Err(e) = self.db.commit(commit) { - log::debug!(target: LOG_TARGET, "Statement validation failed: database error {}, {:?}", e, statement_with_meta.statement); - return SubmitResult::InternalError(Error::Db(e.to_string())); - } - let mut index = self.index.write(); - index.insert_with_meta(hash, statement_with_meta); - let network_priority = if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; - SubmitResult::New(network_priority) - } - - /// Submit a SCALE-encoded statement. - fn submit_encoded(&self, mut statement: &[u8], source: StatementSource) -> SubmitResult { - match Statement::decode(&mut statement) { - Ok(decoded) => self.submit(decoded, source), - Err(e) => { - log::debug!(target: LOG_TARGET, "Error decoding submitted statement. Failed with: {}", e); - SubmitResult::Bad("Bad SCALE encoding") - } - } - } -} - -#[cfg(test)] -mod tests { -} - diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 78013c3f46098..3974dec529503 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -16,6 +16,7 @@ // limitations under the License. #![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] //! A crate which contains statement-store primitives. diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 238e1fc085645..6b4f2c6a73ed7 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +//! Runtime support for the statement store. + use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; @@ -23,14 +25,19 @@ use crate::Statement; /// Information concerning a valid statement. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ValidStatement { + /// Statement priority as calculated by the runtime. Higher priority statements have lower + /// chance of being evicted. pub priority: u64, } -/// An invalid statement. +/// An reason for an invalid statement. #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug, TypeInfo)] pub enum InvalidStatement { + /// Failed proof validation. BadProof, + /// Missing proof. NoProof, + /// Validity could not be checked because of internal error. InternalError, } @@ -48,6 +55,8 @@ pub enum StatementSource { } impl StatementSource { + /// Check if the source allows the statement to be resubmitted to the store, extending its + /// expiration date. pub fn can_be_resubmitted(&self) -> bool { match self { StatementSource::Chain | StatementSource::Rpc => true, diff --git a/primitives/statement-store/src/store_api.rs b/primitives/statement-store/src/store_api.rs index eaefec08b8805..bf5a65619dc44 100644 --- a/primitives/statement-store/src/store_api.rs +++ b/primitives/statement-store/src/store_api.rs @@ -18,6 +18,7 @@ use crate::{Statement, Topic, Hash}; pub use crate::runtime_api::StatementSource; +/// Statement store error. #[derive(Debug, thiserror::Error)] pub enum Error { /// Database error. 
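Aside (not part of the patch): the `ValidateStatement` runtime API documented above is what the store's `validate_fn` ultimately calls into. A hypothetical shape of the runtime-side logic, where `account_priority` is a placeholder for whatever policy the runtime applies (e.g. a value derived from the signer's free balance):

use sp_statement_store::runtime_api::{InvalidStatement, StatementSource, ValidStatement};
use sp_statement_store::{SignatureVerificationResult, Statement};

fn validate(_source: StatementSource, statement: Statement) -> Result<ValidStatement, InvalidStatement> {
	// Require a valid signature proof and recover the signer's account id.
	let signer = match statement.verify_signature() {
		SignatureVerificationResult::Valid(signer) => signer,
		SignatureVerificationResult::Invalid => return Err(InvalidStatement::BadProof),
		SignatureVerificationResult::NoSignature => return Err(InvalidStatement::NoProof),
	};
	Ok(ValidStatement { priority: account_priority(&signer) })
}

// Placeholder policy; a real runtime would look this value up (e.g. from balances).
fn account_priority(_signer: &[u8; 32]) -> u64 {
	1
}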
@@ -32,8 +33,11 @@ pub enum Error { } #[derive(Debug, PartialEq, Eq)] +/// Network propagation priority. pub enum NetworkPriority { + /// High priority. Statement should be broadcast to all peers. High, + /// Low priority. Low, } @@ -52,8 +56,10 @@ pub enum SubmitResult { InternalError(Error), } +/// Result type for `Error` pub type Result = std::result::Result; +/// Statement store API. pub trait StatementStore: Send + Sync { /// Return all statements, SCALE-encoded. fn dump_encoded(&self) -> Result)>>; From 66b9c981dd344076ded0c7797df4bb3ff47e876f Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 22 Mar 2023 23:21:36 +0100 Subject: [PATCH 10/78] Store tests --- Cargo.lock | 4 +- client/statement-store/Cargo.toml | 14 +- client/statement-store/src/lib.rs | 305 ++++++++++++++++---- primitives/statement-store/src/store_api.rs | 4 +- 4 files changed, 260 insertions(+), 67 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3c07fbba013e1..6822f654dc22d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9472,6 +9472,7 @@ name = "sc-statement-store" version = "4.0.0-dev" dependencies = [ "async-trait", + "env_logger 0.9.3", "futures", "futures-timer", "log", @@ -9479,12 +9480,13 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", "sp-api", + "sp-blockchain", "sp-core", "sp-runtime", "sp-statement-store", "sp-tracing", "substrate-prometheus-endpoint", - "substrate-test-runtime", + "tempfile", ] [[package]] diff --git a/client/statement-store/Cargo.toml b/client/statement-store/Cargo.toml index 6bbbc8338f83f..3bc285555ed3c 100644 --- a/client/statement-store/Cargo.toml +++ b/client/statement-store/Cargo.toml @@ -22,22 +22,16 @@ parking_lot = "0.12.1" parity-db = "0.4.6" sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } -#sc-client-api = { version = "4.0.0-dev", path = "../api" } -#sc-utils = { version = "4.0.0-dev", path = "../utils" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } -#sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-core = { version = "7.0.0", path = "../../primitives/core" } sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } -#sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" } [dev-dependencies] -#array-bytes = "4.1" -#assert_matches = "1.3.0" -#criterion = "0.4.0" -#sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } -#sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } -substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +tempfile = "3.1.0" +env_logger = "0.9" +#substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } #substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } #substrate-test-runtime-transaction-pool = { version = "2.0.0", path = "../../test-utils/runtime/transaction-pool" } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 53ae199b5b123..6392b3c5f193c 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -29,20 +29,22 @@ use std::{collections::{HashSet, HashMap, BinaryHeap}, sync::Arc}; use parking_lot::RwLock; 
use metrics::MetricsLink as PrometheusMetrics; use prometheus_endpoint::Registry as PrometheusRegistry; -use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Hash, BlockHash, SubmitResult, NetworkPriority}; +use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Hash, BlockHash, SubmitResult, NetworkPriority, Proof}; use sp_statement_store::runtime_api::{ValidateStatement, ValidStatement, InvalidStatement, StatementSource}; -use sp_core::{Encode, Decode}; +use sp_core::{Encode, Decode, hexdisplay::HexDisplay}; use sp_api::ProvideRuntimeApi; use sp_runtime::traits::Block as BlockT; +use sp_blockchain::HeaderBackend; const KEY_VERSION: &[u8] = b"version".as_slice(); const CURRENT_VERSION: u32 = 1; -const LOG_TARGET: &str = "statement"; +const LOG_TARGET: &str = "statement-store"; const EXPIRE_AFTER: u64 = 24 * 60 * 60; //24h const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h +const MAX_LIVE_STATEMENTS: usize = 8192; /// Suggested maintenance period. A good value to call `Store::maintain` with. #[allow(dead_code)] @@ -94,20 +96,20 @@ impl ClientWrapper where Block: BlockT, Block::Hash: From, - Client: ProvideRuntimeApi - + Send - + Sync - + 'static, + Client: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, Client::Api: ValidateStatement, { fn validate_statement( &self, - block: BlockHash, + block: Option, source: StatementSource, statement: Statement, ) -> std::result::Result { let api = self.client.runtime_api(); - let block = block.into(); + let block = block.map(Into::into).unwrap_or_else(|| { + // Validate against the finalized state. + self.client.info().finalized_hash + }); match api.validate_statement(block, source, statement) { Ok(r) => r, Err(_) => { @@ -121,8 +123,8 @@ impl ClientWrapper pub struct Store { db: parity_db::Db, index: RwLock, - validate_fn: Box std::result::Result + Send + Sync>, - time_overrite: Option, + validate_fn: Box, StatementSource, Statement) -> std::result::Result + Send + Sync>, + time_override: Option, metrics: PrometheusMetrics, } @@ -201,6 +203,7 @@ impl Index { if num_sets == 0 && key.is_none() { // Iterate all entries for h in self.entries.keys() { + log::trace!(target: LOG_TARGET, "Iterating: {:?}", HexDisplay::from(h)); f(h)? } } else { @@ -210,12 +213,14 @@ impl Index { let key_set = if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; for item in key_set { if sets.iter().all(|set| set.unwrap().contains(item)) { + log::trace!(target: LOG_TARGET, "Iterating by key: {:?}", HexDisplay::from(item)); f(item)? } } } else { for item in sets[0].unwrap() { if sets[1 .. num_sets].iter().all(|set| set.unwrap().contains(item)) { + log::trace!(target: LOG_TARGET, "Iterating by topic: {:?}", HexDisplay::from(item)); f(item)? } } @@ -228,8 +233,9 @@ impl Index { // Purge previously expired messages. let mut purged = Vec::new(); self.expired.retain(|hash, meta| { - if meta.timestamp + PURGE_AFTER > current_time { + if meta.timestamp + PURGE_AFTER < current_time { purged.push((col::STATEMENTS, hash.to_vec(), None)); + log::trace!(target: LOG_TARGET, "Purged statement {:?}", HexDisplay::from(hash)); false } else { true @@ -239,7 +245,7 @@ impl Index { // Expire messages. 
let mut num_expired = 0; self.entries.retain(|hash, meta| { - if meta.timestamp + EXPIRE_AFTER > current_time { + if meta.timestamp + EXPIRE_AFTER < current_time { if let Some((topics, key)) = self.all_topics.remove(hash) { for t in topics { if let Some(t) = t { @@ -254,6 +260,7 @@ impl Index { } } } + log::trace!(target: LOG_TARGET, "Expired statement {:?}", HexDisplay::from(hash)); self.expired.insert(hash.clone(), meta.clone()); num_expired += 1; false @@ -310,10 +317,7 @@ impl Store { where Block: BlockT, Block::Hash: From, - Client: ProvideRuntimeApi - + Send - + Sync - + 'static, + Client: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, Client::Api: ValidateStatement, { let mut path: std::path::PathBuf = path.into(); @@ -342,7 +346,8 @@ impl Store { } } - let index = Index::default(); + let mut index = Index::default(); + index.max_entries = MAX_LIVE_STATEMENTS; let validator = ClientWrapper { client, _block: Default::default() }; let validate_fn = Box::new(move |block, source, statement| validator.validate_statement(block, source, statement)); @@ -350,7 +355,7 @@ impl Store { db, index: RwLock::new(index), validate_fn, - time_overrite: None, + time_override: None, metrics: PrometheusMetrics::new(prometheus), }); store.populate()?; @@ -359,20 +364,25 @@ impl Store { fn populate(&self) -> Result<()> { let current_time = self.timestamp(); - let mut index = self.index.write(); - self.db.iter_column_while(col::STATEMENTS, |item| { - let statement = item.value; - if let Ok(statement_with_meta) = StatementWithMeta::decode(&mut statement.as_slice()) { - let hash = statement_with_meta.statement.hash(); - if statement_with_meta.meta.timestamp + EXPIRE_AFTER > current_time { - index.insert_expired(hash, statement_with_meta.meta); - } else { - index.insert_with_meta(hash, statement_with_meta); + { + let mut index = self.index.write(); + self.db.iter_column_while(col::STATEMENTS, |item| { + let statement = item.value; + if let Ok(statement_with_meta) = StatementWithMeta::decode(&mut statement.as_slice()) { + let hash = statement_with_meta.statement.hash(); + if statement_with_meta.meta.timestamp + EXPIRE_AFTER < current_time { + log::trace!(target: LOG_TARGET, "Statement loaded (expired): {:?}", HexDisplay::from(&hash)); + index.insert_expired(hash, statement_with_meta.meta); + } else { + log::trace!(target: LOG_TARGET, "Statement loaded {:?}", HexDisplay::from(&hash)); + index.insert_with_meta(hash, statement_with_meta); + } } - } - true - }).map_err(|e| Error::Db(e.to_string()))?; + true + }).map_err(|e| Error::Db(e.to_string()))?; + } + self.maintain(); Ok(()) } @@ -388,13 +398,13 @@ impl Store { } } else { // DB inconsistency - log::warn!(target: LOG_TARGET, "Corrupt statement {:?}", hash); + log::warn!(target: LOG_TARGET, "Corrupt statement {:?}", HexDisplay::from(hash)); } } None => { // DB inconsistency - log::warn!(target: LOG_TARGET, "Missing statement {:?}", hash); + log::warn!(target: LOG_TARGET, "Missing statement {:?}", HexDisplay::from(hash)); } } Ok(()) @@ -414,40 +424,44 @@ impl Store { } fn timestamp(&self) -> u64 { - self.time_overrite.unwrap_or_else(|| + self.time_override.unwrap_or_else(|| std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs()) } } impl StatementStore for Store { fn dump_encoded(&self) -> Result)>> { - let mut result = Vec::new(); - self.db.iter_column_while(col::STATEMENTS, |item| { - if let Ok(entry) = StatementWithMeta::decode(&mut item.value.as_slice()) { - entry.statement.using_encoded(|statement| { - 
let hash = sp_statement_store::hash_encoded(statement); - if !self.index.read().is_expired(&hash) { - result.push((hash, entry.statement.encode())); - } - }); + let index = self.index.read(); + let mut result = Vec::with_capacity(index.entries.len()); + for h in self.index.read().entries.keys() { + let encoded = self.db.get(col::STATEMENTS, h).map_err(|e| Error::Db(e.to_string()))?; + if let Some(encoded) = encoded { + if let Ok(entry) = StatementWithMeta::decode(&mut encoded.as_slice()) { + entry.statement.using_encoded(|statement| { + let hash = sp_statement_store::hash_encoded(statement); + if !self.index.read().is_expired(&hash) { + result.push((hash, entry.statement.encode())); + } + }); + } } - true - }).map_err(|e| Error::Db(e.to_string()))?; + } Ok(result) } /// Return all statements. fn dump(&self) -> Result> { - let mut result = Vec::new(); - self.db.iter_column_while(col::STATEMENTS, |item| { - if let Ok(entry) = StatementWithMeta::decode(&mut item.value.as_slice()) { - let hash = entry.statement.hash(); - if !self.index.read().is_expired(&hash) { + let index = self.index.read(); + let mut result = Vec::with_capacity(index.entries.len()); + for h in self.index.read().entries.keys() { + let encoded = self.db.get(col::STATEMENTS, h).map_err(|e| Error::Db(e.to_string()))?; + if let Some(encoded) = encoded { + if let Ok(entry) = StatementWithMeta::decode(&mut encoded.as_slice()) { + let hash = entry.statement.hash(); result.push((hash, entry.statement)); } } - true - }).map_err(|e| Error::Db(e.to_string()))?; + } Ok(result) } @@ -494,7 +508,12 @@ impl StatementStore for Store { } IndexQuery::Unknown => { // Validate. - let validation_result = (self.validate_fn)(Default::default(), source, statement.clone()); + let at_block = if let Some(Proof::OnChain{block_hash, ..}) = statement.proof() { + Some(block_hash.clone()) + } else { + None + }; + let validation_result = (self.validate_fn)(at_block, source, statement.clone()); match validation_result { Ok(ValidStatement { priority }) => priority, Err(InvalidStatement::BadProof) => { @@ -533,6 +552,7 @@ impl StatementStore for Store { let mut index = self.index.write(); index.insert_with_meta(hash, statement_with_meta); let network_priority = if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; + log::trace!(target: LOG_TARGET, "Statement submitted: {:?}", HexDisplay::from(&hash)); SubmitResult::New(network_priority) } @@ -550,7 +570,184 @@ impl StatementStore for Store { #[cfg(test)] mod tests { -} + use sp_statement_store::runtime_api::{ValidateStatement, ValidStatement, InvalidStatement}; + use sp_statement_store::{Statement, Topic, + SignatureVerificationResult, Proof, StatementStore, StatementSource, NetworkPriority, SubmitResult, + }; + use crate::Store; + use sp_core::Pair; + + type Extrinsic = sp_runtime::OpaqueExtrinsic; + type Hash = sp_core::H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type BlockNumber = u64; + type Header = sp_runtime::generic::Header; + type Block = sp_runtime::generic::Block; + + const CORRECT_BLOCK_HASH: [u8; 32] = [1u8; 32]; + + #[derive(Clone)] + pub(crate) struct TestClient; + + pub(crate) struct RuntimeApi { + _inner: TestClient, + } + impl sp_api::ProvideRuntimeApi for TestClient { + type Api = RuntimeApi; + fn runtime_api(&self) -> sp_api::ApiRef { + RuntimeApi { _inner: self.clone() }.into() + } + } + sp_api::mock_impl_runtime_apis! 
{ + impl ValidateStatement for RuntimeApi { + fn validate_statement( + _source: StatementSource, + statement: Statement, + ) -> std::result::Result { + match statement.verify_signature() { + SignatureVerificationResult::Valid(_) => Ok(ValidStatement{priority: 10}), + SignatureVerificationResult::Invalid => Err(InvalidStatement::BadProof), + SignatureVerificationResult::NoSignature => { + if let Some(Proof::OnChain { block_hash, .. }) = statement.proof() { + if block_hash == &CORRECT_BLOCK_HASH { + Ok(ValidStatement{priority: 1}) + } else { + Err(InvalidStatement::BadProof) + } + } else { + Ok(ValidStatement{priority: 0}) + } + } + } + } + } + } + impl sp_blockchain::HeaderBackend for TestClient { + fn header(&self, _hash: Hash) -> sp_blockchain::Result> { + unimplemented!() + } + fn info(&self) -> sp_blockchain::Info { + sp_blockchain::Info { + best_hash: CORRECT_BLOCK_HASH.into(), + best_number: 0, + genesis_hash: Default::default(), + finalized_hash: CORRECT_BLOCK_HASH.into(), + finalized_number: 1, + finalized_state: None, + number_leaves: 0, + block_gap: None, + } + } + fn status(&self, _hash: Hash) -> sp_blockchain::Result { + unimplemented!() + } + fn number( &self, _hash: Hash) -> sp_blockchain::Result> { + unimplemented!() + } + fn hash(&self, _number: BlockNumber) -> sp_blockchain::Result> { + unimplemented!() + } + } + + fn test_store() -> (std::sync::Arc, tempfile::TempDir) { + let _ = env_logger::try_init(); + let temp_dir = tempfile::Builder::new() + .tempdir() + .expect("Error creating test dir"); + + let client = std::sync::Arc::new(TestClient); + let mut path: std::path::PathBuf = temp_dir.path().into(); + path.push("db"); + let store = Store::new(&path, client, None).unwrap(); + (store, temp_dir) // return order is important. Store must be dropped before TempDir + } + + fn signed_statement(data: u8) -> Statement { + signed_statement_with_topics(data, &[]) + } + + fn signed_statement_with_topics(data: u8, topics: &[Topic]) -> Statement { + let mut statement = Statement::new(); + statement.set_plain_data(vec![data]); + for i in 0 .. 
topics.len() { + statement.set_topic(i, topics[i].clone()); + } + let kp = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); + statement.sign_sr25519_private(&kp); + statement + } + + fn topic(data: u8) -> Topic { + [data; 32] + } + + #[test] + fn submit_one() { + let (store, _temp) = test_store(); + let statement0 = signed_statement(0); + assert_eq!(store.submit(statement0, StatementSource::Network), SubmitResult::New(NetworkPriority::High)); + let unsigned = Statement::new(); + assert_eq!(store.submit(unsigned, StatementSource::Network), SubmitResult::New(NetworkPriority::Low)); + } + + #[test] + fn save_and_load_statements() { + let (store, temp) = test_store(); + let statement0 = signed_statement(0); + let statement1 = signed_statement(1); + let statement2 = signed_statement(2); + assert_eq!(store.submit(statement0.clone(), StatementSource::Network), SubmitResult::New(NetworkPriority::High)); + assert_eq!(store.submit(statement1.clone(), StatementSource::Network), SubmitResult::New(NetworkPriority::High)); + assert_eq!(store.submit(statement2.clone(), StatementSource::Network), SubmitResult::New(NetworkPriority::High)); + assert_eq!(store.dump().unwrap().len(), 3); + assert_eq!(store.broadcasts(&[]).unwrap().len(), 3); + assert_eq!(store.statement(&statement1.hash()).unwrap(), Some(statement1.clone())); + std::mem::drop(store); + + let client = std::sync::Arc::new(TestClient); + let mut path: std::path::PathBuf = temp.path().into(); + path.push("db"); + let store = Store::new(&path, client, None).unwrap(); + assert_eq!(store.dump().unwrap().len(), 3); + assert_eq!(store.broadcasts(&[]).unwrap().len(), 3); + assert_eq!(store.statement(&statement1.hash()).unwrap(), Some(statement1)); + } + + #[test] + fn search_by_topic() { + let (store, _temp) = test_store(); + let statement0 = signed_statement(0); + let statement1 = signed_statement_with_topics(1, &[topic(0)]); + let statement2 = signed_statement_with_topics(2, &[topic(0), topic(1)]); + let statement3 = signed_statement_with_topics(3, &[topic(0), topic(1), topic(2)]); + let statement4 = signed_statement_with_topics(4, &[topic(0), topic(42), topic(2), topic(3)]); + let statements = vec![statement0, statement1, statement2, statement3, statement4]; + for s in &statements { + store.submit(s.clone(), StatementSource::Network); + } + + let assert_topics = |topics: &[u8], expected: &[u8]| { + let topics: Vec<_> = topics.iter().map(|t| topic(*t)).collect(); + let mut got_vals: Vec<_> = store.broadcasts(&topics).unwrap().into_iter().map(|d| d[0]).collect(); + got_vals.sort(); + assert_eq!(expected.to_vec(), got_vals); + }; + + assert_topics(&[], &[0,1,2,3,4]); + assert_topics(&[0], &[1,2,3,4]); + assert_topics(&[1], &[2,3]); + assert_topics(&[2], &[3,4]); + assert_topics(&[3], &[4]); + assert_topics(&[42], &[4]); + + assert_topics(&[0,1], &[2, 3]); + assert_topics(&[1,2], &[3]); + } + + #[test] + fn maintenance() { + } +} diff --git a/primitives/statement-store/src/store_api.rs b/primitives/statement-store/src/store_api.rs index bf5a65619dc44..ccb5522e0f69d 100644 --- a/primitives/statement-store/src/store_api.rs +++ b/primitives/statement-store/src/store_api.rs @@ -19,7 +19,7 @@ use crate::{Statement, Topic, Hash}; pub use crate::runtime_api::StatementSource; /// Statement store error. -#[derive(Debug, thiserror::Error)] +#[derive(Debug, Eq, PartialEq, thiserror::Error)] pub enum Error { /// Database error. 
#[error("Database error: {0:?}")] @@ -42,7 +42,7 @@ pub enum NetworkPriority { } /// Statement submission outcome -#[derive(Debug)] +#[derive(Debug, Eq, PartialEq)] pub enum SubmitResult { /// Accepted as new with given score New(NetworkPriority), From c8f04674720c376a90425e42fc78422c451c18c8 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 23 Mar 2023 12:10:02 +0100 Subject: [PATCH 11/78] Store maintenance test --- client/statement-store/src/lib.rs | 104 +++++++++++++++++++++++++----- 1 file changed, 89 insertions(+), 15 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 6392b3c5f193c..86abde26fd5ee 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -233,7 +233,7 @@ impl Index { // Purge previously expired messages. let mut purged = Vec::new(); self.expired.retain(|hash, meta| { - if meta.timestamp + PURGE_AFTER < current_time { + if meta.timestamp + PURGE_AFTER <= current_time { purged.push((col::STATEMENTS, hash.to_vec(), None)); log::trace!(target: LOG_TARGET, "Purged statement {:?}", HexDisplay::from(hash)); false @@ -245,7 +245,7 @@ impl Index { // Expire messages. let mut num_expired = 0; self.entries.retain(|hash, meta| { - if meta.timestamp + EXPIRE_AFTER < current_time { + if meta.timestamp + EXPIRE_AFTER <= current_time { if let Some((topics, key)) = self.all_topics.remove(hash) { for t in topics { if let Some(t) = t { @@ -281,8 +281,10 @@ impl Index { fn evict(&mut self) -> Vec<(parity_db::ColId, Vec, Option>)> { let mut evicted_set = Vec::new(); - while self.by_priority.len() > self.max_entries { + + while self.by_priority.len() >= self.max_entries { if let Some(evicted) = self.by_priority.pop() { + log::trace!(target: LOG_TARGET, "Evicting statement {:?}", HexDisplay::from(&evicted.hash)); self.entries.remove(&evicted.hash); if let Some((topics, key)) = self.all_topics.remove(&evicted.hash) { for t in topics { @@ -313,7 +315,7 @@ impl Store { path: &std::path::Path, client: Arc, prometheus: Option<&PrometheusRegistry>, - ) -> Result> + ) -> Result where Block: BlockT, Block::Hash: From, @@ -351,13 +353,13 @@ impl Store { let validator = ClientWrapper { client, _block: Default::default() }; let validate_fn = Box::new(move |block, source, statement| validator.validate_statement(block, source, statement)); - let store = Arc::new(Store { + let store = Store { db, index: RwLock::new(index), validate_fn, time_override: None, metrics: PrometheusMetrics::new(prometheus), - }); + }; store.populate()?; Ok(store) } @@ -414,6 +416,7 @@ impl Store { /// Perform periodic store maintenance pub fn maintain(&self) { + log::trace!(target: LOG_TARGET, "Started store maintenance"); let deleted = self.index.write().maintain(self.timestamp()); let count = deleted.len() as u64; if let Err(e) = self.db.commit(deleted) { @@ -421,12 +424,20 @@ impl Store { } else { self.metrics.report(|metrics| metrics.statements_pruned.inc_by(count)); } + log::trace!(target: LOG_TARGET, "Completed store maintenance. 
Purged: {}, Active: {}, Expired: {}", + count, self.index.read().entries.len(), self.index.read().expired.len() + ); } fn timestamp(&self) -> u64 { self.time_override.unwrap_or_else(|| std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs()) } + + #[cfg(test)] + fn set_time(&mut self, time: u64) { + self.time_override = Some(time); + } } impl StatementStore for Store { @@ -469,9 +480,13 @@ impl StatementStore for Store { fn statement(&self, hash: &Hash) -> Result> { Ok(match self.db.get(col::STATEMENTS, hash.as_slice()).map_err(|e| Error::Db(e.to_string()))? { Some(entry) => { + log::trace!(target: LOG_TARGET, "Queried statement {:?}", HexDisplay::from(hash)); Some(StatementWithMeta::decode(&mut entry.as_slice()).map_err(|e| Error::Decode(e.to_string()))?.statement) } - None => None, + None => { + log::trace!(target: LOG_TARGET, "Queried missing statement {:?}", HexDisplay::from(hash)); + None + }, }) } @@ -651,7 +666,7 @@ mod tests { } } - fn test_store() -> (std::sync::Arc, tempfile::TempDir) { + fn test_store() -> (Store, tempfile::TempDir) { let _ = env_logger::try_init(); let temp_dir = tempfile::Builder::new() .tempdir() @@ -674,13 +689,29 @@ mod tests { for i in 0 .. topics.len() { statement.set_topic(i, topics[i].clone()); } - let kp = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); - statement.sign_sr25519_private(&kp); + let kp = sp_core::ed25519::Pair::from_string("//Alice", None).unwrap(); + statement.sign_ed25519_private(&kp); + statement + } + + fn onchain_statement_with_topics(data: u8, topics: &[Topic]) -> Statement { + let mut statement = Statement::new(); + statement.set_plain_data(vec![data]); + for i in 0 .. topics.len() { + statement.set_topic(i, topics[i].clone()); + } + statement.set_proof(Proof::OnChain { + block_hash: CORRECT_BLOCK_HASH, + who: Default::default(), + event_index: 0, + }); statement } - fn topic(data: u8) -> Topic { - [data; 32] + fn topic(data: u64) -> Topic { + let mut topic: Topic = Default::default(); + topic[0..8].copy_from_slice(&data.to_le_bytes()); + topic } #[test] @@ -704,7 +735,7 @@ mod tests { assert_eq!(store.dump().unwrap().len(), 3); assert_eq!(store.broadcasts(&[]).unwrap().len(), 3); assert_eq!(store.statement(&statement1.hash()).unwrap(), Some(statement1.clone())); - std::mem::drop(store); + drop(store); let client = std::sync::Arc::new(TestClient); let mut path: std::path::PathBuf = temp.path().into(); @@ -728,7 +759,7 @@ mod tests { store.submit(s.clone(), StatementSource::Network); } - let assert_topics = |topics: &[u8], expected: &[u8]| { + let assert_topics = |topics: &[u64], expected: &[u8]| { let topics: Vec<_> = topics.iter().map(|t| topic(*t)).collect(); let mut got_vals: Vec<_> = store.broadcasts(&topics).unwrap().into_iter().map(|d| d[0]).collect(); got_vals.sort(); @@ -748,6 +779,49 @@ mod tests { #[test] fn maintenance() { + use super::{MAX_LIVE_STATEMENTS, EXPIRE_AFTER, PURGE_AFTER}; + // Check test assumptions + assert!((MAX_LIVE_STATEMENTS as u64) < EXPIRE_AFTER); + + // first 10 statements are high priority, the rest is low. + let (mut store, _temp) = test_store(); + for time in 0 .. 
MAX_LIVE_STATEMENTS as u64 { + store.set_time(time); + let statement = if time < 10 { + signed_statement_with_topics(0, &[topic(time)]) + } else { + onchain_statement_with_topics(0, &[topic(time)]) + }; + store.submit(statement, StatementSource::Network); + } + + let first = signed_statement_with_topics(0, &[topic(0)]); + let second = signed_statement_with_topics(0, &[topic(0)]); + assert_eq!(first, second); + assert_eq!(store.statement(&first.hash()).unwrap().unwrap(), first); + assert_eq!(store.index.read().entries.len(), MAX_LIVE_STATEMENTS); + + let first_to_be_evicted = onchain_statement_with_topics(0, &[topic(10)]); + assert_eq!(store.index.read().entries.len(), MAX_LIVE_STATEMENTS); + assert_eq!(store.statement(&first_to_be_evicted.hash()).unwrap().unwrap(), first_to_be_evicted); + + // Check that the new statement replaces the old. + store.submit(signed_statement_with_topics(0, &[topic(MAX_LIVE_STATEMENTS as u64 + 1)]) , StatementSource::Network); + assert_eq!(store.statement(&first_to_be_evicted.hash()).unwrap(), None); + + + store.set_time(EXPIRE_AFTER + (MAX_LIVE_STATEMENTS as u64)/ 2); + store.maintain(); + // Half statements should be expired. + assert_eq!(store.index.read().entries.len(), MAX_LIVE_STATEMENTS / 2); + assert_eq!(store.index.read().expired.len(), MAX_LIVE_STATEMENTS / 2); + + // The high-priority statement should survive. + assert_eq!(store.statement(&first.hash()).unwrap().unwrap(), first); + + store.set_time(PURGE_AFTER + (MAX_LIVE_STATEMENTS as u64)/ 2); + store.maintain(); + assert_eq!(store.index.read().entries.len(), 0); + assert_eq!(store.index.read().expired.len(), MAX_LIVE_STATEMENTS / 2); } } - From 9bc6773e61ad21832c61ca7eef5355e5d0553778 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 23 Mar 2023 12:31:55 +0100 Subject: [PATCH 12/78] cargo fmt --- bin/node-template/node/src/service.rs | 34 +- bin/node/cli/src/service.rs | 34 +- bin/node/runtime/src/lib.rs | 1 - client/network/statement/src/config.rs | 2 - client/network/statement/src/lib.rs | 44 +- client/rpc-api/src/statement/mod.rs | 18 +- client/rpc/src/statement/mod.rs | 46 +- client/rpc/src/statement/tests.rs | 2 - client/service/src/builder.rs | 21 +- client/service/src/lib.rs | 2 +- client/statement-store/src/lib.rs | 393 ++++++++++++------ client/statement-store/src/metrics.rs | 1 - frame/statement/src/lib.rs | 61 +-- frame/statement/src/mock.rs | 10 +- frame/statement/src/tests.rs | 35 +- primitives/statement-store/src/lib.rs | 88 ++-- primitives/statement-store/src/runtime_api.rs | 3 +- primitives/statement-store/src/store_api.rs | 12 +- 18 files changed, 502 insertions(+), 305 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 818e0895dd264..8459dc88eedc8 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -130,7 +130,11 @@ pub fn new_partial( compatibility_mode: Default::default(), })?; - let statement_store = sc_statement_store::Store::new(config.database.path().unwrap(), client.clone(), config.prometheus_registry())?; + let statement_store = sc_statement_store::Store::new( + config.database.path().unwrap(), + client.clone(), + config.prometheus_registry(), + )?; Ok(sc_service::PartialComponents { client, @@ -174,17 +178,23 @@ pub fn new_full(mut config: Configuration) -> Result Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, _statement_handler_controller, network_starter, sync_service) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: 
&config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - statement_store: statement_store.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - block_announce_validator_builder: None, - warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), - })?; + let ( + network, + system_rpc_tx, + tx_handler_controller, + _statement_handler_controller, + network_starter, + sync_service, + ) = sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + statement_store: statement_store.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), + })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers( diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 5f2a6e799a548..ef704597cce4f 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -282,7 +282,11 @@ pub fn new_partial( (rpc_extensions_builder, shared_voter_state2) }; - let statement_store = sc_statement_store::Store::new(config.database.path().unwrap(), client.clone(), config.prometheus_registry())?; + let statement_store = sc_statement_store::Store::new( + config.database.path().unwrap(), + client.clone(), + config.prometheus_registry(), + )?; Ok(sc_service::PartialComponents { client, @@ -358,17 +362,23 @@ pub fn new_full_base( Vec::default(), )); - let (network, system_rpc_tx, tx_handler_controller, _statement_handler_controller, network_starter, sync_service) = - sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - statement_store: statement_store.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - block_announce_validator_builder: None, - warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), - })?; + let ( + network, + system_rpc_tx, + tx_handler_controller, + _statement_handler_controller, + network_starter, + sync_service, + ) = sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + statement_store: statement_store.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), + })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers( diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 6696961923e22..2e8b3d18226db 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1744,7 +1744,6 @@ impl frame_benchmarking_pallet_pov::Config for Runtime { type RuntimeEvent = RuntimeEvent; } - parameter_types! { pub StatementPriorityBalance: Balance = 10 * CENTS; } diff --git a/client/network/statement/src/config.rs b/client/network/statement/src/config.rs index e05ed7e1e17c5..d3eaba1dac0ba 100644 --- a/client/network/statement/src/config.rs +++ b/client/network/statement/src/config.rs @@ -24,7 +24,6 @@ use std::time; pub(crate) const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); /// Maximum number of known statement hashes to keep for a peer. -/// pub(crate) const MAX_KNOWN_STATEMENTS: usize = 10240; /// Maximum allowed size for a statement notification. 
@@ -32,4 +31,3 @@ pub(crate) const MAX_STATEMENT_SIZE: u64 = 256 * 1024; /// Maximum number of statement validation request we keep at any moment. pub(crate) const MAX_PENDING_STATEMENTS: usize = 8192; - diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index 9cdaae33075ae..097815dba06f1 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -28,7 +28,7 @@ use crate::config::*; use codec::{Decode, Encode}; -use futures::{prelude::*, stream::FuturesUnordered, channel::oneshot}; +use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered}; use libp2p::{multiaddr, PeerId}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ @@ -43,8 +43,10 @@ use sc_network_common::{ role::ObservedRole, sync::{SyncEvent, SyncEventStream}, }; -use sp_statement_store::{Hash, Statement, StatementSource, StatementStore, SubmitResult, NetworkPriority}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_statement_store::{ + Hash, NetworkPriority, Statement, StatementSource, StatementStore, SubmitResult, +}; use std::{ collections::{hash_map::Entry, HashMap}, iter, @@ -181,21 +183,26 @@ impl StatementHandlerPrototype { let net_event_stream = network.event_stream("statement-handler-net"); let sync_event_stream = sync.event_stream("statement-handler-sync"); let (to_handler, from_controller) = tracing_unbounded("mpsc_statement_handler", 100_000); - let (queue_sender, mut queue_receiver) = tracing_unbounded("mpsc_statement_validator", 100_000); + let (queue_sender, mut queue_receiver) = + tracing_unbounded("mpsc_statement_validator", 100_000); let store = statement_store.clone(); executor( async move { loop { - let task: Option<(Statement, oneshot::Sender)> = queue_receiver.next().await; + let task: Option<(Statement, oneshot::Sender)> = + queue_receiver.next().await; match task { None => return, Some((statement, completion)) => { let result = store.submit(statement, StatementSource::Network); if let Err(_) = completion.send(result) { - log::debug!(target: LOG_TARGET, "Error sending validation completion"); + log::debug!( + target: LOG_TARGET, + "Error sending validation completion" + ); } - } + }, } } } @@ -397,15 +404,19 @@ where } // Accept statements only when node is not major syncing if self.sync.is_major_syncing() { - log::trace!(target: LOG_TARGET, "{remote}: Ignoring statements while major syncing"); + log::trace!( + target: LOG_TARGET, + "{remote}: Ignoring statements while major syncing" + ); continue } - if let Ok(statements) = - ::decode(&mut message.as_ref()) - { + if let Ok(statements) = ::decode(&mut message.as_ref()) { self.on_statements(remote, statements); } else { - log::debug!(target: LOG_TARGET, "Failed to decode statement list from {remote}"); + log::debug!( + target: LOG_TARGET, + "Failed to decode statement list from {remote}" + ); } } }, @@ -438,10 +449,8 @@ where Entry::Vacant(entry) => { let (completion_sender, completion_receiver) = oneshot::channel(); if let Ok(()) = self.queue_sender.unbounded_send((s, completion_sender)) { - self.pending_statements.push(PendingStatement { - validation: completion_receiver, - hash, - }); + self.pending_statements + .push(PendingStatement { validation: completion_receiver, hash }); entry.insert(vec![who]); } }, @@ -479,10 +488,7 @@ where } } - fn do_propagate_statements( - &mut self, - statements: &[(Hash, Statement)], - ) { + fn do_propagate_statements(&mut self, statements: 
&[(Hash, Statement)]) { let mut propagated_statements = 0; for (who, peer) in self.peers.iter_mut() { diff --git a/client/rpc-api/src/statement/mod.rs b/client/rpc-api/src/statement/mod.rs index 0231e30db3a61..b44ede6d17a0f 100644 --- a/client/rpc-api/src/statement/mod.rs +++ b/client/rpc-api/src/statement/mod.rs @@ -28,19 +28,27 @@ pub mod error; pub trait StatementApi { /// Return all statements, SCALE-encoded. #[method(name = "statement_dump")] - fn dump(&self, ) -> RpcResult>; + fn dump(&self) -> RpcResult>; - /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. + /// Return the data of all known statements which include all topics and have no `DecryptionKey` + /// field. #[method(name = "statement_broadcasts")] fn broadcasts(&self, match_all_topics: Vec<[u8; 32]>) -> RpcResult>; - /// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers). + /// Return the data of all known statements whose decryption key is identified as `dest` (this + /// will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the + /// private key for symmetric ciphers). #[method(name = "statement_posted")] fn posted(&self, match_all_topics: Vec<[u8; 32]>, dest: [u8; 32]) -> RpcResult>; - /// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client. + /// Return the decrypted data of all known statements whose decryption key is identified as + /// `dest`. The key must be available to the client. #[method(name = "statement_postedClear")] - fn posted_clear(&self, match_all_topics: Vec<[u8; 32]>, dest: [u8; 32]) -> RpcResult>; + fn posted_clear( + &self, + match_all_topics: Vec<[u8; 32]>, + dest: [u8; 32], + ) -> RpcResult>; /// Submit a pre-encoded statement. #[method(name = "statement_submit")] diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index af296ec28b1d9..34c264afb3bae 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -27,7 +27,7 @@ use jsonrpsee::core::{async_trait, RpcResult}; pub use sc_rpc_api::statement::{error::Error, StatementApiServer}; use sc_rpc_api::DenyUnsafe; use sp_core::Bytes; -use sp_statement_store::{SubmitResult, StatementSource}; +use sp_statement_store::{StatementSource, SubmitResult}; use std::sync::Arc; /// Statement store API @@ -38,53 +38,65 @@ pub struct StatementStore { impl StatementStore { /// Create new instance of Offchain API. - pub fn new(store: Arc, deny_unsafe: DenyUnsafe) -> Self { + pub fn new( + store: Arc, + deny_unsafe: DenyUnsafe, + ) -> Self { StatementStore { store, deny_unsafe } } } #[async_trait] impl StatementApiServer for StatementStore { - fn dump(&self, ) -> RpcResult> { + fn dump(&self) -> RpcResult> { self.deny_unsafe.check_if_safe()?; - let statements = self.store.dump_encoded(). - map_err(|e| Error::StatementStore(e.to_string()))?; + let statements = + self.store.dump_encoded().map_err(|e| Error::StatementStore(e.to_string()))?; Ok(statements.into_iter().map(|(_, s)| s.into()).collect()) } fn broadcasts(&self, match_all_topics: Vec<[u8; 32]>) -> RpcResult> { - Ok(self.store.broadcasts(&match_all_topics) + Ok(self + .store + .broadcasts(&match_all_topics) .map_err(|e| Error::StatementStore(e.to_string()))? 
.into_iter() .map(Into::into) - .collect() - ) + .collect()) } fn posted(&self, match_all_topics: Vec<[u8; 32]>, dest: [u8; 32]) -> RpcResult> { - Ok(self.store.posted(&match_all_topics, dest) + Ok(self + .store + .posted(&match_all_topics, dest) .map_err(|e| Error::StatementStore(e.to_string()))? .into_iter() .map(Into::into) - .collect() - ) + .collect()) } - fn posted_clear(&self, match_all_topics: Vec<[u8; 32]>, dest: [u8; 32]) -> RpcResult> { - Ok(self.store.posted_clear(&match_all_topics, dest) + fn posted_clear( + &self, + match_all_topics: Vec<[u8; 32]>, + dest: [u8; 32], + ) -> RpcResult> { + Ok(self + .store + .posted_clear(&match_all_topics, dest) .map_err(|e| Error::StatementStore(e.to_string()))? .into_iter() .map(Into::into) - .collect() - ) + .collect()) } fn submit(&self, encoded: Bytes) -> RpcResult<()> { match self.store.submit_encoded(&encoded, StatementSource::Rpc) { SubmitResult::New(_) | SubmitResult::Known => Ok(()), - // `KnownExpired` should not happen. Expired statements submitted with `StatementSource::Rpc` should be renewed. - SubmitResult::KnownExpired => Err(Error::StatementStore("Submitted an expired statement".into()).into()), + // `KnownExpired` should not happen. Expired statements submitted with + // `StatementSource::Rpc` should be renewed. + SubmitResult::KnownExpired => + Err(Error::StatementStore("Submitted an expired statement".into()).into()), SubmitResult::Bad(e) => Err(Error::StatementStore(e.into()).into()), SubmitResult::InternalError(e) => Err(Error::StatementStore(e.to_string()).into()), } diff --git a/client/rpc/src/statement/tests.rs b/client/rpc/src/statement/tests.rs index ec691e40e8a8c..b46a8f75295fe 100644 --- a/client/rpc/src/statement/tests.rs +++ b/client/rpc/src/statement/tests.rs @@ -15,5 +15,3 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . - - diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 96f1e93ba71a3..ecbdc3d801563 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -59,9 +59,9 @@ use sc_rpc::{ DenyUnsafe, SubscriptionTaskExecutor, }; use sc_rpc_spec_v2::{chain_head::ChainHeadApiServer, transaction::TransactionApiServer}; +use sc_statement_store::Store as StatementStore; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::MaintainedTransactionPool; -use sc_statement_store::Store as StatementStore; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; @@ -463,17 +463,13 @@ where // Perform periodic statement store maintenance let store = statement_store.clone(); - spawn_handle.spawn( - "statement-store-notifications", - Some("statement-store"), - async move { - let mut interval = tokio::time::interval(sc_statement_store::MAINTENANCE_PERIOD); - loop { - interval.tick().await; - store.maintain(); - } + spawn_handle.spawn("statement-store-notifications", Some("statement-store"), async move { + let mut interval = tokio::time::interval(sc_statement_store::MAINTENANCE_PERIOD); + loop { + interval.tick().await; + store.maintain(); } - ); + }); // Prometheus metrics. 
let metrics_service = @@ -954,7 +950,8 @@ where spawn_handle.spawn("network-statement-validator", Some("networking"), fut); }) }; - // crate statement goissip protocol and add it to the list of supported protocols of `network_params` + // crate statement goissip protocol and add it to the list of supported protocols of + // `network_params` let (statement_handler, statement_handler_controller) = statement_handler_proto.build( network.clone(), sync_service.clone(), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 794c13826cb4a..5e321813b63a5 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -85,10 +85,10 @@ pub use sc_network_transactions::config::{TransactionImport, TransactionImportFu pub use sc_rpc::{ RandomIntegerSubscriptionId, RandomStringSubscriptionId, RpcSubscriptionIdProvider, }; +pub use sc_statement_store::Store as StatementStore; pub use sc_tracing::TracingReceiver; pub use sc_transaction_pool::Options as TransactionPoolOptions; pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool}; -pub use sc_statement_store::Store as StatementStore; #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; pub use task_manager::{SpawnTaskHandle, Task, TaskManager, TaskRegistry, DEFAULT_GROUP_NAME}; diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 86abde26fd5ee..336f6643f2808 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -23,19 +23,23 @@ mod metrics; -pub use sp_statement_store::{StatementStore, Error}; +pub use sp_statement_store::{Error, StatementStore}; -use std::{collections::{HashSet, HashMap, BinaryHeap}, sync::Arc}; -use parking_lot::RwLock; use metrics::MetricsLink as PrometheusMetrics; +use parking_lot::RwLock; use prometheus_endpoint::Registry as PrometheusRegistry; -use sp_statement_store::{Statement, Topic, DecryptionKey, Result, Hash, BlockHash, SubmitResult, NetworkPriority, Proof}; -use sp_statement_store::runtime_api::{ValidateStatement, ValidStatement, InvalidStatement, StatementSource}; -use sp_core::{Encode, Decode, hexdisplay::HexDisplay}; use sp_api::ProvideRuntimeApi; -use sp_runtime::traits::Block as BlockT; use sp_blockchain::HeaderBackend; - +use sp_core::{hexdisplay::HexDisplay, Decode, Encode}; +use sp_runtime::traits::Block as BlockT; +use sp_statement_store::{ + runtime_api::{InvalidStatement, StatementSource, ValidStatement, ValidateStatement}, + BlockHash, DecryptionKey, Hash, NetworkPriority, Proof, Result, Statement, SubmitResult, Topic, +}; +use std::{ + collections::{BinaryHeap, HashMap, HashSet}, + sync::Arc, +}; const KEY_VERSION: &[u8] = b"version".as_slice(); const CURRENT_VERSION: u32 = 1; @@ -66,13 +70,21 @@ struct EvictionPriority { impl PartialOrd for EvictionPriority { fn partial_cmp(&self, other: &Self) -> Option { - Some(self.priority.cmp(&other.priority).then_with(|| self.timestamp.cmp(&other.timestamp)).reverse()) + Some( + self.priority + .cmp(&other.priority) + .then_with(|| self.timestamp.cmp(&other.timestamp)) + .reverse(), + ) } } impl Ord for EvictionPriority { fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.priority.cmp(&other.priority).then_with(|| self.timestamp.cmp(&other.timestamp)).reverse() + self.priority + .cmp(&other.priority) + .then_with(|| self.timestamp.cmp(&other.timestamp)) + .reverse() } } @@ -87,17 +99,17 @@ struct Index { max_entries: usize, } -struct ClientWrapper { +struct ClientWrapper { client: Arc, _block: 
std::marker::PhantomData, } impl ClientWrapper - where - Block: BlockT, - Block::Hash: From, - Client: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, - Client::Api: ValidateStatement, +where + Block: BlockT, + Block::Hash: From, + Client: ProvideRuntimeApi + HeaderBackend + Send + Sync + 'static, + Client::Api: ValidateStatement, { fn validate_statement( &self, @@ -112,9 +124,7 @@ impl ClientWrapper }); match api.validate_statement(block, source, statement) { Ok(r) => r, - Err(_) => { - Err(InvalidStatement::InternalError) - } + Err(_) => Err(InvalidStatement::InternalError), } } } @@ -123,7 +133,15 @@ impl ClientWrapper pub struct Store { db: parity_db::Db, index: RwLock, - validate_fn: Box, StatementSource, Statement) -> std::result::Result + Send + Sync>, + validate_fn: Box< + dyn Fn( + Option, + StatementSource, + Statement, + ) -> std::result::Result + + Send + + Sync, + >, time_override: Option, metrics: PrometheusMetrics, } @@ -175,10 +193,10 @@ impl Index { fn query(&self, hash: &Hash) -> IndexQuery { if let Some(meta) = self.entries.get(hash) { - return IndexQuery::Exists(meta.priority); + return IndexQuery::Exists(meta.priority) } if let Some(meta) = self.expired.get(hash) { - return IndexQuery::Expired(meta.priority); + return IndexQuery::Expired(meta.priority) } IndexQuery::Unknown } @@ -191,7 +209,12 @@ impl Index { self.expired.contains_key(hash) } - fn iter(&self, key: Option, topics: &[Topic], mut f: impl FnMut(&Hash) -> Result<()>) -> Result<()> { + fn iter( + &self, + key: Option, + topics: &[Topic], + mut f: impl FnMut(&Hash) -> Result<()>, + ) -> Result<()> { let mut sets: [Option<&HashSet>; 4] = Default::default(); let mut num_sets = 0; for t in topics { @@ -210,17 +233,26 @@ impl Index { // Start with the smallest topic set or the key set. sets[0..num_sets].sort_by_key(|s| s.map_or(0, HashSet::len)); if let Some(key) = key { - let key_set = if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; + let key_set = + if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; for item in key_set { if sets.iter().all(|set| set.unwrap().contains(item)) { - log::trace!(target: LOG_TARGET, "Iterating by key: {:?}", HexDisplay::from(item)); + log::trace!( + target: LOG_TARGET, + "Iterating by key: {:?}", + HexDisplay::from(item) + ); f(item)? } } } else { for item in sets[0].unwrap() { - if sets[1 .. num_sets].iter().all(|set| set.unwrap().contains(item)) { - log::trace!(target: LOG_TARGET, "Iterating by topic: {:?}", HexDisplay::from(item)); + if sets[1..num_sets].iter().all(|set| set.unwrap().contains(item)) { + log::trace!( + target: LOG_TARGET, + "Iterating by topic: {:?}", + HexDisplay::from(item) + ); f(item)? 
} } @@ -270,11 +302,15 @@ impl Index { }); if num_expired > 0 { // Rebuild the priority queue - self.by_priority = self.entries.iter().map(|(hash, meta)| EvictionPriority { - hash: hash.clone(), - priority: meta.priority, - timestamp: meta.timestamp, - }).collect(); + self.by_priority = self + .entries + .iter() + .map(|(hash, meta)| EvictionPriority { + hash: hash.clone(), + priority: meta.priority, + timestamp: meta.timestamp, + }) + .collect(); } purged } @@ -284,7 +320,11 @@ impl Index { while self.by_priority.len() >= self.max_entries { if let Some(evicted) = self.by_priority.pop() { - log::trace!(target: LOG_TARGET, "Evicting statement {:?}", HexDisplay::from(&evicted.hash)); + log::trace!( + target: LOG_TARGET, + "Evicting statement {:?}", + HexDisplay::from(&evicted.hash) + ); self.entries.remove(&evicted.hash); if let Some((topics, key)) = self.all_topics.remove(&evicted.hash) { for t in topics { @@ -302,7 +342,7 @@ impl Index { } evicted_set.push((col::STATEMENTS, evicted.hash.to_vec(), None)); } else { - break; + break } } evicted_set @@ -335,23 +375,31 @@ impl Store { let db = parity_db::Db::open_or_create(&config).map_err(|e| Error::Db(e.to_string()))?; match db.get(col::META, &KEY_VERSION).map_err(|e| Error::Db(e.to_string()))? { Some(version) => { - let version = u32::from_le_bytes(version.try_into() - .map_err(|_| Error::Db("Error reading database version".into()))?); + let version = u32::from_le_bytes( + version + .try_into() + .map_err(|_| Error::Db("Error reading database version".into()))?, + ); if version != CURRENT_VERSION { - return Err(Error::Db(format!("Unsupported database version: {version}"))); + return Err(Error::Db(format!("Unsupported database version: {version}"))) } }, None => { - db.commit( - [(col::META, KEY_VERSION.to_vec(), Some(CURRENT_VERSION.to_le_bytes().to_vec()))] - ).map_err(|e| Error::Db(e.to_string()))?; - } + db.commit([( + col::META, + KEY_VERSION.to_vec(), + Some(CURRENT_VERSION.to_le_bytes().to_vec()), + )]) + .map_err(|e| Error::Db(e.to_string()))?; + }, } let mut index = Index::default(); index.max_entries = MAX_LIVE_STATEMENTS; let validator = ClientWrapper { client, _block: Default::default() }; - let validate_fn = Box::new(move |block, source, statement| validator.validate_statement(block, source, statement)); + let validate_fn = Box::new(move |block, source, statement| { + validator.validate_statement(block, source, statement) + }); let store = Store { db, @@ -368,27 +416,44 @@ impl Store { let current_time = self.timestamp(); { let mut index = self.index.write(); - self.db.iter_column_while(col::STATEMENTS, |item| { - let statement = item.value; - if let Ok(statement_with_meta) = StatementWithMeta::decode(&mut statement.as_slice()) { - let hash = statement_with_meta.statement.hash(); - if statement_with_meta.meta.timestamp + EXPIRE_AFTER < current_time { - log::trace!(target: LOG_TARGET, "Statement loaded (expired): {:?}", HexDisplay::from(&hash)); - index.insert_expired(hash, statement_with_meta.meta); - } else { - log::trace!(target: LOG_TARGET, "Statement loaded {:?}", HexDisplay::from(&hash)); - index.insert_with_meta(hash, statement_with_meta); + self.db + .iter_column_while(col::STATEMENTS, |item| { + let statement = item.value; + if let Ok(statement_with_meta) = + StatementWithMeta::decode(&mut statement.as_slice()) + { + let hash = statement_with_meta.statement.hash(); + if statement_with_meta.meta.timestamp + EXPIRE_AFTER < current_time { + log::trace!( + target: LOG_TARGET, + "Statement loaded (expired): {:?}", + 
HexDisplay::from(&hash) + ); + index.insert_expired(hash, statement_with_meta.meta); + } else { + log::trace!( + target: LOG_TARGET, + "Statement loaded {:?}", + HexDisplay::from(&hash) + ); + index.insert_with_meta(hash, statement_with_meta); + } } - } - true - }).map_err(|e| Error::Db(e.to_string()))?; + true + }) + .map_err(|e| Error::Db(e.to_string()))?; } self.maintain(); Ok(()) } - fn collect_statements(&self, key: Option, match_all_topics: &[Topic], mut f: impl FnMut(Statement) -> Option ) -> Result> { + fn collect_statements( + &self, + key: Option, + match_all_topics: &[Topic], + mut f: impl FnMut(Statement) -> Option, + ) -> Result> { let mut result = Vec::new(); let index = self.index.read(); index.iter(key, match_all_topics, |hash| { @@ -400,14 +465,21 @@ impl Store { } } else { // DB inconsistency - log::warn!(target: LOG_TARGET, "Corrupt statement {:?}", HexDisplay::from(hash)); + log::warn!( + target: LOG_TARGET, + "Corrupt statement {:?}", + HexDisplay::from(hash) + ); } - - } + }, None => { // DB inconsistency - log::warn!(target: LOG_TARGET, "Missing statement {:?}", HexDisplay::from(hash)); - } + log::warn!( + target: LOG_TARGET, + "Missing statement {:?}", + HexDisplay::from(hash) + ); + }, } Ok(()) })?; @@ -424,14 +496,22 @@ impl Store { } else { self.metrics.report(|metrics| metrics.statements_pruned.inc_by(count)); } - log::trace!(target: LOG_TARGET, "Completed store maintenance. Purged: {}, Active: {}, Expired: {}", - count, self.index.read().entries.len(), self.index.read().expired.len() + log::trace!( + target: LOG_TARGET, + "Completed store maintenance. Purged: {}, Active: {}, Expired: {}", + count, + self.index.read().entries.len(), + self.index.read().expired.len() ); } fn timestamp(&self) -> u64 { - self.time_override.unwrap_or_else(|| - std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap_or_default().as_secs()) + self.time_override.unwrap_or_else(|| { + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs() + }) } #[cfg(test)] @@ -478,29 +558,51 @@ impl StatementStore for Store { /// Returns a statement by hash. fn statement(&self, hash: &Hash) -> Result> { - Ok(match self.db.get(col::STATEMENTS, hash.as_slice()).map_err(|e| Error::Db(e.to_string()))? { - Some(entry) => { - log::trace!(target: LOG_TARGET, "Queried statement {:?}", HexDisplay::from(hash)); - Some(StatementWithMeta::decode(&mut entry.as_slice()).map_err(|e| Error::Decode(e.to_string()))?.statement) - } - None => { - log::trace!(target: LOG_TARGET, "Queried missing statement {:?}", HexDisplay::from(hash)); - None + Ok( + match self + .db + .get(col::STATEMENTS, hash.as_slice()) + .map_err(|e| Error::Db(e.to_string()))? + { + Some(entry) => { + log::trace!( + target: LOG_TARGET, + "Queried statement {:?}", + HexDisplay::from(hash) + ); + Some( + StatementWithMeta::decode(&mut entry.as_slice()) + .map_err(|e| Error::Decode(e.to_string()))? + .statement, + ) + }, + None => { + log::trace!( + target: LOG_TARGET, + "Queried missing statement {:?}", + HexDisplay::from(hash) + ); + None + }, }, - }) + ) } - /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. + /// Return the data of all known statements which include all topics and have no `DecryptionKey` + /// field. 
fn broadcasts(&self, match_all_topics: &[Topic]) -> Result>> { self.collect_statements(None, match_all_topics, |statement| statement.into_data()) } - /// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers). + /// Return the data of all known statements whose decryption key is identified as `dest` (this + /// will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the + /// private key for symmetric ciphers). fn posted(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>> { self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) } - /// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client. + /// Return the decrypted data of all known statements whose decryption key is identified as + /// `dest`. The key must be available to the client. fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>> { self.collect_statements(Some(dest), match_all_topics, |statement| statement.into_data()) } @@ -511,19 +613,19 @@ impl StatementStore for Store { let priority = match self.index.read().query(&hash) { IndexQuery::Expired(priority) => { if !source.can_be_resubmitted() { - return SubmitResult::KnownExpired; + return SubmitResult::KnownExpired } priority - } + }, IndexQuery::Exists(priority) => { if !source.can_be_resubmitted() { - return SubmitResult::Known; + return SubmitResult::Known } priority - } + }, IndexQuery::Unknown => { // Validate. - let at_block = if let Some(Proof::OnChain{block_hash, ..}) = statement.proof() { + let at_block = if let Some(Proof::OnChain { block_hash, .. }) = statement.proof() { Some(block_hash.clone()) } else { None @@ -532,41 +634,51 @@ impl StatementStore for Store { match validation_result { Ok(ValidStatement { priority }) => priority, Err(InvalidStatement::BadProof) => { - log::debug!(target: LOG_TARGET, "Statement validation failed: BadProof, {:?}", statement); + log::debug!( + target: LOG_TARGET, + "Statement validation failed: BadProof, {:?}", + statement + ); self.metrics.report(|metrics| metrics.validations_invalid.inc()); return SubmitResult::Bad("Bad statement proof") }, - Err(InvalidStatement::NoProof) =>{ - log::debug!(target: LOG_TARGET, "Statement validation failed: NoProof, {:?}", statement); + Err(InvalidStatement::NoProof) => { + log::debug!( + target: LOG_TARGET, + "Statement validation failed: NoProof, {:?}", + statement + ); self.metrics.report(|metrics| metrics.validations_invalid.inc()); return SubmitResult::Bad("Missing statement proof") }, - Err(InvalidStatement::InternalError) => { - return SubmitResult::InternalError(Error::Runtime) - }, + Err(InvalidStatement::InternalError) => + return SubmitResult::InternalError(Error::Runtime), } - } + }, }; // Commit to the db prior to locking the index. 
let statement_with_meta = StatementWithMeta { - meta: StatementMeta { - priority, - timestamp: self.timestamp(), - }, + meta: StatementMeta { priority, timestamp: self.timestamp() }, statement, }; let mut commit = self.index.write().evict(); commit.push((col::STATEMENTS, hash.to_vec(), Some(statement_with_meta.encode()))); if let Err(e) = self.db.commit(commit) { - log::debug!(target: LOG_TARGET, "Statement validation failed: database error {}, {:?}", e, statement_with_meta.statement); - return SubmitResult::InternalError(Error::Db(e.to_string())); + log::debug!( + target: LOG_TARGET, + "Statement validation failed: database error {}, {:?}", + e, + statement_with_meta.statement + ); + return SubmitResult::InternalError(Error::Db(e.to_string())) } self.metrics.report(|metrics| metrics.submitted_statements.inc()); let mut index = self.index.write(); index.insert_with_meta(hash, statement_with_meta); - let network_priority = if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; + let network_priority = + if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; log::trace!(target: LOG_TARGET, "Statement submitted: {:?}", HexDisplay::from(&hash)); SubmitResult::New(network_priority) } @@ -576,21 +688,26 @@ impl StatementStore for Store { match Statement::decode(&mut statement) { Ok(decoded) => self.submit(decoded, source), Err(e) => { - log::debug!(target: LOG_TARGET, "Error decoding submitted statement. Failed with: {}", e); + log::debug!( + target: LOG_TARGET, + "Error decoding submitted statement. Failed with: {}", + e + ); SubmitResult::Bad("Bad SCALE encoding") - } + }, } } } #[cfg(test)] mod tests { - use sp_statement_store::runtime_api::{ValidateStatement, ValidStatement, InvalidStatement}; - use sp_statement_store::{Statement, Topic, - SignatureVerificationResult, Proof, StatementStore, StatementSource, NetworkPriority, SubmitResult, - }; use crate::Store; use sp_core::Pair; + use sp_statement_store::{ + runtime_api::{InvalidStatement, ValidStatement, ValidateStatement}, + NetworkPriority, Proof, SignatureVerificationResult, Statement, StatementSource, + StatementStore, SubmitResult, Topic, + }; type Extrinsic = sp_runtime::OpaqueExtrinsic; type Hash = sp_core::H256; @@ -658,7 +775,7 @@ mod tests { fn status(&self, _hash: Hash) -> sp_blockchain::Result { unimplemented!() } - fn number( &self, _hash: Hash) -> sp_blockchain::Result> { + fn number(&self, _hash: Hash) -> sp_blockchain::Result> { unimplemented!() } fn hash(&self, _number: BlockNumber) -> sp_blockchain::Result> { @@ -668,9 +785,7 @@ mod tests { fn test_store() -> (Store, tempfile::TempDir) { let _ = env_logger::try_init(); - let temp_dir = tempfile::Builder::new() - .tempdir() - .expect("Error creating test dir"); + let temp_dir = tempfile::Builder::new().tempdir().expect("Error creating test dir"); let client = std::sync::Arc::new(TestClient); let mut path: std::path::PathBuf = temp_dir.path().into(); @@ -686,7 +801,7 @@ mod tests { fn signed_statement_with_topics(data: u8, topics: &[Topic]) -> Statement { let mut statement = Statement::new(); statement.set_plain_data(vec![data]); - for i in 0 .. topics.len() { + for i in 0..topics.len() { statement.set_topic(i, topics[i].clone()); } let kp = sp_core::ed25519::Pair::from_string("//Alice", None).unwrap(); @@ -697,7 +812,7 @@ mod tests { fn onchain_statement_with_topics(data: u8, topics: &[Topic]) -> Statement { let mut statement = Statement::new(); statement.set_plain_data(vec![data]); - for i in 0 .. 
topics.len() { + for i in 0..topics.len() { statement.set_topic(i, topics[i].clone()); } statement.set_proof(Proof::OnChain { @@ -718,9 +833,15 @@ mod tests { fn submit_one() { let (store, _temp) = test_store(); let statement0 = signed_statement(0); - assert_eq!(store.submit(statement0, StatementSource::Network), SubmitResult::New(NetworkPriority::High)); + assert_eq!( + store.submit(statement0, StatementSource::Network), + SubmitResult::New(NetworkPriority::High) + ); let unsigned = Statement::new(); - assert_eq!(store.submit(unsigned, StatementSource::Network), SubmitResult::New(NetworkPriority::Low)); + assert_eq!( + store.submit(unsigned, StatementSource::Network), + SubmitResult::New(NetworkPriority::Low) + ); } #[test] @@ -729,9 +850,18 @@ mod tests { let statement0 = signed_statement(0); let statement1 = signed_statement(1); let statement2 = signed_statement(2); - assert_eq!(store.submit(statement0.clone(), StatementSource::Network), SubmitResult::New(NetworkPriority::High)); - assert_eq!(store.submit(statement1.clone(), StatementSource::Network), SubmitResult::New(NetworkPriority::High)); - assert_eq!(store.submit(statement2.clone(), StatementSource::Network), SubmitResult::New(NetworkPriority::High)); + assert_eq!( + store.submit(statement0.clone(), StatementSource::Network), + SubmitResult::New(NetworkPriority::High) + ); + assert_eq!( + store.submit(statement1.clone(), StatementSource::Network), + SubmitResult::New(NetworkPriority::High) + ); + assert_eq!( + store.submit(statement2.clone(), StatementSource::Network), + SubmitResult::New(NetworkPriority::High) + ); assert_eq!(store.dump().unwrap().len(), 3); assert_eq!(store.broadcasts(&[]).unwrap().len(), 3); assert_eq!(store.statement(&statement1.hash()).unwrap(), Some(statement1.clone())); @@ -753,7 +883,8 @@ mod tests { let statement1 = signed_statement_with_topics(1, &[topic(0)]); let statement2 = signed_statement_with_topics(2, &[topic(0), topic(1)]); let statement3 = signed_statement_with_topics(3, &[topic(0), topic(1), topic(2)]); - let statement4 = signed_statement_with_topics(4, &[topic(0), topic(42), topic(2), topic(3)]); + let statement4 = + signed_statement_with_topics(4, &[topic(0), topic(42), topic(2), topic(3)]); let statements = vec![statement0, statement1, statement2, statement3, statement4]; for s in &statements { store.submit(s.clone(), StatementSource::Network); @@ -761,31 +892,32 @@ mod tests { let assert_topics = |topics: &[u64], expected: &[u8]| { let topics: Vec<_> = topics.iter().map(|t| topic(*t)).collect(); - let mut got_vals: Vec<_> = store.broadcasts(&topics).unwrap().into_iter().map(|d| d[0]).collect(); + let mut got_vals: Vec<_> = + store.broadcasts(&topics).unwrap().into_iter().map(|d| d[0]).collect(); got_vals.sort(); assert_eq!(expected.to_vec(), got_vals); }; - assert_topics(&[], &[0,1,2,3,4]); - assert_topics(&[0], &[1,2,3,4]); - assert_topics(&[1], &[2,3]); - assert_topics(&[2], &[3,4]); + assert_topics(&[], &[0, 1, 2, 3, 4]); + assert_topics(&[0], &[1, 2, 3, 4]); + assert_topics(&[1], &[2, 3]); + assert_topics(&[2], &[3, 4]); assert_topics(&[3], &[4]); assert_topics(&[42], &[4]); - assert_topics(&[0,1], &[2, 3]); - assert_topics(&[1,2], &[3]); + assert_topics(&[0, 1], &[2, 3]); + assert_topics(&[1, 2], &[3]); } #[test] fn maintenance() { - use super::{MAX_LIVE_STATEMENTS, EXPIRE_AFTER, PURGE_AFTER}; + use super::{EXPIRE_AFTER, MAX_LIVE_STATEMENTS, PURGE_AFTER}; // Check test assumptions assert!((MAX_LIVE_STATEMENTS as u64) < EXPIRE_AFTER); // first 10 statements are high priority, the rest 
is low. let (mut store, _temp) = test_store(); - for time in 0 .. MAX_LIVE_STATEMENTS as u64 { + for time in 0..MAX_LIVE_STATEMENTS as u64 { store.set_time(time); let statement = if time < 10 { signed_statement_with_topics(0, &[topic(time)]) @@ -803,14 +935,19 @@ mod tests { let first_to_be_evicted = onchain_statement_with_topics(0, &[topic(10)]); assert_eq!(store.index.read().entries.len(), MAX_LIVE_STATEMENTS); - assert_eq!(store.statement(&first_to_be_evicted.hash()).unwrap().unwrap(), first_to_be_evicted); + assert_eq!( + store.statement(&first_to_be_evicted.hash()).unwrap().unwrap(), + first_to_be_evicted + ); // Check that the new statement replaces the old. - store.submit(signed_statement_with_topics(0, &[topic(MAX_LIVE_STATEMENTS as u64 + 1)]) , StatementSource::Network); + store.submit( + signed_statement_with_topics(0, &[topic(MAX_LIVE_STATEMENTS as u64 + 1)]), + StatementSource::Network, + ); assert_eq!(store.statement(&first_to_be_evicted.hash()).unwrap(), None); - - store.set_time(EXPIRE_AFTER + (MAX_LIVE_STATEMENTS as u64)/ 2); + store.set_time(EXPIRE_AFTER + (MAX_LIVE_STATEMENTS as u64) / 2); store.maintain(); // Half statements should be expired. assert_eq!(store.index.read().entries.len(), MAX_LIVE_STATEMENTS / 2); @@ -819,7 +956,7 @@ mod tests { // The high-priority statement should survive. assert_eq!(store.statement(&first.hash()).unwrap().unwrap(), first); - store.set_time(PURGE_AFTER + (MAX_LIVE_STATEMENTS as u64)/ 2); + store.set_time(PURGE_AFTER + (MAX_LIVE_STATEMENTS as u64) / 2); store.maintain(); assert_eq!(store.index.read().entries.len(), 0); assert_eq!(store.index.read().expired.len(), MAX_LIVE_STATEMENTS / 2); diff --git a/client/statement-store/src/metrics.rs b/client/statement-store/src/metrics.rs index db24e1cd4a7f0..cf191b79757ed 100644 --- a/client/statement-store/src/metrics.rs +++ b/client/statement-store/src/metrics.rs @@ -77,4 +77,3 @@ impl Metrics { }) } } - diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index b1af1a088f23f..be26c6a86c367 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -22,18 +22,20 @@ //! ## Overview //! //! The Statement pallet provides means to create and validate statements for the statement store. -//! #![cfg_attr(not(feature = "std"), no_std)] //use codec::{Decode, Encode, MaxEncodedLen}; -use sp_statement_store::{Proof, Statement, SignatureVerificationResult}; -use sp_statement_store::runtime_api::{StatementSource, ValidStatement, InvalidStatement}; -use frame_support::sp_tracing::{enter_span, Level}; -use frame_support::sp_runtime::traits::Zero; -use frame_support::sp_runtime::SaturatedConversion; -use frame_support::traits::Currency; -use frame_support::pallet_prelude::*; +use frame_support::{ + pallet_prelude::*, + sp_runtime::{traits::Zero, SaturatedConversion}, + sp_tracing::{enter_span, Level}, + traits::Currency, +}; +use sp_statement_store::{ + runtime_api::{InvalidStatement, StatementSource, ValidStatement}, + Proof, SignatureVerificationResult, Statement, +}; #[cfg(test)] mod mock; @@ -54,8 +56,7 @@ pub mod pallet { ::AccountId: From<[u8; 32]>, { /// The overarching event type. - type RuntimeEvent: From> - + IsType<::RuntimeEvent>; + type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// Account balance. type Currency: Currency<::AccountId>; /// Min balance for priority statements. 
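The `Config` items above (`RuntimeEvent`, `Currency` and the minimum balance per unit of priority, `PriorityBalance`) are all the runtime has to supply. A minimal sketch of that wiring, assuming the pallet crate is imported as `pallet_statement`; the `Runtime`, `Balances`, `Balance` and `StatementCost` names are illustrative assumptions, not part of this patch. `validate_statement` below derives a statement's priority as the signer's free balance divided by `PriorityBalance`:

frame_support::parameter_types! {
	// Priority is computed as free_balance / StatementCost, so a lower cost grants more priority.
	pub const StatementCost: Balance = 10;
}

impl pallet_statement::Config for Runtime {
	type RuntimeEvent = RuntimeEvent;
	type Currency = Balances;
	type PriorityBalance = StatementCost;
}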
@@ -80,12 +81,11 @@ pub mod pallet { } impl Pallet - where - ::AccountId: From<[u8; 32]>, - [u8; 32]: From<::AccountId>, - ::RuntimeEvent: From>, +where + ::AccountId: From<[u8; 32]>, + [u8; 32]: From<::AccountId>, + ::RuntimeEvent: From>, { - /// Validate a statement against current state. This is supposed ti be called by the statement /// store on the host side. pub fn validate_statement( @@ -101,47 +101,50 @@ impl Pallet // block_hash and event_index should be checked by the host if frame_system::Pallet::::parent_hash().as_ref() != block_hash.as_slice() { log::debug!(target: LOG_TARGET, "Bad block hash."); - return Err(InvalidStatement::BadProof); + return Err(InvalidStatement::BadProof) } let account: T::AccountId = who.clone().into(); match frame_system::Pallet::::event_no_consensus(*event_index as usize) { Some(e) => { - if e != (Event::NewStatement { account: account.clone(), statement: statement.strip_proof() }).into() { + if e != (Event::NewStatement { + account: account.clone(), + statement: statement.strip_proof(), + }) + .into() + { log::debug!(target: LOG_TARGET, "Event mismatch"); - return Err(InvalidStatement::BadProof); + return Err(InvalidStatement::BadProof) } }, _ => { log::debug!(target: LOG_TARGET, "Bad event index"); - return Err(InvalidStatement::BadProof); - } + return Err(InvalidStatement::BadProof) + }, } account - } + }, _ => match statement.verify_signature() { SignatureVerificationResult::Valid(account) => account.into(), SignatureVerificationResult::Invalid => { log::debug!(target: LOG_TARGET, "Bad statement signature."); - return Err(InvalidStatement::BadProof); + return Err(InvalidStatement::BadProof) }, SignatureVerificationResult::NoSignature => { log::debug!(target: LOG_TARGET, "Missing statement signature."); - return Err(InvalidStatement::NoProof); - } - } + return Err(InvalidStatement::NoProof) + }, + }, }; let priority_cost = T::PriorityBalance::get(); let priority: u64 = if priority_cost.is_zero() { - 0 + 0 } else { let balance = T::Currency::free_balance(&account); let priority = balance / priority_cost; priority.saturated_into() }; - Ok(ValidStatement { - priority, - }) + Ok(ValidStatement { priority }) } pub fn submit_statement(account: T::AccountId, statement: Statement) { diff --git a/frame/statement/src/mock.rs b/frame/statement/src/mock.rs index 438237be0eb05..fced6af03f073 100644 --- a/frame/statement/src/mock.rs +++ b/frame/statement/src/mock.rs @@ -27,9 +27,9 @@ use frame_support::{ }; use sp_core::{Pair, H256}; use sp_runtime::{ - AccountId32, testing::Header, traits::{BlakeTwo256, IdentityLookup}, + AccountId32, }; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -103,11 +103,11 @@ impl Config for Test { pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); let balances = pallet_balances::GenesisConfig:: { - balances: vec![ - (sp_core::sr25519::Pair::from_string("//Alice", None).unwrap().public().into(), 200) - ], + balances: vec![( + sp_core::sr25519::Pair::from_string("//Alice", None).unwrap().public().into(), + 200, + )], }; balances.assimilate_storage(&mut t).unwrap(); t.into() } - diff --git a/frame/statement/src/tests.rs b/frame/statement/src/tests.rs index 58bb0def1dc53..4d2a22e247feb 100644 --- a/frame/statement/src/tests.rs +++ b/frame/statement/src/tests.rs @@ -21,10 +21,12 @@ use super::*; use crate::mock::*; -use sp_statement_store::runtime_api::{ValidStatement, InvalidStatement, StatementSource}; -use 
sp_statement_store::{Proof, Statement}; use sp_core::Pair; use sp_runtime::AccountId32; +use sp_statement_store::{ + runtime_api::{InvalidStatement, StatementSource, ValidStatement}, + Proof, Statement, +}; #[test] fn sign_and_validate_no_balance() { @@ -72,7 +74,10 @@ fn validate_no_proof_fails() { #[test] fn validate_bad_signature_fails() { new_test_ext().execute_with(|| { - let statement = Statement::new_with_proof(Proof::Sr25519 { signature: [0u8; 64], signer: Default::default() }); + let statement = Statement::new_with_proof(Proof::Sr25519 { + signature: [0u8; 64], + signer: Default::default(), + }); let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!(Err(InvalidStatement::BadProof), result); }); @@ -88,17 +93,29 @@ fn validate_event() { let pair = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); let account: AccountId32 = pair.public().into(); Pallet::::submit_statement(account.clone(), statement.clone()); - statement.set_proof(Proof::OnChain { who: account.clone().into(), event_index: 0, block_hash: parent_hash.into() }); + statement.set_proof(Proof::OnChain { + who: account.clone().into(), + event_index: 0, + block_hash: parent_hash.into(), + }); let result = Pallet::::validate_statement(StatementSource::Chain, statement.clone()); assert_eq!(Ok(ValidStatement { priority: 20 }), result); // Use wrong event index - statement.set_proof(Proof::OnChain { who: account.clone().into(), event_index: 1, block_hash: parent_hash.into() }); + statement.set_proof(Proof::OnChain { + who: account.clone().into(), + event_index: 1, + block_hash: parent_hash.into(), + }); let result = Pallet::::validate_statement(StatementSource::Chain, statement.clone()); assert_eq!(Err(InvalidStatement::BadProof), result); // Use wrong block hash - statement.set_proof(Proof::OnChain { who: account.clone().into(), event_index: 0, block_hash: sp_core::H256::random().into() }); + statement.set_proof(Proof::OnChain { + who: account.clone().into(), + event_index: 0, + block_hash: sp_core::H256::random().into(), + }); let result = Pallet::::validate_statement(StatementSource::Chain, statement.clone()); assert_eq!(Err(InvalidStatement::BadProof), result); }); @@ -113,7 +130,11 @@ fn validate_no_event_fails() { let mut statement = Statement::new(); let pair = sp_core::sr25519::Pair::from_string("//Alice", None).unwrap(); let account: AccountId32 = pair.public().into(); - statement.set_proof(Proof::OnChain { who: account.into(), event_index: 0, block_hash: parent_hash.into() }); + statement.set_proof(Proof::OnChain { + who: account.into(), + event_index: 0, + block_hash: parent_hash.into(), + }); let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!(Err(InvalidStatement::BadProof), result); }); diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 3974dec529503..8cafd23b974ed 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -20,12 +20,12 @@ //! A crate which contains statement-store primitives. -use sp_std::vec::Vec; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_application_crypto::RuntimeAppPublic; #[cfg(feature = "std")] use sp_core::Pair; +use sp_std::vec::Vec; /// Statement topic. 
pub type Topic = [u8; 32]; @@ -37,7 +37,9 @@ pub type Hash = [u8; 32]; pub type BlockHash = [u8; 32]; #[cfg(feature = "std")] -pub use store_api::{StatementStore, SubmitResult, Error, Result, NetworkPriority, StatementSource}; +pub use store_api::{ + Error, NetworkPriority, Result, StatementSource, StatementStore, SubmitResult, +}; pub mod runtime_api; #[cfg(feature = "std")] @@ -45,7 +47,7 @@ mod store_api; mod sr25519 { mod app_sr25519 { - use sp_application_crypto::{app_crypto, sr25519, key_types::STATEMENT}; + use sp_application_crypto::{app_crypto, key_types::STATEMENT, sr25519}; app_crypto!(sr25519, STATEMENT); } pub type Public = app_sr25519::Public; @@ -81,21 +83,21 @@ pub enum Proof { /// Signature. signature: [u8; 64], /// Public key. - signer: [u8; 32] + signer: [u8; 32], }, /// Ed25519 Signature. Ed25519 { /// Signature. signature: [u8; 64], /// Public key. - signer: [u8; 32] + signer: [u8; 32], }, /// Secp256k1 Signature. Secp256k1Ecdsa { /// Signature. signature: [u8; 65], /// Public key. - signer: [u8; 33] + signer: [u8; 33], }, /// On-chain event proof. OnChain { @@ -104,7 +106,7 @@ pub enum Proof { /// Hash of block that contains the event. block_hash: BlockHash, /// Index of the event in the event list. - event_index: u64 + event_index: u64, }, } @@ -145,7 +147,7 @@ impl Decode for Statement { // will be a prefix of vector length. let num_fields: codec::Compact = Decode::decode(input)?; let mut statement = Statement::new(); - for _ in 0 .. num_fields.into() { + for _ in 0..num_fields.into() { let field: Field = Decode::decode(input)?; match field { Field::AuthenticityProof(p) => statement.set_proof(p), @@ -210,10 +212,8 @@ impl Statement { #[cfg(feature = "std")] pub fn sign_sr25519_private(&mut self, key: &sp_core::sr25519::Pair) { let to_sign = self.signature_material(); - let proof = Proof::Sr25519 { - signature: key.sign(&to_sign).into(), - signer: key.public().into(), - }; + let proof = + Proof::Sr25519 { signature: key.sign(&to_sign).into(), signer: key.public().into() }; self.set_proof(proof); } @@ -236,10 +236,8 @@ impl Statement { #[cfg(feature = "std")] pub fn sign_ed25519_private(&mut self, key: &sp_core::ed25519::Pair) { let to_sign = self.signature_material(); - let proof = Proof::Ed25519 { - signature: key.sign(&to_sign).into(), - signer: key.public().into(), - }; + let proof = + Proof::Ed25519 { signature: key.sign(&to_sign).into(), signer: key.public().into() }; self.set_proof(proof); } @@ -262,10 +260,8 @@ impl Statement { #[cfg(feature = "std")] pub fn sign_ecdsa_private(&mut self, key: &sp_core::ecdsa::Pair) { let to_sign = self.signature_material(); - let proof = Proof::Secp256k1Ecdsa { - signature: key.sign(&to_sign).into(), - signer: key.public().0, - }; + let proof = + Proof::Secp256k1Ecdsa { signature: key.sign(&to_sign).into(), signer: key.public().0 }; self.set_proof(proof); } @@ -274,12 +270,10 @@ impl Statement { use sp_runtime::traits::Verify; match self.proof() { - Some(Proof::OnChain{..}) | None => { - SignatureVerificationResult::NoSignature - }, + Some(Proof::OnChain { .. 
}) | None => SignatureVerificationResult::NoSignature, Some(Proof::Sr25519 { signature, signer }) => { let to_sign = self.signature_material(); - let signature = sp_core::sr25519::Signature(*signature); + let signature = sp_core::sr25519::Signature(*signature); let public = sp_core::sr25519::Public(*signer); if signature.verify(to_sign.as_slice(), &public) { SignatureVerificationResult::Valid(signer.clone()) @@ -289,7 +283,7 @@ impl Statement { }, Some(Proof::Ed25519 { signature, signer }) => { let to_sign = self.signature_material(); - let signature = sp_core::ed25519::Signature(*signature); + let signature = sp_core::ed25519::Signature(*signature); let public = sp_core::ed25519::Public(*signer); if signature.verify(to_sign.as_slice(), &public) { SignatureVerificationResult::Valid(signer.clone()) @@ -299,14 +293,14 @@ impl Statement { }, Some(Proof::Secp256k1Ecdsa { signature, signer }) => { let to_sign = self.signature_material(); - let signature = sp_core::ecdsa::Signature(*signature); + let signature = sp_core::ecdsa::Signature(*signature); let public = sp_core::ecdsa::Public(*signer); if signature.verify(to_sign.as_slice(), &public) { SignatureVerificationResult::Valid(sp_io::hashing::blake2_256(signer)) } else { SignatureVerificationResult::Invalid } - } + }, } } @@ -382,8 +376,7 @@ impl Statement { fn encoded(&self, with_proof: bool) -> Vec { // Encoding matches that of Vec. Basically this just means accepting that there // will be a prefix of vector length. - let num_fields = - if with_proof && self.proof.is_some() { 1 } else { 0 } + + let num_fields = if with_proof && self.proof.is_some() { 1 } else { 0 } + if self.decryption_key.is_some() { 1 } else { 0 } + if self.data.is_some() { 1 } else { 0 } + self.num_topics as u32; @@ -402,7 +395,7 @@ impl Statement { 1u8.encode_to(&mut output); decryption_key.encode_to(&mut output); } - for t in 0 .. 
self.num_topics { + for t in 0..self.num_topics { (2u8 + t).encode_to(&mut output); self.topics[t as usize].encode_to(&mut output); } @@ -416,24 +409,20 @@ impl Statement { #[cfg(test)] mod test { - use crate::{Statement, Proof, Field, SignatureVerificationResult, hash_encoded}; - use codec::{Encode, Decode}; + use crate::{hash_encoded, Field, Proof, SignatureVerificationResult, Statement}; + use codec::{Decode, Encode}; use sp_application_crypto::Pair; #[test] fn statement_encoding_matches_vec() { let mut statement = Statement::new(); assert!(statement.proof().is_none()); - let proof = Proof::OnChain { - who: [42u8; 32], - block_hash: [24u8; 32], - event_index: 66, - }; + let proof = Proof::OnChain { who: [42u8; 32], block_hash: [24u8; 32], event_index: 66 }; let decryption_key = [0xde; 32]; let topic1 = [0x01; 32]; let topic2 = [0x02; 32]; - let data = vec![55,99]; + let data = vec![55, 99]; statement.set_proof(proof.clone()); statement.set_decryption_key(decryption_key.clone()); @@ -467,23 +456,30 @@ mod test { let secp256k1_kp = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); statement.sign_sr25519_private(&sr25519_kp); - assert_eq!(statement.verify_signature(), SignatureVerificationResult::Valid(sr25519_kp.public().0)); + assert_eq!( + statement.verify_signature(), + SignatureVerificationResult::Valid(sr25519_kp.public().0) + ); statement.sign_ed25519_private(&ed25519_kp); - assert_eq!(statement.verify_signature(), SignatureVerificationResult::Valid(ed25519_kp.public().0)); + assert_eq!( + statement.verify_signature(), + SignatureVerificationResult::Valid(ed25519_kp.public().0) + ); statement.sign_ecdsa_private(&secp256k1_kp); - assert_eq!(statement.verify_signature(), SignatureVerificationResult::Valid(sp_core::hashing::blake2_256(&secp256k1_kp.public().0))); + assert_eq!( + statement.verify_signature(), + SignatureVerificationResult::Valid(sp_core::hashing::blake2_256( + &secp256k1_kp.public().0 + )) + ); // set an invalid signature - statement.set_proof(Proof::Sr25519 { - signature: [0u8; 64], - signer: [0u8; 32], - }); + statement.set_proof(Proof::Sr25519 { signature: [0u8; 64], signer: [0u8; 32] }); assert_eq!(statement.verify_signature(), SignatureVerificationResult::Invalid); statement = statement.strip_proof(); assert_eq!(statement.verify_signature(), SignatureVerificationResult::NoSignature); } } - diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 6b4f2c6a73ed7..875c6b38c34cf 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -17,10 +17,10 @@ //! Runtime support for the statement store. +use crate::Statement; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; -use crate::Statement; /// Information concerning a valid statement. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] @@ -75,4 +75,3 @@ sp_api::decl_runtime_apis! { ) -> Result; } } - diff --git a/primitives/statement-store/src/store_api.rs b/primitives/statement-store/src/store_api.rs index ccb5522e0f69d..c038b2adb9b1e 100644 --- a/primitives/statement-store/src/store_api.rs +++ b/primitives/statement-store/src/store_api.rs @@ -15,8 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::{Statement, Topic, Hash}; pub use crate::runtime_api::StatementSource; +use crate::{Hash, Statement, Topic}; /// Statement store error. 
#[derive(Debug, Eq, PartialEq, thiserror::Error)] @@ -70,13 +70,17 @@ pub trait StatementStore: Send + Sync { /// Get statement by hash. fn statement(&self, hash: &Hash) -> Result>; - /// Return the data of all known statements which include all topics and have no `DecryptionKey` field. + /// Return the data of all known statements which include all topics and have no `DecryptionKey` + /// field. fn broadcasts(&self, match_all_topics: &[Topic]) -> Result>>; - /// Return the data of all known statements whose decryption key is identified as `dest` (this will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the private key for symmetric ciphers). + /// Return the data of all known statements whose decryption key is identified as `dest` (this + /// will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the + /// private key for symmetric ciphers). fn posted(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>>; - /// Return the decrypted data of all known statements whose decryption key is identified as `dest`. The key must be available to the client. + /// Return the decrypted data of all known statements whose decryption key is identified as + /// `dest`. The key must be available to the client. fn posted_clear(&self, match_all_topics: &[Topic], dest: [u8; 32]) -> Result>>; /// Submit a statement. From 8263aa407ca1957c40aa965b226689b04827b9dc Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 23 Mar 2023 12:54:54 +0100 Subject: [PATCH 13/78] Build fix --- bin/node-template/node/src/service.rs | 4 ++-- bin/node/cli/src/service.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 8459dc88eedc8..71069d0e6d50d 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -130,11 +130,11 @@ pub fn new_partial( compatibility_mode: Default::default(), })?; - let statement_store = sc_statement_store::Store::new( + let statement_store = Arc::new(sc_statement_store::Store::new( config.database.path().unwrap(), client.clone(), config.prometheus_registry(), - )?; + )?); Ok(sc_service::PartialComponents { client, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index ef704597cce4f..04f7eab33d5b1 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -282,11 +282,11 @@ pub fn new_partial( (rpc_extensions_builder, shared_voter_state2) }; - let statement_store = sc_statement_store::Store::new( + let statement_store = Arc::new(sc_statement_store::Store::new( config.database.path().unwrap(), client.clone(), config.prometheus_registry(), - )?; + )?); Ok(sc_service::PartialComponents { client, From 7779458a4c50bf3057fe35aeff46b6975c65c69a Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 23 Mar 2023 18:59:11 +0100 Subject: [PATCH 14/78] OCW Api --- Cargo.lock | 5 +- bin/node-template/node/src/service.rs | 4 +- bin/node/cli/src/service.rs | 4 +- client/api/Cargo.toml | 1 + client/api/src/execution_extensions.rs | 16 +++- client/statement-store/Cargo.toml | 4 +- client/statement-store/src/lib.rs | 19 +++- primitives/core/src/offchain/mod.rs | 12 +-- primitives/statement-store/Cargo.toml | 5 +- primitives/statement-store/src/lib.rs | 10 +- primitives/statement-store/src/runtime_api.rs | 95 ++++++++++++++++++- 11 files changed, 148 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6822f654dc22d..88368b1e4d5ad 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -8391,6 +8391,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-state-machine", + "sp-statement-store", "sp-storage", "sp-test-primitives", "substrate-prometheus-endpoint", @@ -9479,6 +9480,7 @@ dependencies = [ "parity-db", "parity-scale-codec", "parking_lot 0.12.1", + "sc-client-api", "sp-api", "sp-blockchain", "sp-core", @@ -10795,8 +10797,9 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-core", - "sp-io", + "sp-externalities", "sp-runtime", + "sp-runtime-interface", "sp-std", "thiserror", ] diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 71069d0e6d50d..12b2f1a7cd9fe 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -130,11 +130,11 @@ pub fn new_partial( compatibility_mode: Default::default(), })?; - let statement_store = Arc::new(sc_statement_store::Store::new( + let statement_store = sc_statement_store::Store::new_shared( config.database.path().unwrap(), client.clone(), config.prometheus_registry(), - )?); + )?; Ok(sc_service::PartialComponents { client, diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 04f7eab33d5b1..14a31e4e8e2e7 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -282,11 +282,11 @@ pub fn new_partial( (rpc_extensions_builder, shared_voter_state2) }; - let statement_store = Arc::new(sc_statement_store::Store::new( + let statement_store = sc_statement_store::Store::new_shared( config.database.path().unwrap(), client.clone(), config.prometheus_registry(), - )?); + )?; Ok(sc_service::PartialComponents { client, diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index f494200852729..02f4292aac594 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -34,6 +34,7 @@ sp-externalities = { version = "0.13.0", path = "../../primitives/externalities" sp-keystore = { version = "0.13.0", default-features = false, path = "../../primitives/keystore" } sp-runtime = { version = "7.0.0", default-features = false, path = "../../primitives/runtime" } sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine" } +sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } sp-storage = { version = "7.0.0", path = "../../primitives/storage" } [dev-dependencies] diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index ffa670f7bc628..d6b6848486877 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -165,7 +165,7 @@ pub struct ExecutionExtensions { strategies: ExecutionStrategies, keystore: Option, offchain_db: Option>, - // FIXME: these two are only RwLock because of https://github.com/paritytech/substrate/issues/4587 + // FIXME: these three are only RwLock because of https://github.com/paritytech/substrate/issues/4587 // remove when fixed. // To break retain cycle between `Client` and `TransactionPool` we require this // extension to be a `Weak` reference. @@ -173,6 +173,7 @@ pub struct ExecutionExtensions { // during initialization. 
transaction_pool: RwLock>>>, extensions_factory: RwLock>>, + statement_store: RwLock>>, } impl Default for ExecutionExtensions { @@ -183,6 +184,7 @@ impl Default for ExecutionExtensions { offchain_db: None, transaction_pool: RwLock::new(None), extensions_factory: RwLock::new(Box::new(())), + statement_store: RwLock::new(None), } } } @@ -195,6 +197,7 @@ impl ExecutionExtensions { offchain_db: Option>, ) -> Self { let transaction_pool = RwLock::new(None); + let statement_store = RwLock::new(None); let extensions_factory = Box::new(()); Self { strategies, @@ -202,6 +205,7 @@ impl ExecutionExtensions { offchain_db, extensions_factory: RwLock::new(extensions_factory), transaction_pool, + statement_store, } } @@ -223,6 +227,11 @@ impl ExecutionExtensions { *self.transaction_pool.write() = Some(Arc::downgrade(pool) as _); } + /// Register statement store extension. + pub fn register_statement_store(&self, store: Arc) { + *self.statement_store.write() = Some(Arc::downgrade(&store) as _); + } + /// Based on the execution context and capabilities it produces /// the extensions object to support desired set of APIs. pub fn extensions( @@ -253,6 +262,11 @@ impl ExecutionExtensions { } } + if capabilities.contains(offchain::Capabilities::STATEMENT_STORE) { + if let Some(store) = self.statement_store.read().as_ref().and_then(|x| x.upgrade()) { + extensions.register(sp_statement_store::runtime_api::StatementStoreExt(store)); + } + } if capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_READ) || capabilities.contains(offchain::Capabilities::OFFCHAIN_DB_WRITE) { diff --git a/client/statement-store/Cargo.toml b/client/statement-store/Cargo.toml index 3bc285555ed3c..d259456df125c 100644 --- a/client/statement-store/Cargo.toml +++ b/client/statement-store/Cargo.toml @@ -27,11 +27,9 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-core = { version = "7.0.0", path = "../../primitives/core" } sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } sp-tracing = { version = "6.0.0", path = "../../primitives/tracing" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } [dev-dependencies] tempfile = "3.1.0" env_logger = "0.9" -#substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } -#substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -#substrate-test-runtime-transaction-pool = { version = "2.0.0", path = "../../test-utils/runtime/transaction-pool" } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 336f6643f2808..e9d9309b07210 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -351,7 +351,24 @@ impl Index { impl Store { /// Create a new shared store instance. There should only be one per process. - pub fn new( + pub fn new_shared( + path: &std::path::Path, + client: Arc, + prometheus: Option<&PrometheusRegistry>, + ) -> Result> + where + Block: BlockT, + Block::Hash: From, + Client: ProvideRuntimeApi + HeaderBackend + sc_client_api::ExecutorProvider + Send + Sync + 'static, + Client::Api: ValidateStatement, + { + let store = Arc::new(Self::new(path, client.clone(), prometheus)?); + client.execution_extensions().register_statement_store(store.clone()); + Ok(store) + } + + /// Create a new instance. 
+ fn new( path: &std::path::Path, client: Arc, prometheus: Option<&PrometheusRegistry>, diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index 5a77e19a3e522..a9e6639807023 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -278,16 +278,8 @@ bitflags::bitflags! { const NODE_AUTHORIZATION = 0b0000_1000_0000; /// Access time related functionality const TIME = 0b0001_0000_0000; - } -} - -impl Capabilities { - /// Return capabilities for rich offchain calls. - /// - /// Those calls should be allowed to sign and submit transactions - /// and access offchain workers database (but read only!). - pub fn rich_offchain_call() -> Self { - Capabilities::TRANSACTION_POOL | Capabilities::KEYSTORE | Capabilities::OFFCHAIN_DB_READ + /// Access the statement store. + const STATEMENT_STORE = 0b0010_0000_0000; } } diff --git a/primitives/statement-store/Cargo.toml b/primitives/statement-store/Cargo.toml index e84d227fe78c9..16708d95cdc9f 100644 --- a/primitives/statement-store/Cargo.toml +++ b/primitives/statement-store/Cargo.toml @@ -19,8 +19,9 @@ sp-core = { version = "7.0.0", default-features = false, path = "../core" } sp-runtime = { version = "7.0.0", default-features = false, path = "../runtime" } sp-std = { version = "5.0.0", default-features = false, path = "../std" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } -sp-io = { version = "7.0.0", default-features = false, path = "../io" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } +sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../runtime-interface" } +sp-externalities = { version = "0.13.0", default-features = false, path = "../externalities" } thiserror = {version = "1.0", optional = true } log = { version = "0.4.17", optional = true } @@ -31,9 +32,9 @@ std = [ "scale-info/std", "sp-core/std", "sp-runtime/std", + "sp-runtime-interface/std", "sp-std/std", "sp-api/std", - "sp-io/std", "sp-application-crypto/std", "thiserror", "log", diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 8cafd23b974ed..f3480e6a204b1 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -23,6 +23,7 @@ use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_application_crypto::RuntimeAppPublic; +use sp_runtime_interface::pass_by::PassByCodec; #[cfg(feature = "std")] use sp_core::Pair; use sp_std::vec::Vec; @@ -76,7 +77,7 @@ pub fn hash_encoded(data: &[u8]) -> [u8; 32] { } /// Statement proof. -#[derive(Encode, Decode, TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq)] +#[derive(Encode, Decode, TypeInfo, sp_core::RuntimeDebug, Clone, PartialEq, Eq)] pub enum Proof { /// Sr25519 Signature. Sr25519 { @@ -110,7 +111,7 @@ pub enum Proof { }, } -#[derive(Encode, Decode, TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq)] +#[derive(Encode, Decode, TypeInfo, sp_core::RuntimeDebug, Clone, PartialEq, Eq)] /// Statement attributes. Each statement is a list of 0 or more fields. Fields may only appear in /// the order declared here. #[repr(u8)] @@ -131,7 +132,7 @@ pub enum Field { Data(Vec) = 6, } -#[derive(TypeInfo, sp_runtime::RuntimeDebug, Clone, PartialEq, Eq, Default)] +#[derive(TypeInfo, sp_core::RuntimeDebug, PassByCodec, Clone, PartialEq, Eq, Default)] /// Statement structure. 
pub struct Statement { proof: Option, @@ -296,7 +297,8 @@ impl Statement { let signature = sp_core::ecdsa::Signature(*signature); let public = sp_core::ecdsa::Public(*signer); if signature.verify(to_sign.as_slice(), &public) { - SignatureVerificationResult::Valid(sp_io::hashing::blake2_256(signer)) + let sender_hash = ::hash(signer); + SignatureVerificationResult::Valid(sender_hash.into()) } else { SignatureVerificationResult::Invalid } diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 875c6b38c34cf..66b90c1eabbaa 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -17,10 +17,13 @@ //! Runtime support for the statement store. -use crate::Statement; +use crate::{Topic, Statement, Hash}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_runtime::RuntimeDebug; +use sp_runtime_interface::{runtime_interface, pass_by::PassByEnum}; +use sp_externalities::ExternalitiesExt; +use sp_std::vec::Vec; /// Information concerning a valid statement. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] @@ -75,3 +78,93 @@ sp_api::decl_runtime_apis! { ) -> Result; } } + +#[cfg(feature = "std")] +sp_externalities::decl_extension! { + /// The offchain database extension that will be registered at the Substrate externalities. + pub struct StatementStoreExt(std::sync::Arc); +} + +#[cfg(feature = "std")] +/// Host extensions for the runtime. +impl StatementStoreExt { + /// Create new instance of externalities extensions. + pub fn new(store: std::sync::Arc) -> Self { + Self(store) + } +} + +#[derive(Debug, Eq, PartialEq, Clone, Copy, Encode, Decode, PassByEnum)] +/// Submission result. +pub enum SubmitResult { + /// Accepted as new. + OkNew, + /// Known statement + OkKnown, + /// Statement failed validation. + Bad, + /// The store is not available. + NotAvailable, +} + +/// Host interface +#[runtime_interface] +pub trait Io { + /// Submit a new new statement. The statement will be broadcast to the network. + /// This is meant to be used by the offchain worker. + fn submit_statement(&mut self, statement: Statement) -> SubmitResult { + if let Some(StatementStoreExt(store)) = self.extension::() { + match store.submit(statement, StatementSource::Chain) { + crate::SubmitResult::New(_) => SubmitResult::OkNew, + crate::SubmitResult::Known => SubmitResult::OkKnown, + // This should not happen for `StatementSource::Chain`. An existing statement will be + // overwritten. + crate::SubmitResult::KnownExpired => SubmitResult::Bad, + crate::SubmitResult::Bad(_) => SubmitResult::Bad, + crate::SubmitResult::InternalError(_) => SubmitResult::Bad, + } + } else { + SubmitResult::NotAvailable + } + } + + /// Return all statements. + fn dump(&mut self) -> Vec<(Hash, Statement)> { + if let Some(StatementStoreExt(store)) = self.extension::() { + store.dump().unwrap_or_default() + } else { + Vec::default() + } + } + + /// Return the data of all known statements which include all topics and have no `DecryptionKey` + /// field. + fn broadcasts(&mut self, match_all_topics: &[Topic]) -> Vec> { + if let Some(StatementStoreExt(store)) = self.extension::() { + store.broadcasts(match_all_topics).unwrap_or_default() + } else { + Vec::default() + } + } + + /// Return the data of all known statements whose decryption key is identified as `dest` (this + /// will generally be the public key or a hash thereof for symmetric ciphers, or a hash of the + /// private key for symmetric ciphers). 
+ fn posted(&mut self, match_all_topics: &[Topic], dest: [u8; 32]) -> Vec> { + if let Some(StatementStoreExt(store)) = self.extension::() { + store.posted(match_all_topics, dest).unwrap_or_default() + } else { + Vec::default() + } + } + + /// Return the decrypted data of all known statements whose decryption key is identified as + /// `dest`. The key must be available to the client. + fn posted_clear(&mut self, match_all_topics: &[Topic], dest: [u8; 32]) -> Vec> { + if let Some(StatementStoreExt(store)) = self.extension::() { + store.posted_clear(match_all_topics, dest).unwrap_or_default() + } else { + Vec::default() + } + } +} From f01160e89a8565ee1807968e820e19f4d97d510b Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 23 Mar 2023 20:04:21 +0100 Subject: [PATCH 15/78] Offchain worker --- frame/statement/src/lib.rs | 77 +++++++++++++++++++++------ primitives/statement-store/src/lib.rs | 16 +++--- 2 files changed, 68 insertions(+), 25 deletions(-) diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index be26c6a86c367..3c944374a3ef8 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -25,13 +25,13 @@ #![cfg_attr(not(feature = "std"), no_std)] -//use codec::{Decode, Encode, MaxEncodedLen}; use frame_support::{ pallet_prelude::*, sp_runtime::{traits::Zero, SaturatedConversion}, sp_tracing::{enter_span, Level}, traits::Currency, }; +use frame_system::pallet_prelude::*; use sp_statement_store::{ runtime_api::{InvalidStatement, StatementSource, ValidStatement}, Proof, SignatureVerificationResult, Statement, @@ -50,10 +50,13 @@ const LOG_TARGET: &str = "runtime::statement"; pub mod pallet { use super::*; + pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + #[pallet::config] pub trait Config: frame_system::Config where - ::AccountId: From<[u8; 32]>, + ::AccountId: From, { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; @@ -71,26 +74,42 @@ pub mod pallet { #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event where - ::AccountId: From<[u8; 32]>, + ::AccountId: From, { /// A new statement is submitted NewStatement { account: T::AccountId, statement: Statement }, } - pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + + #[pallet::hooks] + impl Hooks> for Pallet + where + ::AccountId: From, + sp_statement_store::AccountId: From<::AccountId>, + ::RuntimeEvent: From>, + ::RuntimeEvent: TryInto>, + sp_statement_store::BlockHash: From<::Hash>, + { + fn offchain_worker(now: BlockNumberFor) { + log::trace!(target: LOG_TARGET, "Collecting statements at #{:?}", now); + Pallet::::collect_statements(); + Pallet::::dispatch_statemens(); + } + } } impl Pallet where - ::AccountId: From<[u8; 32]>, - [u8; 32]: From<::AccountId>, + ::AccountId: From, + sp_statement_store::AccountId: From<::AccountId>, ::RuntimeEvent: From>, + ::RuntimeEvent: TryInto>, + sp_statement_store::BlockHash: From<::Hash>, { /// Validate a statement against current state. This is supposed ti be called by the statement /// store on the host side. 
pub fn validate_statement( _source: StatementSource, - statement: Statement, + mut statement: Statement, ) -> Result { sp_io::init_tracing(); @@ -106,13 +125,14 @@ where let account: T::AccountId = who.clone().into(); match frame_system::Pallet::::event_no_consensus(*event_index as usize) { Some(e) => { - if e != (Event::NewStatement { - account: account.clone(), - statement: statement.strip_proof(), - }) - .into() - { - log::debug!(target: LOG_TARGET, "Event mismatch"); + statement.strip_proof(); + if let Ok(Event::NewStatement { account: a, statement: s }) = e.try_into() { + if a != account || s != statement { + log::debug!(target: LOG_TARGET, "Event data mismatch"); + return Err(InvalidStatement::BadProof) + } + } else { + log::debug!(target: LOG_TARGET, "Event type mismatch"); return Err(InvalidStatement::BadProof) } }, @@ -147,7 +167,34 @@ where Ok(ValidStatement { priority }) } + /// Submit a statement event. The statement will be picked up by the offchain worker and + /// broadcast to the network. pub fn submit_statement(account: T::AccountId, statement: Statement) { Self::deposit_event(Event::NewStatement { account, statement }); } + + fn collect_statements() { + // Find `NewStatement` events and submit them to the store + for (index, event) in frame_system::Pallet::::read_events_no_consensus().enumerate() { + if let Ok(Event::::NewStatement{ account, mut statement }) = event.event.try_into() { + if statement.proof().is_none() { + let proof = Proof::OnChain { + who: account.into(), + block_hash: frame_system::Pallet::::parent_hash().into(), + event_index: index as u64, + }; + statement.set_proof(proof); + } + sp_statement_store::runtime_api::io::submit_statement(statement); + } + } + } + + fn dispatch_statemens() { + let all_statements = sp_statement_store::runtime_api::io::dump(); + for (hash, _statement) in all_statements { + // TODO: Custom statement handling + log::trace!(target: LOG_TARGET, "Handling statement #{:?}", hash); + } + } } diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index f3480e6a204b1..7bb28fa61a83a 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -36,6 +36,8 @@ pub type DecryptionKey = [u8; 32]; pub type Hash = [u8; 32]; /// Block hash. pub type BlockHash = [u8; 32]; +/// Account id +pub type AccountId = [u8; 32]; #[cfg(feature = "std")] pub use store_api::{ @@ -103,7 +105,7 @@ pub enum Proof { /// On-chain event proof. OnChain { /// Account identifier associated with the event. - who: [u8; 32], + who: AccountId, /// Hash of block that contains the event. block_hash: BlockHash, /// Index of the event in the event list. @@ -342,14 +344,8 @@ impl Statement { } /// Return a copy of this statement with proof removed - pub fn strip_proof(&self) -> Statement { - Statement { - proof: None, - decryption_key: self.decryption_key.clone(), - topics: self.topics.clone(), - num_topics: self.num_topics, - data: self.data.clone(), - } + pub fn strip_proof(&mut self) { + self.proof = None; } /// Set statement proof. Any existing proof is overwritten. 
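Taken together with the `offchain_worker` hook above, the intended flow is: runtime code deposits a `NewStatement` event via `submit_statement`, and the worker later attaches an on-chain proof and hands the statement to the host-side store. A rough sketch from the calling side; the dispatchable context, `who` and the payload are illustrative assumptions, not part of this patch:

// Inside some dispatchable of a runtime that uses this pallet.
let mut statement = Statement::new();
statement.set_plain_data(b"example payload".to_vec());
pallet_statement::Pallet::<T>::submit_statement(who.clone(), statement);
// Later, in `offchain_worker(now)`, `collect_statements` finds the event at
// index `i`, attaches
// `Proof::OnChain { who: who.into(), block_hash: parent_hash.into(), event_index: i }`
// (only if no proof was set by the submitter) and pushes the statement into
// the store through `sp_statement_store::runtime_api::io::submit_statement`.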
@@ -481,7 +477,7 @@ mod test { statement.set_proof(Proof::Sr25519 { signature: [0u8; 64], signer: [0u8; 32] }); assert_eq!(statement.verify_signature(), SignatureVerificationResult::Invalid); - statement = statement.strip_proof(); + statement.strip_proof(); assert_eq!(statement.verify_signature(), SignatureVerificationResult::NoSignature); } } From 7431f5533e2e948b97159b8d5298a841b12d210f Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 23 Mar 2023 21:24:47 +0100 Subject: [PATCH 16/78] Enable host functions --- Cargo.lock | 1 + bin/node-template/node/src/command.rs | 2 +- bin/node/cli/src/command.rs | 2 +- client/executor/Cargo.toml | 1 + client/executor/src/lib.rs | 7 ++++++- client/executor/src/native_executor.rs | 4 ++-- primitives/statement-store/src/runtime_api.rs | 6 ++++++ 7 files changed, 18 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 88368b1e4d5ad..6d9b7a7633046 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8828,6 +8828,7 @@ dependencies = [ "sp-runtime", "sp-runtime-interface", "sp-state-machine", + "sp-statement-store", "sp-tracing", "sp-trie", "sp-version", diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index e121db820f2a3..fa53200c6349e 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -191,7 +191,7 @@ pub fn run() -> sc_cli::Result<()> { Ok(( cmd.run::::ExtendHostFunctions, >, _>(Some(info_provider)), task_manager, diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index b38b25d8fb3ad..fd12214823ac1 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -246,7 +246,7 @@ pub fn run() -> Result<()> { Ok(( cmd.run::::ExtendHostFunctions, >, _>(Some(info_provider)), task_manager, diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 21a9bd70dde65..7f27814053606 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -32,6 +32,7 @@ sp-runtime-interface = { version = "7.0.0", path = "../../primitives/runtime-int sp-trie = { version = "7.0.0", path = "../../primitives/trie" } sp-version = { version = "5.0.0", path = "../../primitives/version" } sp-wasm-interface = { version = "7.0.0", path = "../../primitives/wasm-interface" } +sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } [dev-dependencies] array-bytes = "4.1" diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index e5bae474e9e25..892a984d4460d 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -62,6 +62,11 @@ pub trait RuntimeVersionOf { ) -> error::Result; } +/// The host functions Substrate provides for the Wasm runtime environment. +/// +/// All these host functions will be callable from inside the Wasm environment. +pub type HostFunctions = (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions); + #[cfg(test)] mod tests { use super::*; @@ -74,7 +79,7 @@ mod tests { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let executor = WasmExecutor::::new( + let executor = WasmExecutor::::new( WasmExecutionMethod::Interpreted, Some(8), 8, diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index c72cf3c9c91df..6838104c585a4 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -535,7 +535,7 @@ pub struct NativeElseWasmExecutor { native_version: NativeVersion, /// Fallback wasm executor. 
wasm: - WasmExecutor>, + WasmExecutor>, } impl NativeElseWasmExecutor { @@ -573,7 +573,7 @@ impl NativeElseWasmExecutor { /// Create a new instance using the given [`WasmExecutor`]. pub fn new_with_wasm_executor( executor: WasmExecutor< - ExtendedHostFunctions, + ExtendedHostFunctions, >, ) -> Self { Self { native_version: D::native_version(), wasm: executor } diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 66b90c1eabbaa..0ef3ef6835031 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -107,6 +107,12 @@ pub enum SubmitResult { NotAvailable, } +/// Export functions for the WASM host. +#[cfg(feature = "std")] +pub type HostFunctions = ( + io::HostFunctions, +); + /// Host interface #[runtime_interface] pub trait Io { From 5cc24b29fd8c7b299bd6620d78b20637d70a8cbd Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 23 Mar 2023 21:25:14 +0100 Subject: [PATCH 17/78] fmt --- client/executor/src/lib.rs | 3 ++- client/executor/src/native_executor.rs | 7 ++----- client/statement-store/src/lib.rs | 7 ++++++- frame/statement/src/lib.rs | 3 ++- primitives/statement-store/src/lib.rs | 5 +++-- primitives/statement-store/src/runtime_api.rs | 16 +++++++--------- 6 files changed, 22 insertions(+), 19 deletions(-) diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index 892a984d4460d..5e6eaeb58418c 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -65,7 +65,8 @@ pub trait RuntimeVersionOf { /// The host functions Substrate provides for the Wasm runtime environment. /// /// All these host functions will be callable from inside the Wasm environment. -pub type HostFunctions = (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions); +pub type HostFunctions = + (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions); #[cfg(test)] mod tests { diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 6838104c585a4..59fde1d98a639 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -534,8 +534,7 @@ pub struct NativeElseWasmExecutor { /// Native runtime version info. native_version: NativeVersion, /// Fallback wasm executor. - wasm: - WasmExecutor>, + wasm: WasmExecutor>, } impl NativeElseWasmExecutor { @@ -572,9 +571,7 @@ impl NativeElseWasmExecutor { /// Create a new instance using the given [`WasmExecutor`]. 
pub fn new_with_wasm_executor( - executor: WasmExecutor< - ExtendedHostFunctions, - >, + executor: WasmExecutor>, ) -> Self { Self { native_version: D::native_version(), wasm: executor } } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index e9d9309b07210..097295bde5b04 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -359,7 +359,12 @@ impl Store { where Block: BlockT, Block::Hash: From, - Client: ProvideRuntimeApi + HeaderBackend + sc_client_api::ExecutorProvider + Send + Sync + 'static, + Client: ProvideRuntimeApi + + HeaderBackend + + sc_client_api::ExecutorProvider + + Send + + Sync + + 'static, Client::Api: ValidateStatement, { let store = Arc::new(Self::new(path, client.clone(), prometheus)?); diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 3c944374a3ef8..944a47fbe51e7 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -176,7 +176,8 @@ where fn collect_statements() { // Find `NewStatement` events and submit them to the store for (index, event) in frame_system::Pallet::::read_events_no_consensus().enumerate() { - if let Ok(Event::::NewStatement{ account, mut statement }) = event.event.try_into() { + if let Ok(Event::::NewStatement { account, mut statement }) = event.event.try_into() + { if statement.proof().is_none() { let proof = Proof::OnChain { who: account.into(), diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 7bb28fa61a83a..a43cec9ac26a5 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -23,9 +23,9 @@ use codec::{Decode, Encode}; use scale_info::TypeInfo; use sp_application_crypto::RuntimeAppPublic; -use sp_runtime_interface::pass_by::PassByCodec; #[cfg(feature = "std")] use sp_core::Pair; +use sp_runtime_interface::pass_by::PassByCodec; use sp_std::vec::Vec; /// Statement topic. @@ -299,7 +299,8 @@ impl Statement { let signature = sp_core::ecdsa::Signature(*signature); let public = sp_core::ecdsa::Public(*signer); if signature.verify(to_sign.as_slice(), &public) { - let sender_hash = ::hash(signer); + let sender_hash = + ::hash(signer); SignatureVerificationResult::Valid(sender_hash.into()) } else { SignatureVerificationResult::Invalid diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 0ef3ef6835031..711ab2d1c273f 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -17,12 +17,12 @@ //! Runtime support for the statement store. -use crate::{Topic, Statement, Hash}; +use crate::{Hash, Statement, Topic}; use codec::{Decode, Encode}; use scale_info::TypeInfo; -use sp_runtime::RuntimeDebug; -use sp_runtime_interface::{runtime_interface, pass_by::PassByEnum}; use sp_externalities::ExternalitiesExt; +use sp_runtime::RuntimeDebug; +use sp_runtime_interface::{pass_by::PassByEnum, runtime_interface}; use sp_std::vec::Vec; /// Information concerning a valid statement. @@ -109,22 +109,20 @@ pub enum SubmitResult { /// Export functions for the WASM host. #[cfg(feature = "std")] -pub type HostFunctions = ( - io::HostFunctions, -); +pub type HostFunctions = (io::HostFunctions,); /// Host interface #[runtime_interface] pub trait Io { /// Submit a new new statement. The statement will be broadcast to the network. /// This is meant to be used by the offchain worker. 
- fn submit_statement(&mut self, statement: Statement) -> SubmitResult { + fn submit_statement(&mut self, statement: Statement) -> SubmitResult { if let Some(StatementStoreExt(store)) = self.extension::() { match store.submit(statement, StatementSource::Chain) { crate::SubmitResult::New(_) => SubmitResult::OkNew, crate::SubmitResult::Known => SubmitResult::OkKnown, - // This should not happen for `StatementSource::Chain`. An existing statement will be - // overwritten. + // This should not happen for `StatementSource::Chain`. An existing statement will + // be overwritten. crate::SubmitResult::KnownExpired => SubmitResult::Bad, crate::SubmitResult::Bad(_) => SubmitResult::Bad, crate::SubmitResult::InternalError(_) => SubmitResult::Bad, From 4febbeab58b2073e0c3a0aa9e7394b699b7eca11 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 24 Mar 2023 09:32:26 +0100 Subject: [PATCH 18/78] Minor tweaks --- client/rpc/src/statement/mod.rs | 4 ---- client/rpc/src/statement/tests.rs | 17 ----------------- client/service/src/builder.rs | 3 +-- frame/statement/src/mock.rs | 2 +- 4 files changed, 2 insertions(+), 24 deletions(-) delete mode 100644 client/rpc/src/statement/tests.rs diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index 34c264afb3bae..9dbd65f5413a9 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -18,10 +18,6 @@ //! Substrate statement store API. -#[cfg(test)] -mod tests; - -//use self::error::Error; use jsonrpsee::core::{async_trait, RpcResult}; /// Re-export the API for backward compatibility. pub use sc_rpc_api::statement::{error::Error, StatementApiServer}; diff --git a/client/rpc/src/statement/tests.rs b/client/rpc/src/statement/tests.rs deleted file mode 100644 index b46a8f75295fe..0000000000000 --- a/client/rpc/src/statement/tests.rs +++ /dev/null @@ -1,17 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
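For the reading side, a sketch of how offchain-worker code might query the store back through the `io` host interface shown above; the topic constant and the log line are illustrative assumptions, not part of this patch:

// `broadcasts` returns the plain data of every statement that carries all of
// the requested topics and has no decryption key.
let topic: sp_statement_store::Topic = [42u8; 32];
for data in sp_statement_store::runtime_api::io::broadcasts(&[topic]) {
	log::trace!(target: "runtime::statement", "received {} bytes", data.len());
}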
diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index ecbdc3d801563..cc4e3d97b33a9 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -694,7 +694,6 @@ where } if let Some(store) = statement_store { let store = sc_rpc::statement::StatementStore::new(store, deny_unsafe).into_rpc(); - rpc_api.merge(store).map_err(|e| Error::Application(e.into()))?; } @@ -950,7 +949,7 @@ where spawn_handle.spawn("network-statement-validator", Some("networking"), fut); }) }; - // crate statement goissip protocol and add it to the list of supported protocols of + // crate statement gossip protocol and add it to the list of supported protocols of // `network_params` let (statement_handler, statement_handler_controller) = statement_handler_proto.build( network.clone(), diff --git a/frame/statement/src/mock.rs b/frame/statement/src/mock.rs index fced6af03f073..c78681dcb55a6 100644 --- a/frame/statement/src/mock.rs +++ b/frame/statement/src/mock.rs @@ -15,7 +15,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Preimage test environment. +//! # Statement pallet test environment. use super::*; From b06e356a58549867cb9f7e866bb46f5808946581 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 24 Mar 2023 09:48:48 +0100 Subject: [PATCH 19/78] Fixed a warning --- primitives/statement-store/src/runtime_api.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 711ab2d1c273f..ab0e000b991fc 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -20,11 +20,13 @@ use crate::{Hash, Statement, Topic}; use codec::{Decode, Encode}; use scale_info::TypeInfo; -use sp_externalities::ExternalitiesExt; use sp_runtime::RuntimeDebug; use sp_runtime_interface::{pass_by::PassByEnum, runtime_interface}; use sp_std::vec::Vec; +#[cfg(feature = "std")] +use sp_externalities::ExternalitiesExt; + /// Information concerning a valid statement. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ValidStatement { From c6f2b531c9eac226686d0eb95e4abda3f925ec59 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 24 Mar 2023 10:17:25 +0100 Subject: [PATCH 20/78] Removed tracing --- frame/statement/src/lib.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 944a47fbe51e7..a01d1e32ecdba 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -28,7 +28,6 @@ use frame_support::{ pallet_prelude::*, sp_runtime::{traits::Zero, SaturatedConversion}, - sp_tracing::{enter_span, Level}, traits::Currency, }; use frame_system::pallet_prelude::*; @@ -112,8 +111,6 @@ where mut statement: Statement, ) -> Result { sp_io::init_tracing(); - - enter_span! 
{ Level::TRACE, "validate_statement" }; log::debug!(target: LOG_TARGET, "Validating statement {:?}", statement); let account: T::AccountId = match statement.proof() { Some(Proof::OnChain { who, block_hash, event_index }) => { From 50b583df50ebbc9d72ce98fbfa350ff97125b2f2 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 24 Mar 2023 19:29:27 +0100 Subject: [PATCH 21/78] Manual expiration --- client/statement-store/src/lib.rs | 220 +++++++++++++------------- frame/statement/src/lib.rs | 1 - primitives/statement-store/src/lib.rs | 50 +++++- 3 files changed, 154 insertions(+), 117 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 097295bde5b04..4399e6e26390d 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -46,9 +46,9 @@ const CURRENT_VERSION: u32 = 1; const LOG_TARGET: &str = "statement-store"; -const EXPIRE_AFTER: u64 = 24 * 60 * 60; //24h const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h const MAX_LIVE_STATEMENTS: usize = 8192; +const MAX_TOTAL_SIZE: usize = 2 * 1024 * 1024 * 1024; /// Suggested maintenance period. A good value to call `Store::maintain` with. #[allow(dead_code)] @@ -57,15 +57,15 @@ pub const MAINTENANCE_PERIOD: std::time::Duration = std::time::Duration::from_se mod col { pub const META: u8 = 0; pub const STATEMENTS: u8 = 1; + pub const EXPIRED: u8 = 2; - pub const COUNT: u8 = 2; + pub const COUNT: u8 = 3; } #[derive(PartialEq, Eq)] struct EvictionPriority { hash: Hash, priority: u64, - timestamp: u64, } impl PartialOrd for EvictionPriority { @@ -73,7 +73,6 @@ impl PartialOrd for EvictionPriority { Some( self.priority .cmp(&other.priority) - .then_with(|| self.timestamp.cmp(&other.timestamp)) .reverse(), ) } @@ -83,7 +82,6 @@ impl Ord for EvictionPriority { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.priority .cmp(&other.priority) - .then_with(|| self.timestamp.cmp(&other.timestamp)) .reverse() } } @@ -95,7 +93,7 @@ struct Index { all_topics: HashMap; 4], Option)>, by_priority: BinaryHeap, entries: HashMap, - expired: HashMap, + expired: HashMap, // Value is expiration timestamp. max_entries: usize, } @@ -149,7 +147,6 @@ pub struct Store { #[derive(Encode, Decode, Clone)] struct StatementMeta { priority: u64, - timestamp: u64, } #[derive(Encode, Decode)] @@ -161,7 +158,7 @@ struct StatementWithMeta { enum IndexQuery { Unknown, Exists(u64), - Expired(u64), + Expired, } impl Index { @@ -186,7 +183,6 @@ impl Index { self.by_priority.push(EvictionPriority { hash, priority: meta.priority, - timestamp: meta.timestamp, }); } } @@ -195,14 +191,14 @@ impl Index { if let Some(meta) = self.entries.get(hash) { return IndexQuery::Exists(meta.priority) } - if let Some(meta) = self.expired.get(hash) { - return IndexQuery::Expired(meta.priority) + if let Some(_) = self.expired.get(hash) { + return IndexQuery::Expired } IndexQuery::Unknown } - fn insert_expired(&mut self, hash: Hash, meta: StatementMeta) { - self.expired.insert(hash, meta); + fn insert_expired(&mut self, hash: Hash, timestamp: u64) { + self.expired.insert(hash, timestamp); } fn is_expired(&self, hash: &Hash) -> bool { @@ -261,61 +257,54 @@ impl Index { Ok(()) } - fn maintain(&mut self, current_time: u64) -> Vec<(parity_db::ColId, Vec, Option>)> { + fn maintain(&mut self, current_time: u64) -> Vec { // Purge previously expired messages. 
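With this change a removed statement goes through two steps: it is first remembered in the `expired` map (and the new EXPIRED column) together with the time it was expired, so the same statement arriving again over gossip can be answered with `KnownExpired`, and only once `PURGE_AFTER` has elapsed is it forgotten entirely. A simplified, std-only sketch of that bookkeeping follows (not the store's own types); the actual retain loop continues just below:

use std::collections::HashMap;

type Hash = [u8; 32];
const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; // 48h, matching the store constant

#[derive(Default)]
struct ExpiredIndex {
    // Statement hash -> timestamp at which it was expired.
    expired: HashMap<Hash, u64>,
}

impl ExpiredIndex {
    // Step 1: a statement leaving the live index is remembered as expired, so a
    // copy gossiped again can be rejected instead of being re-validated and
    // re-stored.
    fn mark_expired(&mut self, hash: Hash, now: u64) {
        self.expired.insert(hash, now);
    }

    // Step 2: maintenance forgets entries older than PURGE_AFTER; the returned
    // hashes are what the store would also delete from the EXPIRED column.
    fn purge(&mut self, now: u64) -> Vec<Hash> {
        let mut purged = Vec::new();
        self.expired.retain(|hash, timestamp| {
            if *timestamp + PURGE_AFTER <= now {
                purged.push(*hash);
                false
            } else {
                true
            }
        });
        purged
    }
}

fn main() {
    let mut index = ExpiredIndex::default();
    index.mark_expired([1u8; 32], 0);
    assert!(index.purge(PURGE_AFTER - 1).is_empty());
    assert_eq!(index.purge(PURGE_AFTER).len(), 1);
}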
let mut purged = Vec::new(); - self.expired.retain(|hash, meta| { - if meta.timestamp + PURGE_AFTER <= current_time { - purged.push((col::STATEMENTS, hash.to_vec(), None)); + self.expired.retain(|hash, timestamp| { + if *timestamp + PURGE_AFTER <= current_time { + purged.push(hash.clone()); log::trace!(target: LOG_TARGET, "Purged statement {:?}", HexDisplay::from(hash)); false } else { true } }); + purged + } - // Expire messages. - let mut num_expired = 0; - self.entries.retain(|hash, meta| { - if meta.timestamp + EXPIRE_AFTER <= current_time { - if let Some((topics, key)) = self.all_topics.remove(hash) { - for t in topics { - if let Some(t) = t { - if let Some(set) = self.by_topic.get_mut(&t) { - set.remove(hash); - } - } - } - if let Some(k) = key { - if let Some(set) = self.by_dec_key.get_mut(&k) { + fn make_expired(&mut self, hash: &Hash, current_time: u64) -> bool { + if let Some(_) = self.entries.remove(hash) { + if let Some((topics, key)) = self.all_topics.remove(hash) { + for t in topics { + if let Some(t) = t { + if let Some(set) = self.by_topic.get_mut(&t) { set.remove(hash); } } } - log::trace!(target: LOG_TARGET, "Expired statement {:?}", HexDisplay::from(hash)); - self.expired.insert(hash.clone(), meta.clone()); - num_expired += 1; - false - } else { - true + if let Some(k) = key { + if let Some(set) = self.by_dec_key.get_mut(&k) { + set.remove(hash); + } + } } - }); - if num_expired > 0 { - // Rebuild the priority queue + self.expired.insert(hash.clone(), current_time); + log::trace!(target: LOG_TARGET, "Expired statement {:?}", HexDisplay::from(hash)); self.by_priority = self .entries .iter() .map(|(hash, meta)| EvictionPriority { hash: hash.clone(), priority: meta.priority, - timestamp: meta.timestamp, }) - .collect(); + .collect(); + true + } else { + false } - purged } - fn evict(&mut self) -> Vec<(parity_db::ColId, Vec, Option>)> { + fn evict(&mut self) -> Vec { let mut evicted_set = Vec::new(); while self.by_priority.len() >= self.max_entries { @@ -340,7 +329,8 @@ impl Index { } } } - evicted_set.push((col::STATEMENTS, evicted.hash.to_vec(), None)); + //evicted_set.push((col::STATEMENTS, evicted.hash.to_vec(), None)); + evicted_set.push(evicted.hash); } else { break } @@ -445,25 +435,32 @@ impl Store { StatementWithMeta::decode(&mut statement.as_slice()) { let hash = statement_with_meta.statement.hash(); - if statement_with_meta.meta.timestamp + EXPIRE_AFTER < current_time { - log::trace!( - target: LOG_TARGET, - "Statement loaded (expired): {:?}", - HexDisplay::from(&hash) - ); - index.insert_expired(hash, statement_with_meta.meta); - } else { - log::trace!( - target: LOG_TARGET, - "Statement loaded {:?}", - HexDisplay::from(&hash) - ); - index.insert_with_meta(hash, statement_with_meta); - } + log::trace!( + target: LOG_TARGET, + "Statement loaded {:?}", + HexDisplay::from(&hash) + ); + index.insert_with_meta(hash, statement_with_meta); } true }) .map_err(|e| Error::Db(e.to_string()))?; + self.db + .iter_column_while(col::EXPIRED, |item| { + let expired_info = item.value; + if let Ok((hash, timestamp)) = + <(Hash, u64)>::decode(&mut expired_info.as_slice()) + { + log::trace!( + target: LOG_TARGET, + "Statement loaded (expired): {:?}", + HexDisplay::from(&hash) + ); + index.insert_expired(hash, timestamp); + } + true + }) + .map_err(|e| Error::Db(e.to_string()))?; } self.maintain(); @@ -512,6 +509,7 @@ impl Store { pub fn maintain(&self) { log::trace!(target: LOG_TARGET, "Started store maintenance"); let deleted = self.index.write().maintain(self.timestamp()); + let 
deleted: Vec<_> = deleted.into_iter().map(|hash| (col::EXPIRED, hash.to_vec(), None)).collect(); let count = deleted.len() as u64; if let Err(e) = self.db.commit(deleted) { log::warn!(target: LOG_TARGET, "Error writing to the statement database: {:?}", e); @@ -632,73 +630,77 @@ impl StatementStore for Store { /// Submit a statement to the store. Validates the statement and returns validation result. fn submit(&self, statement: Statement, source: StatementSource) -> SubmitResult { let hash = statement.hash(); - let priority = match self.index.read().query(&hash) { - IndexQuery::Expired(priority) => { + match self.index.read().query(&hash) { + IndexQuery::Expired => { if !source.can_be_resubmitted() { return SubmitResult::KnownExpired } - priority }, - IndexQuery::Exists(priority) => { + IndexQuery::Exists(_) => { if !source.can_be_resubmitted() { return SubmitResult::Known } - priority }, - IndexQuery::Unknown => { - // Validate. - let at_block = if let Some(Proof::OnChain { block_hash, .. }) = statement.proof() { - Some(block_hash.clone()) - } else { - None - }; - let validation_result = (self.validate_fn)(at_block, source, statement.clone()); - match validation_result { - Ok(ValidStatement { priority }) => priority, - Err(InvalidStatement::BadProof) => { - log::debug!( - target: LOG_TARGET, - "Statement validation failed: BadProof, {:?}", - statement - ); - self.metrics.report(|metrics| metrics.validations_invalid.inc()); - return SubmitResult::Bad("Bad statement proof") - }, - Err(InvalidStatement::NoProof) => { - log::debug!( - target: LOG_TARGET, - "Statement validation failed: NoProof, {:?}", - statement - ); - self.metrics.report(|metrics| metrics.validations_invalid.inc()); - return SubmitResult::Bad("Missing statement proof") - }, - Err(InvalidStatement::InternalError) => - return SubmitResult::InternalError(Error::Runtime), - } + IndexQuery::Unknown => {}, + } + // Validate. + let at_block = if let Some(Proof::OnChain { block_hash, .. }) = statement.proof() { + Some(block_hash.clone()) + } else { + None + }; + let validation_result = (self.validate_fn)(at_block, source, statement.clone()); + let priority = match validation_result { + Ok(ValidStatement { priority }) => priority, + Err(InvalidStatement::BadProof) => { + log::debug!( + target: LOG_TARGET, + "Statement validation failed: BadProof, {:?}", + statement + ); + self.metrics.report(|metrics| metrics.validations_invalid.inc()); + return SubmitResult::Bad("Bad statement proof") }, + Err(InvalidStatement::NoProof) => { + log::debug!( + target: LOG_TARGET, + "Statement validation failed: NoProof, {:?}", + statement + ); + self.metrics.report(|metrics| metrics.validations_invalid.inc()); + return SubmitResult::Bad("Missing statement proof") + }, + Err(InvalidStatement::InternalError) => + return SubmitResult::InternalError(Error::Runtime), }; - // Commit to the db prior to locking the index. 
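Both `maintain` and `submit` gather their database changes into a single batch of `(column, key, value)` tuples, where a `None` value deletes the key; committing the whole batch in one call keeps the move between the STATEMENTS and EXPIRED columns atomic. A rough illustration of assembling such a batch, with the parity-db handle replaced by a stub and the expired value simplified to a raw little-endian timestamp (the store itself SCALE-encodes `(hash, timestamp)`):

// Column ids matching the `col` module above.
const COL_STATEMENTS: u8 = 1;
const COL_EXPIRED: u8 = 2;

type Hash = [u8; 32];
// (column, key, Some(value) to write, None to delete).
type DbOp = (u8, Vec<u8>, Option<Vec<u8>>);

// Operations that retire one statement: drop it from the live column and
// record when it expired in the expired column.
fn expire_ops(hash: Hash, now: u64) -> Vec<DbOp> {
    vec![
        (COL_STATEMENTS, hash.to_vec(), None),
        (COL_EXPIRED, hash.to_vec(), Some(now.to_le_bytes().to_vec())),
    ]
}

// Stand-in for the database handle; the real commit applies the whole batch
// atomically.
fn commit(ops: Vec<DbOp>) {
    for (col, key, value) in ops {
        match value {
            Some(v) => println!("col {col}: write {} bytes at {:02x?}", v.len(), &key[..4]),
            None => println!("col {col}: delete {:02x?}", &key[..4]),
        }
    }
}

fn main() {
    commit(expire_ops([7u8; 32], 1_700_000_000));
}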
let statement_with_meta = StatementWithMeta { - meta: StatementMeta { priority, timestamp: self.timestamp() }, + meta: StatementMeta { priority }, statement, }; - let mut commit = self.index.write().evict(); + let current_time = self.timestamp(); + let mut commit = Vec::new(); commit.push((col::STATEMENTS, hash.to_vec(), Some(statement_with_meta.encode()))); - if let Err(e) = self.db.commit(commit) { - log::debug!( - target: LOG_TARGET, - "Statement validation failed: database error {}, {:?}", - e, - statement_with_meta.statement - ); - return SubmitResult::InternalError(Error::Db(e.to_string())) + commit.push((col::EXPIRED, hash.to_vec(), None)); + { + let mut index = self.index.write(); + for hash in index.evict() { + commit.push((col::STATEMENTS, hash.to_vec(), None)); + commit.push((col::EXPIRED, hash.to_vec(), Some((hash, current_time).encode()))); + } + if let Err(e) = self.db.commit(commit) { + log::debug!( + target: LOG_TARGET, + "Statement validation failed: database error {}, {:?}", + e, + statement_with_meta.statement + ); + return SubmitResult::InternalError(Error::Db(e.to_string())) + } + index.insert_with_meta(hash, statement_with_meta); } self.metrics.report(|metrics| metrics.submitted_statements.inc()); - let mut index = self.index.write(); - index.insert_with_meta(hash, statement_with_meta); let network_priority = if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; log::trace!(target: LOG_TARGET, "Statement submitted: {:?}", HexDisplay::from(&hash)); diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index a01d1e32ecdba..1cf55ce784930 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -114,7 +114,6 @@ where log::debug!(target: LOG_TARGET, "Validating statement {:?}", statement); let account: T::AccountId = match statement.proof() { Some(Proof::OnChain { who, block_hash, event_index }) => { - // block_hash and event_index should be checked by the host if frame_system::Pallet::::parent_hash().as_ref() != block_hash.as_slice() { log::debug!(target: LOG_TARGET, "Bad block hash."); return Err(InvalidStatement::BadProof) diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index a43cec9ac26a5..b8e63d8583c6f 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -38,6 +38,8 @@ pub type Hash = [u8; 32]; pub type BlockHash = [u8; 32]; /// Account id pub type AccountId = [u8; 32]; +/// Statement channel. +pub type Channel = [u8; 32]; #[cfg(feature = "std")] pub use store_api::{ @@ -122,16 +124,20 @@ pub enum Field { AuthenticityProof(Proof) = 0, /// An identifier for the key that `Data` field may be decrypted with. DecryptionKey(DecryptionKey) = 1, + /// Priority when competing with other messages from the same sender. + Priority(u32) = 2, + /// Account channel to use. Only one message per `(account, channel)` pair is allowed. + Channel(Channel) = 3, /// First statement topic. - Topic1(Topic) = 2, + Topic1(Topic) = 4, /// Second statement topic. - Topic2(Topic) = 3, + Topic2(Topic) = 5, /// Third statement topic. - Topic3(Topic) = 4, + Topic3(Topic) = 6, /// Fourth statement topic. - Topic4(Topic) = 5, + Topic4(Topic) = 7, /// Additional data. 
- Data(Vec) = 6, + Data(Vec) = 8, } #[derive(TypeInfo, sp_core::RuntimeDebug, PassByCodec, Clone, PartialEq, Eq, Default)] @@ -139,6 +145,8 @@ pub enum Field { pub struct Statement { proof: Option, decryption_key: Option, + channel: Option, + priority: Option, num_topics: u8, topics: [Topic; 4], data: Option>, @@ -155,6 +163,8 @@ impl Decode for Statement { match field { Field::AuthenticityProof(p) => statement.set_proof(p), Field::DecryptionKey(key) => statement.set_decryption_key(key), + Field::Priority(p) => statement.set_priority(p), + Field::Channel(c) => statement.set_channel(c), Field::Topic1(t) => statement.set_topic(0, t), Field::Topic2(t) => statement.set_topic(1, t), Field::Topic3(t) => statement.set_topic(2, t), @@ -354,6 +364,16 @@ impl Statement { self.proof = Some(proof) } + /// Set statement priority. + pub fn set_priority(&mut self, priority: u32) { + self.priority = Some(priority) + } + + /// Set statement channel. + pub fn set_channel(&mut self, channel: Channel) { + self.channel = Some(channel) + } + /// Set topic by index. pub fn set_topic(&mut self, index: usize, topic: Topic) { if index <= 4 { @@ -377,6 +397,8 @@ impl Statement { // will be a prefix of vector length. let num_fields = if with_proof && self.proof.is_some() { 1 } else { 0 } + if self.decryption_key.is_some() { 1 } else { 0 } + + if self.priority.is_some() { 1 } else { 0 } + + if self.channel.is_some() { 1 } else { 0 } + if self.data.is_some() { 1 } else { 0 } + self.num_topics as u32; @@ -394,12 +416,20 @@ impl Statement { 1u8.encode_to(&mut output); decryption_key.encode_to(&mut output); } + if let Some(priority) = &self.priority { + 2u8.encode_to(&mut output); + priority.encode_to(&mut output); + } + if let Some(channel) = &self.channel { + 3u8.encode_to(&mut output); + channel.encode_to(&mut output); + } for t in 0..self.num_topics { - (2u8 + t).encode_to(&mut output); + (4u8 + t).encode_to(&mut output); self.topics[t as usize].encode_to(&mut output); } if let Some(data) = &self.data { - 6u8.encode_to(&mut output); + 8u8.encode_to(&mut output); data.encode_to(&mut output); } output @@ -422,9 +452,13 @@ mod test { let topic1 = [0x01; 32]; let topic2 = [0x02; 32]; let data = vec![55, 99]; + let priority = 999; + let channel = [0xcc; 32]; statement.set_proof(proof.clone()); statement.set_decryption_key(decryption_key.clone()); + statement.set_priority(priority); + statement.set_channel(channel.clone()); statement.set_topic(0, topic1.clone()); statement.set_topic(1, topic2.clone()); statement.set_plain_data(data.clone()); @@ -432,6 +466,8 @@ mod test { let fields = vec![ Field::AuthenticityProof(proof.clone()), Field::DecryptionKey(decryption_key.clone()), + Field::Priority(priority), + Field::Channel(channel), Field::Topic1(topic1.clone()), Field::Topic2(topic2.clone()), Field::Data(data.clone()), From e141eb705539006bb8741891a40ecb40c2dad9e0 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 28 Mar 2023 18:54:48 +0200 Subject: [PATCH 22/78] Reworked constraint management --- Cargo.lock | 1 - client/db/Cargo.toml | 1 - client/rpc-api/src/statement/mod.rs | 2 +- client/statement-store/src/lib.rs | 540 +++++++++++++----- primitives/statement-store/src/lib.rs | 42 +- primitives/statement-store/src/runtime_api.rs | 14 +- primitives/statement-store/src/store_api.rs | 5 + 7 files changed, 445 insertions(+), 160 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f188beadde319..0763caa99e225 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8424,7 +8424,6 @@ dependencies = [ "sp-database", "sp-runtime", 
"sp-state-machine", - "sp-statement-store", "sp-tracing", "sp-trie", "substrate-test-runtime-client", diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index f8f2769418403..8e4bcf18a2d04 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -33,7 +33,6 @@ sp-core = { version = "7.0.0", path = "../../primitives/core" } sp-database = { version = "4.0.0-dev", path = "../../primitives/database" } sp-runtime = { version = "7.0.0", path = "../../primitives/runtime" } sp-state-machine = { version = "0.13.0", path = "../../primitives/state-machine" } -sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } sp-trie = { version = "7.0.0", path = "../../primitives/trie" } [dev-dependencies] diff --git a/client/rpc-api/src/statement/mod.rs b/client/rpc-api/src/statement/mod.rs index b44ede6d17a0f..9a0b65d30c7f1 100644 --- a/client/rpc-api/src/statement/mod.rs +++ b/client/rpc-api/src/statement/mod.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Substrate offchain API. +//! Substrate Statement Store RPC API. use jsonrpsee::{core::RpcResult, proc_macros::rpc}; use sp_core::Bytes; diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 4399e6e26390d..c1ab1e88a5e7a 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -34,10 +34,10 @@ use sp_core::{hexdisplay::HexDisplay, Decode, Encode}; use sp_runtime::traits::Block as BlockT; use sp_statement_store::{ runtime_api::{InvalidStatement, StatementSource, ValidStatement, ValidateStatement}, - BlockHash, DecryptionKey, Hash, NetworkPriority, Proof, Result, Statement, SubmitResult, Topic, + BlockHash, DecryptionKey, Hash, NetworkPriority, Proof, Result, Statement, SubmitResult, Topic, AccountId, Channel, }; use std::{ - collections::{BinaryHeap, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; @@ -48,7 +48,7 @@ const LOG_TARGET: &str = "statement-store"; const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h const MAX_LIVE_STATEMENTS: usize = 8192; -const MAX_TOTAL_SIZE: usize = 2 * 1024 * 1024 * 1024; +const MAX_TOTAL_SIZE: usize = 64 * 1024 * 1024; /// Suggested maintenance period. A good value to call `Store::maintain` with. #[allow(dead_code)] @@ -63,26 +63,42 @@ mod col { } #[derive(PartialEq, Eq)] -struct EvictionPriority { +struct PriorityKey { hash: Hash, - priority: u64, + priority: u32, } -impl PartialOrd for EvictionPriority { +#[derive(PartialEq, Eq)] +struct ChannelEntry { + hash: Hash, + priority: u32, +} + +#[derive(Default)] +struct StatementsForAccount { + // Statements ordered by priority. + by_priority: BTreeMap, usize)>, + // Channel to statement map. Only one statement per channel is allowed. + channels: HashMap, + // Sum of all `Data` field sizes. 
+ data_size: usize, +} + +impl PartialOrd for PriorityKey { fn partial_cmp(&self, other: &Self) -> Option { Some( self.priority .cmp(&other.priority) - .reverse(), + .then_with(|| self.hash.cmp(&other.hash)) ) } } -impl Ord for EvictionPriority { +impl Ord for PriorityKey { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.priority .cmp(&other.priority) - .reverse() + .then_with(|| self.hash.cmp(&other.hash)) } } @@ -90,11 +106,14 @@ impl Ord for EvictionPriority { struct Index { by_topic: HashMap>, by_dec_key: HashMap>, - all_topics: HashMap; 4], Option)>, - by_priority: BinaryHeap, - entries: HashMap, + statement_topics: HashMap; 4], Option)>, + entries: HashMap, // Statement hash -> (Account id, global_priority, priority) expired: HashMap, // Value is expiration timestamp. + accounts: HashMap, + by_global_priority: BTreeMap, max_entries: usize, + max_size: usize, + total_size: usize, } struct ClientWrapper { @@ -140,13 +159,14 @@ pub struct Store { + Send + Sync, >, + // Used for testing time_override: Option, metrics: PrometheusMetrics, } #[derive(Encode, Decode, Clone)] struct StatementMeta { - priority: u64, + global_priority: u32, } #[derive(Encode, Decode)] @@ -157,15 +177,27 @@ struct StatementWithMeta { enum IndexQuery { Unknown, - Exists(u64), + Exists, Expired, } +enum MaybeInserted { + Inserted(HashSet), + Ignored +} + impl Index { - fn insert_with_meta(&mut self, hash: Hash, statement: StatementWithMeta) { + fn new() -> Index { + Index { + max_entries: MAX_LIVE_STATEMENTS, + max_size: MAX_TOTAL_SIZE, + .. Default::default() + } + } + + fn insert_new(&mut self, hash: Hash, account: AccountId, global_priority: u32, statement: &Statement) { let mut all_topics = [None; 4]; let mut nt = 0; - let StatementWithMeta { statement, meta } = statement; while let Some(t) = statement.topic(nt) { self.by_topic.entry(t).or_default().insert(hash); all_topics[nt] = Some(t); @@ -176,22 +208,28 @@ impl Index { self.by_dec_key.entry(k.clone()).or_default().insert(hash); } if nt > 0 || key.is_some() { - self.all_topics.insert(hash, (all_topics, key)); + self.statement_topics.insert(hash, (all_topics, key)); } - self.expired.remove(&hash); - if self.entries.insert(hash, meta.clone()).is_none() { - self.by_priority.push(EvictionPriority { - hash, - priority: meta.priority, - }); + let priority = statement.priority().unwrap_or(0); + self.entries.insert(hash, (account.clone(), global_priority, priority)); + self.by_global_priority.insert(PriorityKey { hash: hash.clone(), priority: global_priority }, statement.data_len()); + self.total_size += statement.data_len(); + let mut account_info = self.accounts.entry(account).or_default(); + account_info.data_size += statement.data_len(); + if let Some(channel) = statement.channel() { + account_info.channels.insert(channel, ChannelEntry { hash, priority }); } + account_info.by_priority.insert(PriorityKey { + hash, + priority, + }, (statement.channel(), statement.data_len())); } fn query(&self, hash: &Hash) -> IndexQuery { - if let Some(meta) = self.entries.get(hash) { - return IndexQuery::Exists(meta.priority) + if self.entries.contains_key(hash) { + return IndexQuery::Exists } - if let Some(_) = self.expired.get(hash) { + if self.expired.contains_key(hash) { return IndexQuery::Expired } IndexQuery::Unknown @@ -211,15 +249,21 @@ impl Index { topics: &[Topic], mut f: impl FnMut(&Hash) -> Result<()>, ) -> Result<()> { - let mut sets: [Option<&HashSet>; 4] = Default::default(); - let mut num_sets = 0; - for t in topics { - sets[num_sets] = 
self.by_topic.get(t); - if sets[num_sets].is_some() { - num_sets += 1; + let empty = HashSet::new(); + let mut sets: [&HashSet; 4] = [∅ 4]; + if topics.len() > 4 { + return Ok(()) + } + for (i, t) in topics.iter().enumerate() { + let set = self.by_topic.get(t); + if set.map(|s| s.len()).unwrap_or(0) == 0 { + // At least one of the topics does not exist in the index. + return Ok(()) } + sets[i] = set.expect("Function returns if set is None"); } - if num_sets == 0 && key.is_none() { + let sets = &mut sets[0 .. topics.len()]; + if sets.is_empty() && key.is_none() { // Iterate all entries for h in self.entries.keys() { log::trace!(target: LOG_TARGET, "Iterating: {:?}", HexDisplay::from(h)); @@ -227,12 +271,12 @@ impl Index { } } else { // Start with the smallest topic set or the key set. - sets[0..num_sets].sort_by_key(|s| s.map_or(0, HashSet::len)); + sets.sort_by_key(|s| s.len()); if let Some(key) = key { let key_set = if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; for item in key_set { - if sets.iter().all(|set| set.unwrap().contains(item)) { + if sets.iter().all(|set| set.contains(item)) { log::trace!( target: LOG_TARGET, "Iterating by key: {:?}", @@ -242,8 +286,8 @@ impl Index { } } } else { - for item in sets[0].unwrap() { - if sets[1..num_sets].iter().all(|set| set.unwrap().contains(item)) { + for item in sets[0] { + if sets[1..].iter().all(|set| set.contains(item)) { log::trace!( target: LOG_TARGET, "Iterating by topic: {:?}", @@ -273,8 +317,11 @@ impl Index { } fn make_expired(&mut self, hash: &Hash, current_time: u64) -> bool { - if let Some(_) = self.entries.remove(hash) { - if let Some((topics, key)) = self.all_topics.remove(hash) { + if let Some((account, global_priority, priority)) = self.entries.remove(hash) { + let key = PriorityKey { hash: hash.clone(), priority: global_priority }; + let len = self.by_global_priority.remove(&key).unwrap_or(0); + self.total_size -= len; + if let Some((topics, key)) = self.statement_topics.remove(hash) { for t in topics { if let Some(t) = t { if let Some(set) = self.by_topic.get_mut(&t) { @@ -289,53 +336,132 @@ impl Index { } } self.expired.insert(hash.clone(), current_time); + if let std::collections::hash_map::Entry::Occupied(mut account_rec) = self.accounts.entry(account) { + let key = PriorityKey { hash: hash.clone(), priority }; + if let Some((channel, len)) = account_rec.get_mut().by_priority.remove(&key) { + account_rec.get_mut().data_size -= len; + if let Some(channel) = channel { + account_rec.get_mut().channels.remove(&channel); + } + } + if account_rec.get().by_priority.is_empty() { + account_rec.remove_entry(); + } + } log::trace!(target: LOG_TARGET, "Expired statement {:?}", HexDisplay::from(hash)); - self.by_priority = self - .entries - .iter() - .map(|(hash, meta)| EvictionPriority { - hash: hash.clone(), - priority: meta.priority, - }) - .collect(); true } else { false } } - fn evict(&mut self) -> Vec { - let mut evicted_set = Vec::new(); + fn insert(&mut self, hash: Hash, statement: &Statement, account: &AccountId, validation: &ValidStatement, current_time: u64) -> MaybeInserted { + let statement_len = statement.data_len(); + if statement_len > validation.max_size as usize { + log::debug!( + target: LOG_TARGET, + "Ignored oversize message: {:?} ({} bytes)", + HexDisplay::from(&hash), + statement_len, + ); + return MaybeInserted::Ignored; + } - while self.by_priority.len() >= self.max_entries { - if let Some(evicted) = self.by_priority.pop() { - log::trace!( - target: LOG_TARGET, - "Evicting 
statement {:?}", - HexDisplay::from(&evicted.hash) - ); - self.entries.remove(&evicted.hash); - if let Some((topics, key)) = self.all_topics.remove(&evicted.hash) { - for t in topics { - if let Some(t) = t { - if let Some(set) = self.by_topic.get_mut(&t) { - set.remove(&evicted.hash); - } - } - } - if let Some(k) = key { - if let Some(set) = self.by_dec_key.get_mut(&k) { - set.remove(&evicted.hash); + let mut evicted = HashSet::new(); + let mut would_free_size = 0; + let priority = statement.priority().unwrap_or(0); + let (max_size, max_count) = (validation.max_size as usize, validation.max_count as usize); + // It may happen that we can't delete enough lower priority messages + // to satisfy size constraints. We check for that before deleting anything, + // taking into account channel message replacement. + if let Some(account_rec) = self.accounts.get(account) { + if let Some(channel) = statement.channel() { + if let Some(channel_record) = account_rec.channels.get(&channel) { + if priority <= channel_record.priority { + // Trying to replace channel message with lower priority + log::debug!( + target: LOG_TARGET, + "Ignored lower priority channel message: {:?} {} <= {}", + HexDisplay::from(&hash), + priority, + channel_record.priority, + ); + return MaybeInserted::Ignored; + } else { + // Would replace channel message. Still need to check for size constraints + // below. + log::debug!( + target: LOG_TARGET, + "Replacing higher priority channel message: {:?} ({}) > {:?} ({})", + HexDisplay::from(&hash), + priority, + HexDisplay::from(&channel_record.hash), + channel_record.priority, + ); + let key = PriorityKey { hash: channel_record.hash, priority: channel_record.priority }; + if let Some((_channel, len)) = account_rec.by_priority.get(&key) { + would_free_size = *len; + evicted.insert(channel_record.hash); } } } - //evicted_set.push((col::STATEMENTS, evicted.hash.to_vec(), None)); - evicted_set.push(evicted.hash); - } else { - break + } + // Check if we can evict enough lower priority statements to satisfy constraints + for (entry, (_, len)) in account_rec.by_priority.iter() { + if (account_rec.data_size - would_free_size + statement_len <= max_size) + && account_rec.by_priority.len() + 1 - evicted.len() <= max_count { + // Satisfied + break; + } + if evicted.contains(&entry.hash) { + // Already accounted for above + continue; + } + if entry.priority >= priority { + log::debug!( + target: LOG_TARGET, + "Ignored message due to constraints {:?} {} < {}", + HexDisplay::from(&hash), + priority, + entry.priority, + ); + return MaybeInserted::Ignored; + } + evicted.insert(entry.hash); + would_free_size += len; } } - evicted_set + // Now check global constraints as well. 
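The per-account pass above only settles on an eviction set if every statement it would displace has strictly lower priority than the newcomer; otherwise the new statement is ignored outright, and the global pass that follows applies the same rule to `by_global_priority`. Reduced to plain std collections, the feasibility check looks roughly like this (a hypothetical `plan_insert` helper, not the store's actual API):

use std::collections::BTreeMap;

type Hash = [u8; 32];
// One account's statements, ordered by (priority, hash) ascending so iteration
// visits the cheapest statements first; the value is the statement's data size.
type AccountEntries = BTreeMap<(u32, Hash), usize>;

enum Decision {
    // Hashes that must be evicted to make room for the new statement.
    Insert(Vec<Hash>),
    // Constraints cannot be met without evicting an equal or higher priority entry.
    Ignore,
}

fn plan_insert(
    entries: &AccountEntries,
    used_size: usize,
    new_priority: u32,
    new_len: usize,
    max_count: usize,
    max_size: usize,
) -> Decision {
    let mut evicted = Vec::new();
    let mut freed = 0;
    for (&(priority, hash), &len) in entries {
        let count_ok = entries.len() + 1 - evicted.len() <= max_count;
        let size_ok = used_size - freed + new_len <= max_size;
        if count_ok && size_ok {
            break
        }
        if priority >= new_priority {
            // Only strictly lower priority statements may be displaced.
            return Decision::Ignore
        }
        evicted.push(hash);
        freed += len;
    }
    if entries.len() + 1 - evicted.len() <= max_count && used_size - freed + new_len <= max_size {
        Decision::Insert(evicted)
    } else {
        Decision::Ignore
    }
}

fn main() {
    // Mirrors the "account 2" case of the constraints test below: a limit of 2
    // statements and 1000 bytes, with 500 + 100 bytes already stored.
    let mut entries = AccountEntries::new();
    entries.insert((1, [1u8; 32]), 500);
    entries.insert((2, [2u8; 32]), 100);
    match plan_insert(&entries, 600, 3, 500, 2, 1000) {
        Decision::Insert(evicted) => assert_eq!(evicted, vec![[1u8; 32]]),
        Decision::Ignore => unreachable!(),
    }
}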
+ for (entry, len) in self.by_global_priority.iter() { + if (self.total_size - would_free_size + statement_len <= self.max_size) + && self.by_global_priority.len() + 1 - evicted.len() <= self.max_entries { + // Satisfied + break; + } + if evicted.contains(&entry.hash) { + // Already accounted for above + continue; + } + + if entry.priority >= priority { + log::debug!( + target: LOG_TARGET, + "Ignored message due global to constraints {:?} {} < {}", + HexDisplay::from(&hash), + priority, + entry.priority, + ); + return MaybeInserted::Ignored; + } + evicted.insert(entry.hash); + would_free_size += len; + } + + for h in &evicted { + self.make_expired(h, current_time); + } + self.insert_new(hash, *account, priority, statement); + MaybeInserted::Inserted(evicted) } } @@ -406,8 +532,6 @@ impl Store { }, } - let mut index = Index::default(); - index.max_entries = MAX_LIVE_STATEMENTS; let validator = ClientWrapper { client, _block: Default::default() }; let validate_fn = Box::new(move |block, source, statement| { validator.validate_statement(block, source, statement) @@ -415,7 +539,7 @@ impl Store { let store = Store { db, - index: RwLock::new(index), + index: RwLock::new(Index::new()), validate_fn, time_override: None, metrics: PrometheusMetrics::new(prometheus), @@ -425,7 +549,6 @@ impl Store { } fn populate(&self) -> Result<()> { - let current_time = self.timestamp(); { let mut index = self.index.write(); self.db @@ -440,7 +563,9 @@ impl Store { "Statement loaded {:?}", HexDisplay::from(&hash) ); - index.insert_with_meta(hash, statement_with_meta); + if let Some(account_id) = statement_with_meta.statement.account_id() { + index.insert_new(hash, account_id, statement_with_meta.meta.global_priority, &statement_with_meta.statement); + } } true }) @@ -636,13 +761,24 @@ impl StatementStore for Store { return SubmitResult::KnownExpired } }, - IndexQuery::Exists(_) => { + IndexQuery::Exists => { if !source.can_be_resubmitted() { return SubmitResult::Known } }, IndexQuery::Unknown => {}, } + + let Some(account_id) = statement.account_id() else { + log::debug!( + target: LOG_TARGET, + "Statement validation failed: Missing proof ({:?})", + HexDisplay::from(&hash), + ); + self.metrics.report(|metrics| metrics.validations_invalid.inc()); + return SubmitResult::Bad("No statement proof") + }; + // Validate. let at_block = if let Some(Proof::OnChain { block_hash, .. 
}) = statement.proof() { Some(block_hash.clone()) @@ -650,13 +786,13 @@ impl StatementStore for Store { None }; let validation_result = (self.validate_fn)(at_block, source, statement.clone()); - let priority = match validation_result { - Ok(ValidStatement { priority }) => priority, + let validation = match validation_result { + Ok(validation) => validation, Err(InvalidStatement::BadProof) => { log::debug!( target: LOG_TARGET, "Statement validation failed: BadProof, {:?}", - statement + HexDisplay::from(&hash), ); self.metrics.report(|metrics| metrics.validations_invalid.inc()); return SubmitResult::Bad("Bad statement proof") @@ -665,7 +801,7 @@ impl StatementStore for Store { log::debug!( target: LOG_TARGET, "Statement validation failed: NoProof, {:?}", - statement + HexDisplay::from(&hash), ); self.metrics.report(|metrics| metrics.validations_invalid.inc()); return SubmitResult::Bad("Missing statement proof") @@ -675,17 +811,29 @@ impl StatementStore for Store { }; let statement_with_meta = StatementWithMeta { - meta: StatementMeta { priority }, + meta: StatementMeta { global_priority: validation.global_priority }, statement, }; let current_time = self.timestamp(); let mut commit = Vec::new(); - commit.push((col::STATEMENTS, hash.to_vec(), Some(statement_with_meta.encode()))); - commit.push((col::EXPIRED, hash.to_vec(), None)); { let mut index = self.index.write(); - for hash in index.evict() { + + let evicted = match index.insert( + hash, + &statement_with_meta.statement, + &account_id, + &validation, + current_time, + + ) { + MaybeInserted::Ignored => return SubmitResult::Ignored, + MaybeInserted::Inserted(evicted) => evicted, + }; + + commit.push((col::STATEMENTS, hash.to_vec(), Some(statement_with_meta.encode()))); + for hash in evicted { commit.push((col::STATEMENTS, hash.to_vec(), None)); commit.push((col::EXPIRED, hash.to_vec(), Some((hash, current_time).encode()))); } @@ -698,11 +846,10 @@ impl StatementStore for Store { ); return SubmitResult::InternalError(Error::Db(e.to_string())) } - index.insert_with_meta(hash, statement_with_meta); - } + }// Release index lock self.metrics.report(|metrics| metrics.submitted_statements.inc()); let network_priority = - if priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; + if validation.global_priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; log::trace!(target: LOG_TARGET, "Statement submitted: {:?}", HexDisplay::from(&hash)); SubmitResult::New(network_priority) } @@ -721,6 +868,29 @@ impl StatementStore for Store { }, } } + + fn remove(&self, hash: &Hash) -> Result<()> { + let current_time = self.timestamp(); + { + let mut index = self.index.write(); + if index.make_expired(hash, current_time) { + let commit = [ + (col::STATEMENTS, hash.to_vec(), None), + (col::EXPIRED, hash.to_vec(), Some((hash, current_time).encode())), + ]; + if let Err(e) = self.db.commit(commit) { + log::debug!( + target: LOG_TARGET, + "Error removing statement: database error {}, {:?}", + e, + HexDisplay::from(hash), + ); + return Err(Error::Db(e.to_string())) + } + } + } + Ok(()) + } } #[cfg(test)] @@ -730,7 +900,7 @@ mod tests { use sp_statement_store::{ runtime_api::{InvalidStatement, ValidStatement, ValidateStatement}, NetworkPriority, Proof, SignatureVerificationResult, Statement, StatementSource, - StatementStore, SubmitResult, Topic, + StatementStore, SubmitResult, Topic, Channel, AccountId, }; type Extrinsic = sp_runtime::OpaqueExtrinsic; @@ -761,18 +931,26 @@ mod tests { _source: StatementSource, statement: Statement, ) 
-> std::result::Result { + use crate::tests::account; match statement.verify_signature() { - SignatureVerificationResult::Valid(_) => Ok(ValidStatement{priority: 10}), + SignatureVerificationResult::Valid(_) => Ok(ValidStatement{global_priority: 10, max_count: 100, max_size: 1000}), SignatureVerificationResult::Invalid => Err(InvalidStatement::BadProof), SignatureVerificationResult::NoSignature => { if let Some(Proof::OnChain { block_hash, .. }) = statement.proof() { if block_hash == &CORRECT_BLOCK_HASH { - Ok(ValidStatement{priority: 1}) + let (global_priority, max_count, max_size) = match statement.account_id() { + Some(a) if a == account(1) => (10, 1, 1000), + Some(a) if a == account(2) => (20, 2, 1000), + Some(a) if a == account(3) => (30, 3, 1000), + Some(a) if a == account(4) => (40, 4, 1000), + _ => (0, 2, 2000), + }; + Ok(ValidStatement{ global_priority, max_count, max_size }) } else { Err(InvalidStatement::BadProof) } } else { - Ok(ValidStatement{priority: 0}) + Err(InvalidStatement::BadProof) } } } @@ -833,26 +1011,41 @@ mod tests { statement } - fn onchain_statement_with_topics(data: u8, topics: &[Topic]) -> Statement { + fn topic(data: u64) -> Topic { + let mut topic: Topic = Default::default(); + topic[0..8].copy_from_slice(&data.to_le_bytes()); + topic + } + + fn account(id: u64) -> AccountId { + let mut account: AccountId = Default::default(); + account[0..8].copy_from_slice(&id.to_le_bytes()); + account + } + + fn channel(id: u64) -> Channel { + let mut channel: Channel = Default::default(); + channel[0..8].copy_from_slice(&id.to_le_bytes()); + channel + } + + fn statement(account_id: u64, priority: u32, c: Option, data_len: usize) -> Statement { let mut statement = Statement::new(); - statement.set_plain_data(vec![data]); - for i in 0..topics.len() { - statement.set_topic(i, topics[i].clone()); + let mut data = Vec::new(); + data.resize(data_len, 0); + statement.set_plain_data(data); + statement.set_priority(priority); + if let Some(c) = c { + statement.set_channel(channel(c)); } statement.set_proof(Proof::OnChain { block_hash: CORRECT_BLOCK_HASH, - who: Default::default(), + who: account(account_id), event_index: 0, }); statement } - fn topic(data: u64) -> Topic { - let mut topic: Topic = Default::default(); - topic[0..8].copy_from_slice(&data.to_le_bytes()); - topic - } - #[test] fn submit_one() { let (store, _temp) = test_store(); @@ -861,7 +1054,7 @@ mod tests { store.submit(statement0, StatementSource::Network), SubmitResult::New(NetworkPriority::High) ); - let unsigned = Statement::new(); + let unsigned = statement(0, 1, None, 0); assert_eq!( store.submit(unsigned, StatementSource::Network), SubmitResult::New(NetworkPriority::Low) @@ -931,58 +1124,101 @@ mod tests { assert_topics(&[0, 1], &[2, 3]); assert_topics(&[1, 2], &[3]); + assert_topics(&[99], &[]); + assert_topics(&[0, 99], &[]); + assert_topics(&[0, 1, 2, 3, 42], &[]); } #[test] - fn maintenance() { - use super::{EXPIRE_AFTER, MAX_LIVE_STATEMENTS, PURGE_AFTER}; - // Check test assumptions - assert!((MAX_LIVE_STATEMENTS as u64) < EXPIRE_AFTER); - - // first 10 statements are high priority, the rest is low. 
- let (mut store, _temp) = test_store(); - for time in 0..MAX_LIVE_STATEMENTS as u64 { - store.set_time(time); - let statement = if time < 10 { - signed_statement_with_topics(0, &[topic(time)]) - } else { - onchain_statement_with_topics(0, &[topic(time)]) - }; - store.submit(statement, StatementSource::Network); - } - - let first = signed_statement_with_topics(0, &[topic(0)]); - let second = signed_statement_with_topics(0, &[topic(0)]); - assert_eq!(first, second); - assert_eq!(store.statement(&first.hash()).unwrap().unwrap(), first); - assert_eq!(store.index.read().entries.len(), MAX_LIVE_STATEMENTS); - - let first_to_be_evicted = onchain_statement_with_topics(0, &[topic(10)]); - assert_eq!(store.index.read().entries.len(), MAX_LIVE_STATEMENTS); - assert_eq!( - store.statement(&first_to_be_evicted.hash()).unwrap().unwrap(), - first_to_be_evicted - ); + fn constraints() { + let (store, _temp) = test_store(); - // Check that the new statement replaces the old. - store.submit( - signed_statement_with_topics(0, &[topic(MAX_LIVE_STATEMENTS as u64 + 1)]), - StatementSource::Network, - ); - assert_eq!(store.statement(&first_to_be_evicted.hash()).unwrap(), None); + store.index.write().max_size = 3000; + let source = StatementSource::Network; + let ok = SubmitResult::New(NetworkPriority::High); + let ignored = SubmitResult::Ignored; + + // Account 1 (limit = 1 msg, 1000 bytes) + + // Oversized statement is not allowed. Limit for account 1 is 1 msg, 1000 bytes + assert_eq!(store.submit(statement(1, 1, Some(1), 2000), source), ignored); + assert_eq!(store.submit(statement(1, 1, Some(1), 500), source), ok); + // Would not replace channel message with same priority + assert_eq!(store.submit(statement(1, 1, Some(1), 200), source), ignored); + assert_eq!(store.submit(statement(1, 2, Some(1), 600), source), ok); + // Submit another message to another channel with lower priority. 
Should not be allowed + // because msg count limit is 1 + assert_eq!(store.submit(statement(1, 1, Some(2), 100), source), ignored); + assert_eq!(store.index.read().expired.len(), 1); + + // Account 2 (limit = 2 msg, 1000 bytes) + + assert_eq!(store.submit(statement(2, 1, None, 500), source), ok); + assert_eq!(store.submit(statement(2, 2, None, 100), source), ok); + // Should evict priority 1 + assert_eq!(store.submit(statement(2, 3, None, 500), source), ok); + assert_eq!(store.index.read().expired.len(), 2); + // Should evict all + assert_eq!(store.submit(statement(2, 4, None, 1000), source), ok); + assert_eq!(store.index.read().expired.len(), 4); + + // Account 3 (limit = 3 msg, 1000 bytes) + + assert_eq!(store.submit(statement(3, 2, Some(1), 300), source), ok); + assert_eq!(store.submit(statement(3, 3, Some(2), 300), source), ok); + assert_eq!(store.submit(statement(3, 4, Some(3), 300), source), ok); + // Should evict 2 and 3 + assert_eq!(store.submit(statement(3, 5, None, 500), source), ok); + assert_eq!(store.index.read().expired.len(), 6); + + assert_eq!(store.index.read().total_size, 2400); + assert_eq!(store.index.read().entries.len(), 4); + + // Should be over the global size limit + assert_eq!(store.submit(statement(4, 1, None, 700), source), ignored); + // Should be over the global count limit + store.index.write().max_entries = 4; + assert_eq!(store.submit(statement(4, 1, None, 100), source), ignored); + // Should evict statement from account 1 + assert_eq!(store.submit(statement(4, 6, None, 100), source), ok); + assert_eq!(store.index.read().expired.len(), 7); + + + let mut expected_statements = vec![ + statement(2, 4, None, 1000).hash(), + statement(3, 4, Some(3), 300).hash(), + statement(3, 5, None, 500).hash(), + statement(4, 6, None, 100).hash(), + ]; + expected_statements.sort(); + let mut statements: Vec<_> = store.dump().unwrap().into_iter().map(|(hash, _)| hash).collect(); + statements.sort(); + assert_eq!(expected_statements, statements); + } - store.set_time(EXPIRE_AFTER + (MAX_LIVE_STATEMENTS as u64) / 2); + #[test] + fn expired_statements_are_purged() { + use super::PURGE_AFTER; + let (mut store, temp) = test_store(); + let mut statement = statement(1, 1, Some(3), 100); + store.set_time(0); + statement.set_topic(0, topic(4)); + store.submit(statement.clone(), StatementSource::Network); + assert_eq!(store.index.read().entries.len(), 1); + store.remove(&statement.hash()).unwrap(); + assert_eq!(store.index.read().entries.len(), 0); + assert_eq!(store.index.read().by_global_priority.len(), 0); + assert_eq!(store.index.read().accounts.len(), 0); + store.set_time(PURGE_AFTER + 1); store.maintain(); - // Half statements should be expired. - assert_eq!(store.index.read().entries.len(), MAX_LIVE_STATEMENTS / 2); - assert_eq!(store.index.read().expired.len(), MAX_LIVE_STATEMENTS / 2); - - // The high-priority statement should survive. 
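With `max_entries` forced down to 4 in the test above, the final submission only succeeds because the store can evict the entry with the lowest global priority, and `by_global_priority` keeps candidates ordered so that entry is simply the first key. A toy illustration of that ordering, with tuple keys standing in for `PriorityKey` and sizes taken from the statements that survive the test:

use std::collections::BTreeMap;

type Hash = [u8; 32];

fn main() {
    // (global_priority, hash) -> data size, standing in for `by_global_priority`.
    // Global priorities 10, 20 and 30 match what the test mock assigns to
    // accounts 1, 2 and 3.
    let mut by_global_priority: BTreeMap<(u32, Hash), usize> = BTreeMap::new();
    by_global_priority.insert((10, [1u8; 32]), 600);
    by_global_priority.insert((20, [2u8; 32]), 1000);
    by_global_priority.insert((30, [3u8; 32]), 300);
    by_global_priority.insert((30, [4u8; 32]), 500);

    // When the global size or count limit is hit, the entry with the lowest
    // global priority is the first eviction candidate.
    let ((priority, _hash), _len) = by_global_priority.iter().next().unwrap();
    assert_eq!(*priority, 10);
}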
- assert_eq!(store.statement(&first.hash()).unwrap().unwrap(), first); + assert_eq!(store.index.read().expired.len(), 0); + drop(store); - store.set_time(PURGE_AFTER + (MAX_LIVE_STATEMENTS as u64) / 2); - store.maintain(); - assert_eq!(store.index.read().entries.len(), 0); - assert_eq!(store.index.read().expired.len(), MAX_LIVE_STATEMENTS / 2); + let client = std::sync::Arc::new(TestClient); + let mut path: std::path::PathBuf = temp.path().into(); + path.push("db"); + let store = Store::new(&path, client, None).unwrap(); + assert_eq!(store.dump().unwrap().len(), 0); + assert_eq!(store.index.read().expired.len(), 0); } } diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index b8e63d8583c6f..28b3464510d64 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -115,6 +115,19 @@ pub enum Proof { }, } +impl Proof { + /// Return account id for the proof creator. + pub fn account_id(&self) -> AccountId { + match self { + Proof::Sr25519 { signer, ..} => *signer, + Proof::Ed25519 { signer, ..} => *signer, + Proof::Secp256k1Ecdsa { signer, ..} => + ::hash(signer).into(), + Proof::OnChain { who, ..} => *who, + } + } +} + #[derive(Encode, Decode, TypeInfo, sp_core::RuntimeDebug, Clone, PartialEq, Eq)] /// Statement attributes. Each statement is a list of 0 or more fields. Fields may only appear in /// the order declared here. @@ -186,7 +199,7 @@ impl Encode for Statement { /// Result returned by `Statement::verify_signature` pub enum SignatureVerificationResult { /// Signature is valid and matches this account id. - Valid([u8; 32]), + Valid(AccountId), /// Signature has failed verification. Invalid, /// No signature in the proof or no proof. @@ -349,6 +362,31 @@ impl Statement { self.proof.as_ref() } + /// Get proof account id, if any + pub fn account_id(&self) -> Option { + self.proof.as_ref().map(Proof::account_id) + } + + /// Get plain data. + pub fn data(&self) -> Option<&Vec> { + self.data.as_ref() + } + + /// Get plain data len. + pub fn data_len(&self) -> usize { + self.data().map_or(0, Vec::len) + } + + /// Get channel, if any. + pub fn channel(&self) -> Option { + self.channel.clone() + } + + /// Get priority, if any. + pub fn priority(&self) -> Option { + self.priority.clone() + } + /// Return encoded fields that can be signed to construct or verify a proof fn signature_material(&self) -> Vec { self.encoded(false) @@ -376,7 +414,7 @@ impl Statement { /// Set topic by index. pub fn set_topic(&mut self, index: usize, topic: Topic) { - if index <= 4 { + if index < 4 { self.topics[index] = topic; self.num_topics = self.num_topics.max(index as u8 + 1); } diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index ab0e000b991fc..884eb7caa8614 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -30,9 +30,14 @@ use sp_externalities::ExternalitiesExt; /// Information concerning a valid statement. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ValidStatement { - /// Statement priority as calculated by the runtime. Higher priority statements have lower - /// chance of being evicted. - pub priority: u64, + /// Max statement count for this account, as calculated by the runtime. + pub max_count: u64, + /// Max total data size for this account, as calculated by the runtime. + pub max_size: u64, + /// Global priority value. 
This is used to prioritize statements on the global scale. + /// If the global loimit of messages is reached, the statement with the lowest priority will be + /// removed first. + pub global_priority: u32, } /// An reason for an invalid statement. @@ -107,6 +112,8 @@ pub enum SubmitResult { Bad, /// The store is not available. NotAvailable, + /// Statement could not be inserted because of priority or size checks. + Full, } /// Export functions for the WASM host. @@ -123,6 +130,7 @@ pub trait Io { match store.submit(statement, StatementSource::Chain) { crate::SubmitResult::New(_) => SubmitResult::OkNew, crate::SubmitResult::Known => SubmitResult::OkKnown, + crate::SubmitResult::Ignored => SubmitResult::Full, // This should not happen for `StatementSource::Chain`. An existing statement will // be overwritten. crate::SubmitResult::KnownExpired => SubmitResult::Bad, diff --git a/primitives/statement-store/src/store_api.rs b/primitives/statement-store/src/store_api.rs index c038b2adb9b1e..77c9017618d39 100644 --- a/primitives/statement-store/src/store_api.rs +++ b/primitives/statement-store/src/store_api.rs @@ -50,6 +50,8 @@ pub enum SubmitResult { Known, /// Known statement that's already expired. KnownExpired, + /// Priority is too low or the size is too big. + Ignored, /// Statement failed validation. Bad(&'static str), /// Internal store error. @@ -88,4 +90,7 @@ pub trait StatementStore: Send + Sync { /// Submit a SCALE-encoded statement. fn submit_encoded(&self, statement: &[u8], source: StatementSource) -> SubmitResult; + + /// Remove a statement from the store. + fn remove(&self, hash: &Hash) -> Result<()>; } From c5555e95940fd65921d30fdf954094b466a742d9 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 28 Mar 2023 20:23:07 +0200 Subject: [PATCH 23/78] Updated pallet constraint calculation --- bin/node-template/runtime/src/lib.rs | 6 +- bin/node/runtime/src/lib.rs | 6 +- client/network/statement/src/lib.rs | 1 + client/rpc/src/statement/mod.rs | 3 +- client/statement-store/src/lib.rs | 132 ++++++++++-------- frame/statement/src/lib.rs | 55 +++++--- frame/statement/src/mock.rs | 14 +- frame/statement/src/tests.rs | 44 +++++- primitives/statement-store/src/lib.rs | 8 +- primitives/statement-store/src/runtime_api.rs | 4 +- 10 files changed, 178 insertions(+), 95 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index d52997f8d9074..95455ee4d7ea6 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -268,13 +268,15 @@ impl pallet_sudo::Config for Runtime { } parameter_types! { - pub StatementPriorityBalance: Balance = 1000; + pub StatementCost: Balance = 1000; + pub StatementByteCost: Balance = 10; } impl pallet_statement::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type PriorityBalance = StatementPriorityBalance; + type StatementCost = StatementCost; + type ByteCost = StatementByteCost; } /// Configure the pallet-template in pallets/template. diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 2e8b3d18226db..c784df3eb962f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1745,13 +1745,15 @@ impl frame_benchmarking_pallet_pov::Config for Runtime { } parameter_types! 
{ - pub StatementPriorityBalance: Balance = 10 * CENTS; + pub StatementCost: Balance = 1 * DOLLARS; + pub StatementByteCost: Balance = 100 * MILLICENTS; } impl pallet_statement::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type PriorityBalance = StatementPriorityBalance; + type StatementCost = StatementCost; + type ByteCost = StatementByteCost; } construct_runtime!( diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index 097815dba06f1..f964a1ed62b27 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -470,6 +470,7 @@ where self.network.report_peer(who, rep::GOOD_STATEMENT), SubmitResult::Known => self.network.report_peer(who, rep::ANY_STATEMENT_REFUND), SubmitResult::KnownExpired => {}, + SubmitResult::Ignored => {}, SubmitResult::Bad(_) => self.network.report_peer(who, rep::BAD_STATEMENT), SubmitResult::InternalError(_) => {}, } diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index 9dbd65f5413a9..543b8d291332a 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -92,8 +92,9 @@ impl StatementApiServer for StatementStore { // `KnownExpired` should not happen. Expired statements submitted with // `StatementSource::Rpc` should be renewed. SubmitResult::KnownExpired => - Err(Error::StatementStore("Submitted an expired statement".into()).into()), + Err(Error::StatementStore("Submitted an expired statement.".into()).into()), SubmitResult::Bad(e) => Err(Error::StatementStore(e.into()).into()), + SubmitResult::Ignored => Err(Error::StatementStore("Store is full.".into()).into()), SubmitResult::InternalError(e) => Err(Error::StatementStore(e.to_string()).into()), } } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index c1ab1e88a5e7a..76cbbcdf22bc8 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -34,7 +34,8 @@ use sp_core::{hexdisplay::HexDisplay, Decode, Encode}; use sp_runtime::traits::Block as BlockT; use sp_statement_store::{ runtime_api::{InvalidStatement, StatementSource, ValidStatement, ValidateStatement}, - BlockHash, DecryptionKey, Hash, NetworkPriority, Proof, Result, Statement, SubmitResult, Topic, AccountId, Channel, + AccountId, BlockHash, Channel, DecryptionKey, Hash, NetworkPriority, Proof, Result, Statement, + SubmitResult, Topic, }; use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -86,19 +87,13 @@ struct StatementsForAccount { impl PartialOrd for PriorityKey { fn partial_cmp(&self, other: &Self) -> Option { - Some( - self.priority - .cmp(&other.priority) - .then_with(|| self.hash.cmp(&other.hash)) - ) + Some(self.priority.cmp(&other.priority).then_with(|| self.hash.cmp(&other.hash))) } } impl Ord for PriorityKey { fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.priority - .cmp(&other.priority) - .then_with(|| self.hash.cmp(&other.hash)) + self.priority.cmp(&other.priority).then_with(|| self.hash.cmp(&other.hash)) } } @@ -107,7 +102,8 @@ struct Index { by_topic: HashMap>, by_dec_key: HashMap>, statement_topics: HashMap; 4], Option)>, - entries: HashMap, // Statement hash -> (Account id, global_priority, priority) + entries: HashMap, /* Statement hash -> (Account id, + * global_priority, priority) */ expired: HashMap, // Value is expiration timestamp. 
accounts: HashMap, by_global_priority: BTreeMap, @@ -183,19 +179,21 @@ enum IndexQuery { enum MaybeInserted { Inserted(HashSet), - Ignored + Ignored, } impl Index { fn new() -> Index { - Index { - max_entries: MAX_LIVE_STATEMENTS, - max_size: MAX_TOTAL_SIZE, - .. Default::default() - } + Index { max_entries: MAX_LIVE_STATEMENTS, max_size: MAX_TOTAL_SIZE, ..Default::default() } } - fn insert_new(&mut self, hash: Hash, account: AccountId, global_priority: u32, statement: &Statement) { + fn insert_new( + &mut self, + hash: Hash, + account: AccountId, + global_priority: u32, + statement: &Statement, + ) { let mut all_topics = [None; 4]; let mut nt = 0; while let Some(t) = statement.topic(nt) { @@ -212,17 +210,19 @@ impl Index { } let priority = statement.priority().unwrap_or(0); self.entries.insert(hash, (account.clone(), global_priority, priority)); - self.by_global_priority.insert(PriorityKey { hash: hash.clone(), priority: global_priority }, statement.data_len()); + self.by_global_priority.insert( + PriorityKey { hash: hash.clone(), priority: global_priority }, + statement.data_len(), + ); self.total_size += statement.data_len(); let mut account_info = self.accounts.entry(account).or_default(); account_info.data_size += statement.data_len(); if let Some(channel) = statement.channel() { account_info.channels.insert(channel, ChannelEntry { hash, priority }); } - account_info.by_priority.insert(PriorityKey { - hash, - priority, - }, (statement.channel(), statement.data_len())); + account_info + .by_priority + .insert(PriorityKey { hash, priority }, (statement.channel(), statement.data_len())); } fn query(&self, hash: &Hash) -> IndexQuery { @@ -262,7 +262,7 @@ impl Index { } sets[i] = set.expect("Function returns if set is None"); } - let sets = &mut sets[0 .. topics.len()]; + let sets = &mut sets[0..topics.len()]; if sets.is_empty() && key.is_none() { // Iterate all entries for h in self.entries.keys() { @@ -336,7 +336,9 @@ impl Index { } } self.expired.insert(hash.clone(), current_time); - if let std::collections::hash_map::Entry::Occupied(mut account_rec) = self.accounts.entry(account) { + if let std::collections::hash_map::Entry::Occupied(mut account_rec) = + self.accounts.entry(account) + { let key = PriorityKey { hash: hash.clone(), priority }; if let Some((channel, len)) = account_rec.get_mut().by_priority.remove(&key) { account_rec.get_mut().data_size -= len; @@ -355,7 +357,14 @@ impl Index { } } - fn insert(&mut self, hash: Hash, statement: &Statement, account: &AccountId, validation: &ValidStatement, current_time: u64) -> MaybeInserted { + fn insert( + &mut self, + hash: Hash, + statement: &Statement, + account: &AccountId, + validation: &ValidStatement, + current_time: u64, + ) -> MaybeInserted { let statement_len = statement.data_len(); if statement_len > validation.max_size as usize { log::debug!( @@ -364,7 +373,7 @@ impl Index { HexDisplay::from(&hash), statement_len, ); - return MaybeInserted::Ignored; + return MaybeInserted::Ignored } let mut evicted = HashSet::new(); @@ -386,7 +395,7 @@ impl Index { priority, channel_record.priority, ); - return MaybeInserted::Ignored; + return MaybeInserted::Ignored } else { // Would replace channel message. Still need to check for size constraints // below. 
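Channels give each account at most one live statement per `(account, channel)` pair: a newcomer on an occupied channel is ignored unless its priority is strictly higher, and even then it must still pass the size checks that continue below before the old statement is replaced. A minimal sketch of just that comparison (hypothetical helper, simplified types):

use std::collections::HashMap;

type Hash = [u8; 32];
type Channel = [u8; 32];

struct ChannelEntry {
    hash: Hash,
    priority: u32,
}

// Decide what admitting a new statement on `channel` would require: `Ok(None)`
// if the channel is free, `Ok(Some(hash))` if the current holder must be
// evicted, `Err(())` if the newcomer loses the priority comparison.
fn channel_replacement(
    channels: &HashMap<Channel, ChannelEntry>,
    channel: Channel,
    new_priority: u32,
) -> Result<Option<Hash>, ()> {
    match channels.get(&channel) {
        None => Ok(None),
        Some(existing) if new_priority > existing.priority => Ok(Some(existing.hash)),
        Some(_) => Err(()),
    }
}

fn main() {
    let mut channels = HashMap::new();
    channels.insert([1u8; 32], ChannelEntry { hash: [9u8; 32], priority: 1 });
    // Same priority does not replace; higher priority evicts the holder.
    assert_eq!(channel_replacement(&channels, [1u8; 32], 1), Err(()));
    assert_eq!(channel_replacement(&channels, [1u8; 32], 2), Ok(Some([9u8; 32])));
    // A free channel needs no eviction.
    assert_eq!(channel_replacement(&channels, [2u8; 32], 1), Ok(None));
}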
@@ -398,7 +407,10 @@ impl Index { HexDisplay::from(&channel_record.hash), channel_record.priority, ); - let key = PriorityKey { hash: channel_record.hash, priority: channel_record.priority }; + let key = PriorityKey { + hash: channel_record.hash, + priority: channel_record.priority, + }; if let Some((_channel, len)) = account_rec.by_priority.get(&key) { would_free_size = *len; evicted.insert(channel_record.hash); @@ -408,14 +420,15 @@ impl Index { } // Check if we can evict enough lower priority statements to satisfy constraints for (entry, (_, len)) in account_rec.by_priority.iter() { - if (account_rec.data_size - would_free_size + statement_len <= max_size) - && account_rec.by_priority.len() + 1 - evicted.len() <= max_count { + if (account_rec.data_size - would_free_size + statement_len <= max_size) && + account_rec.by_priority.len() + 1 - evicted.len() <= max_count + { // Satisfied - break; + break } if evicted.contains(&entry.hash) { // Already accounted for above - continue; + continue } if entry.priority >= priority { log::debug!( @@ -425,7 +438,7 @@ impl Index { priority, entry.priority, ); - return MaybeInserted::Ignored; + return MaybeInserted::Ignored } evicted.insert(entry.hash); would_free_size += len; @@ -433,14 +446,15 @@ impl Index { } // Now check global constraints as well. for (entry, len) in self.by_global_priority.iter() { - if (self.total_size - would_free_size + statement_len <= self.max_size) - && self.by_global_priority.len() + 1 - evicted.len() <= self.max_entries { - // Satisfied - break; + if (self.total_size - would_free_size + statement_len <= self.max_size) && + self.by_global_priority.len() + 1 - evicted.len() <= self.max_entries + { + // Satisfied + break } if evicted.contains(&entry.hash) { // Already accounted for above - continue; + continue } if entry.priority >= priority { @@ -451,7 +465,7 @@ impl Index { priority, entry.priority, ); - return MaybeInserted::Ignored; + return MaybeInserted::Ignored } evicted.insert(entry.hash); would_free_size += len; @@ -564,7 +578,12 @@ impl Store { HexDisplay::from(&hash) ); if let Some(account_id) = statement_with_meta.statement.account_id() { - index.insert_new(hash, account_id, statement_with_meta.meta.global_priority, &statement_with_meta.statement); + index.insert_new( + hash, + account_id, + statement_with_meta.meta.global_priority, + &statement_with_meta.statement, + ); } } true @@ -585,7 +604,7 @@ impl Store { } true }) - .map_err(|e| Error::Db(e.to_string()))?; + .map_err(|e| Error::Db(e.to_string()))?; } self.maintain(); @@ -634,7 +653,8 @@ impl Store { pub fn maintain(&self) { log::trace!(target: LOG_TARGET, "Started store maintenance"); let deleted = self.index.write().maintain(self.timestamp()); - let deleted: Vec<_> = deleted.into_iter().map(|hash| (col::EXPIRED, hash.to_vec(), None)).collect(); + let deleted: Vec<_> = + deleted.into_iter().map(|hash| (col::EXPIRED, hash.to_vec(), None)).collect(); let count = deleted.len() as u64; if let Err(e) = self.db.commit(deleted) { log::warn!(target: LOG_TARGET, "Error writing to the statement database: {:?}", e); @@ -756,16 +776,14 @@ impl StatementStore for Store { fn submit(&self, statement: Statement, source: StatementSource) -> SubmitResult { let hash = statement.hash(); match self.index.read().query(&hash) { - IndexQuery::Expired => { + IndexQuery::Expired => if !source.can_be_resubmitted() { return SubmitResult::KnownExpired - } - }, - IndexQuery::Exists => { + }, + IndexQuery::Exists => if !source.can_be_resubmitted() { return SubmitResult::Known - } - }, 
+ }, IndexQuery::Unknown => {}, } @@ -826,7 +844,6 @@ impl StatementStore for Store { &account_id, &validation, current_time, - ) { MaybeInserted::Ignored => return SubmitResult::Ignored, MaybeInserted::Inserted(evicted) => evicted, @@ -846,10 +863,13 @@ impl StatementStore for Store { ); return SubmitResult::InternalError(Error::Db(e.to_string())) } - }// Release index lock + } // Release index lock self.metrics.report(|metrics| metrics.submitted_statements.inc()); - let network_priority = - if validation.global_priority > 0 { NetworkPriority::High } else { NetworkPriority::Low }; + let network_priority = if validation.global_priority > 0 { + NetworkPriority::High + } else { + NetworkPriority::Low + }; log::trace!(target: LOG_TARGET, "Statement submitted: {:?}", HexDisplay::from(&hash)); SubmitResult::New(network_priority) } @@ -899,8 +919,8 @@ mod tests { use sp_core::Pair; use sp_statement_store::{ runtime_api::{InvalidStatement, ValidStatement, ValidateStatement}, - NetworkPriority, Proof, SignatureVerificationResult, Statement, StatementSource, - StatementStore, SubmitResult, Topic, Channel, AccountId, + AccountId, Channel, NetworkPriority, Proof, SignatureVerificationResult, Statement, + StatementSource, StatementStore, SubmitResult, Topic, }; type Extrinsic = sp_runtime::OpaqueExtrinsic; @@ -1183,7 +1203,6 @@ mod tests { assert_eq!(store.submit(statement(4, 6, None, 100), source), ok); assert_eq!(store.index.read().expired.len(), 7); - let mut expected_statements = vec![ statement(2, 4, None, 1000).hash(), statement(3, 4, Some(3), 300).hash(), @@ -1191,7 +1210,8 @@ mod tests { statement(4, 6, None, 100).hash(), ]; expected_statements.sort(); - let mut statements: Vec<_> = store.dump().unwrap().into_iter().map(|(hash, _)| hash).collect(); + let mut statements: Vec<_> = + store.dump().unwrap().into_iter().map(|(hash, _)| hash).collect(); statements.sort(); assert_eq!(expected_statements, statements); } diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 1cf55ce784930..0c7275ad519da 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -27,8 +27,8 @@ use frame_support::{ pallet_prelude::*, - sp_runtime::{traits::Zero, SaturatedConversion}, - traits::Currency, + sp_runtime::{traits::CheckedDiv, SaturatedConversion}, + traits::fungible::Inspect, }; use frame_system::pallet_prelude::*; use sp_statement_store::{ @@ -45,12 +45,19 @@ pub use pallet::*; const LOG_TARGET: &str = "runtime::statement"; +const MIN_ALLOWED_STATEMENTS: u32 = 4; +const MAX_ALLOWED_STATEMENTS: u32 = 10; +const MIN_ALLOWED_BYTES: u32 = 1024; +const MAX_ALLOWED_BYTES: u32 = 4096; + #[frame_support::pallet] pub mod pallet { use super::*; pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Inspect<::AccountId>>::Balance; + + pub type AccountIdOf = ::AccountId; #[pallet::config] pub trait Config: frame_system::Config @@ -59,11 +66,13 @@ pub mod pallet { { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; - /// Account balance. - type Currency: Currency<::AccountId>; + /// The currency which is used to calculate account limits. + type Currency: Inspect; /// Min balance for priority statements. 
#[pallet::constant] - type PriorityBalance: Get>; + type StatementCost: Get>; + #[pallet::constant] + type ByteCost: Get>; } #[pallet::pallet] @@ -91,7 +100,7 @@ pub mod pallet { fn offchain_worker(now: BlockNumberFor) { log::trace!(target: LOG_TARGET, "Collecting statements at #{:?}", now); Pallet::::collect_statements(); - Pallet::::dispatch_statemens(); + Pallet::::dispatch_statements(); } } } @@ -151,16 +160,26 @@ where }, }, }; - let priority_cost = T::PriorityBalance::get(); - let priority: u64 = if priority_cost.is_zero() { - 0 - } else { - let balance = T::Currency::free_balance(&account); - let priority = balance / priority_cost; - priority.saturated_into() - }; - - Ok(ValidStatement { priority }) + let statement_cost = T::StatementCost::get(); + let byte_cost = T::ByteCost::get(); + let priority_cost = statement_cost; + let balance = >>::balance(&account); + let global_priority = + balance.checked_div(&priority_cost).unwrap_or_default().saturated_into(); + let max_count = balance + .checked_div(&statement_cost) + .unwrap_or_default() + .saturated_into::() + .min(MAX_ALLOWED_STATEMENTS) + .max(MIN_ALLOWED_STATEMENTS); + let max_size = balance + .checked_div(&byte_cost) + .unwrap_or_default() + .saturated_into::() + .min(MAX_ALLOWED_BYTES) + .max(MIN_ALLOWED_BYTES); + + Ok(ValidStatement { global_priority, max_count, max_size }) } /// Submit a statement event. The statement will be picked up by the offchain worker and @@ -187,7 +206,7 @@ where } } - fn dispatch_statemens() { + fn dispatch_statements() { let all_statements = sp_statement_store::runtime_api::io::dump(); for (hash, _statement) in all_statements { // TODO: Custom statement handling diff --git a/frame/statement/src/mock.rs b/frame/statement/src/mock.rs index c78681dcb55a6..9759e2ee41b0d 100644 --- a/frame/statement/src/mock.rs +++ b/frame/statement/src/mock.rs @@ -97,16 +97,20 @@ ord_parameter_types! 
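To make the new limit calculation concrete, here is a small, self-contained sketch of the same arithmetic using plain `u64` values in place of `BalanceOf<T>` (the real pallet goes through `checked_div` and `saturated_into`, and a later clean-up in this series replaces the `min`/`max` pair with the equivalent `clamp`). The constants and the numbers in the assertions mirror the mock configuration and tests below:

const MIN_ALLOWED_STATEMENTS: u32 = 4;
const MAX_ALLOWED_STATEMENTS: u32 = 10;
const MIN_ALLOWED_BYTES: u32 = 1024;
const MAX_ALLOWED_BYTES: u32 = 4096;

// Returns (global_priority, max_count, max_size) for a given balance.
fn limits(balance: u64, statement_cost: u64, byte_cost: u64) -> (u64, u32, u32) {
    let global_priority = balance.checked_div(statement_cost).unwrap_or_default();
    let max_count = (balance.checked_div(statement_cost).unwrap_or_default() as u32)
        .clamp(MIN_ALLOWED_STATEMENTS, MAX_ALLOWED_STATEMENTS);
    let max_size = (balance.checked_div(byte_cost).unwrap_or_default() as u32)
        .clamp(MIN_ALLOWED_BYTES, MAX_ALLOWED_BYTES);
    (global_priority, max_count, max_size)
}

fn main() {
    // StatementCost = 1000 and ByteCost = 2, as in the mock runtime below.
    // A balance of 6000 buys priority 6, 6 statements and 3000 bytes.
    assert_eq!(limits(6000, 1000, 2), (6, 6, 3000));
    // A large balance saturates at the 10-statement / 4096-byte caps.
    assert_eq!(limits(500_000, 1000, 2), (500, 10, 4096));
}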
{ impl Config for Test { type RuntimeEvent = RuntimeEvent; type Currency = Balances; - type PriorityBalance = ConstU64<10>; + type StatementCost = ConstU64<1000>; + type ByteCost = ConstU64<2>; } pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); let balances = pallet_balances::GenesisConfig:: { - balances: vec![( - sp_core::sr25519::Pair::from_string("//Alice", None).unwrap().public().into(), - 200, - )], + balances: vec![ + (sp_core::sr25519::Pair::from_string("//Alice", None).unwrap().public().into(), 6000), + ( + sp_core::sr25519::Pair::from_string("//Charlie", None).unwrap().public().into(), + 500000, + ), + ], }; balances.assimilate_storage(&mut t).unwrap(); t.into() diff --git a/frame/statement/src/tests.rs b/frame/statement/src/tests.rs index 4d2a22e247feb..35b94d0da869a 100644 --- a/frame/statement/src/tests.rs +++ b/frame/statement/src/tests.rs @@ -35,19 +35,40 @@ fn sign_and_validate_no_balance() { let mut statement = Statement::new(); statement.sign_sr25519_private(&pair); let result = Pallet::::validate_statement(StatementSource::Chain, statement); - assert_eq!(Ok(ValidStatement { priority: 0 }), result); + assert_eq!( + Ok(ValidStatement { + global_priority: 0, + max_count: MIN_ALLOWED_STATEMENTS, + max_size: MIN_ALLOWED_BYTES + }), + result + ); let pair = sp_core::ed25519::Pair::from_string("//Bob", None).unwrap(); let mut statement = Statement::new(); statement.sign_ed25519_private(&pair); let result = Pallet::::validate_statement(StatementSource::Chain, statement); - assert_eq!(Ok(ValidStatement { priority: 0 }), result); + assert_eq!( + Ok(ValidStatement { + global_priority: 0, + max_count: MIN_ALLOWED_STATEMENTS, + max_size: MIN_ALLOWED_BYTES + }), + result + ); let pair = sp_core::ecdsa::Pair::from_string("//Bob", None).unwrap(); let mut statement = Statement::new(); statement.sign_ecdsa_private(&pair); let result = Pallet::::validate_statement(StatementSource::Chain, statement); - assert_eq!(Ok(ValidStatement { priority: 0 }), result); + assert_eq!( + Ok(ValidStatement { + global_priority: 0, + max_count: MIN_ALLOWED_STATEMENTS, + max_size: MIN_ALLOWED_BYTES + }), + result + ); }); } @@ -58,7 +79,20 @@ fn validate_with_balance() { let mut statement = Statement::new(); statement.sign_sr25519_private(&pair); let result = Pallet::::validate_statement(StatementSource::Chain, statement); - assert_eq!(Ok(ValidStatement { priority: 20 }), result); + assert_eq!(Ok(ValidStatement { global_priority: 6, max_count: 6, max_size: 3000 }), result); + + let pair = sp_core::sr25519::Pair::from_string("//Charlie", None).unwrap(); + let mut statement = Statement::new(); + statement.sign_sr25519_private(&pair); + let result = Pallet::::validate_statement(StatementSource::Chain, statement); + assert_eq!( + Ok(ValidStatement { + global_priority: 500, + max_count: MAX_ALLOWED_STATEMENTS, + max_size: MAX_ALLOWED_BYTES + }), + result + ); }); } @@ -99,7 +133,7 @@ fn validate_event() { block_hash: parent_hash.into(), }); let result = Pallet::::validate_statement(StatementSource::Chain, statement.clone()); - assert_eq!(Ok(ValidStatement { priority: 20 }), result); + assert_eq!(Ok(ValidStatement { global_priority: 6, max_count: 6, max_size: 3000 }), result); // Use wrong event index statement.set_proof(Proof::OnChain { diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 28b3464510d64..128a8408342b1 100644 --- a/primitives/statement-store/src/lib.rs +++ 
b/primitives/statement-store/src/lib.rs @@ -119,11 +119,11 @@ impl Proof { /// Return account id for the proof creator. pub fn account_id(&self) -> AccountId { match self { - Proof::Sr25519 { signer, ..} => *signer, - Proof::Ed25519 { signer, ..} => *signer, - Proof::Secp256k1Ecdsa { signer, ..} => + Proof::Sr25519 { signer, .. } => *signer, + Proof::Ed25519 { signer, .. } => *signer, + Proof::Secp256k1Ecdsa { signer, .. } => ::hash(signer).into(), - Proof::OnChain { who, ..} => *who, + Proof::OnChain { who, .. } => *who, } } } diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 884eb7caa8614..df03ea98e2420 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -31,9 +31,9 @@ use sp_externalities::ExternalitiesExt; #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct ValidStatement { /// Max statement count for this account, as calculated by the runtime. - pub max_count: u64, + pub max_count: u32, /// Max total data size for this account, as calculated by the runtime. - pub max_size: u64, + pub max_size: u32, /// Global priority value. This is used to prioritize statements on the global scale. /// If the global loimit of messages is reached, the statement with the lowest priority will be /// removed first. From 1171ba09c49a7c9501b87865f1942a6ac2fb4049 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 28 Mar 2023 20:26:55 +0200 Subject: [PATCH 24/78] Added small test --- primitives/statement-store/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 128a8408342b1..053490279582a 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -501,6 +501,9 @@ mod test { statement.set_topic(1, topic2.clone()); statement.set_plain_data(data.clone()); + statement.set_topic(5, [0x55; 32]); + assert_eq!(statement.topic(5), None); + let fields = vec![ Field::AuthenticityProof(proof.clone()), Field::DecryptionKey(decryption_key.clone()), From 3d571cab12db3d6bdce45ed2ab5899c3a9db3332 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 1 Apr 2023 11:56:24 +0200 Subject: [PATCH 25/78] Added remove function to the APIs --- client/rpc-api/src/statement/mod.rs | 4 ++++ client/rpc/src/statement/mod.rs | 8 ++++++++ primitives/statement-store/src/runtime_api.rs | 7 +++++++ 3 files changed, 19 insertions(+) diff --git a/client/rpc-api/src/statement/mod.rs b/client/rpc-api/src/statement/mod.rs index 9a0b65d30c7f1..39ec52cbea013 100644 --- a/client/rpc-api/src/statement/mod.rs +++ b/client/rpc-api/src/statement/mod.rs @@ -53,4 +53,8 @@ pub trait StatementApi { /// Submit a pre-encoded statement. #[method(name = "statement_submit")] fn submit(&self, encoded: Bytes) -> RpcResult<()>; + + /// Remove a statement from the store. + #[method(name = "statement_remove")] + fn remove(&self, statement_hash: [u8; 32]) -> RpcResult<()>; } diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index 543b8d291332a..4f4a7dc00e0a0 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -98,4 +98,12 @@ impl StatementApiServer for StatementStore { SubmitResult::InternalError(e) => Err(Error::StatementStore(e.to_string()).into()), } } + + fn remove(&self, hash: [u8; 32]) -> RpcResult<()> { + Ok(self + .store + .remove(&hash) + .map_err(|e| Error::StatementStore(e.to_string()))? 
+ ) + } } diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index df03ea98e2420..d9e0a680eeacb 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -181,4 +181,11 @@ pub trait Io { Vec::default() } } + + /// Remove a statement from the store by hash. + fn remove(&mut self, hash: &Hash) { + if let Some(StatementStoreExt(store)) = self.extension::() { + store.remove(hash).unwrap_or_default() + } + } } From 85a31c9f194a9c91d7663d083a3e132d5008765c Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Sat, 1 Apr 2023 12:04:00 +0200 Subject: [PATCH 26/78] Copy-paste spec into readme --- primitives/statement-store/README.md | 39 ++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/primitives/statement-store/README.md b/primitives/statement-store/README.md index 1ac38dfef7260..7dc56748e74f2 100644 --- a/primitives/statement-store/README.md +++ b/primitives/statement-store/README.md @@ -1,4 +1,39 @@ -A crate which contains primitives related to the statement store. This mainly -includes the statement structure. +Statement store is an off-chain data-store for signed statements accessible via RPC and OCW. + +Nodes hold a number of statements with a proof of authenticity owing to an account ID. OCWs can place items in the data-store (with valid signatures) for any accounts whose keys they control. Users can also submit pre-signed statements via RPC. Statements can also be submitted from on-chain logic through an on-chain event. + +A new system event `NewStatement` is added to the runtime. This event allows any account on-chain to declare that they want to make a statement for the store. Within the node store and for broadcasting, the statement would be accompanied with the hash of the block and index of the event within it, essentially taking the place of a real signature. + +Statements comprise an optional proof of authenticity (e.g. a signature) and a number of fields. For statements without a proof, nodes would gossip statements randomly with a rate-limiter to minimise the chance of being overrun by a misbehaving node. These will generally be disregarded by nodes unless they are gossiped by several different peers or if a peer pays for it somehow (e.g. gossiping something valuable). + +Each field is effectively a key/value pair. Fields must be sorted and the same field type may not be repeated. Depending on which keys are present, clients may index the message for ease of retrieval. + +Formally, `Statement` is equivalent to the type `Vec` and `Field` is the SCALE-encoded enumeration: +- 0: `AuthenticityProof(Proof)`: The signature of the message. For cryptography where the public key cannot be derived from the signature together with the message data, then this will also include the signer's public key. The message data is all fields of the messages fields except the signature concatenated together *without the length prefix that a `Vec` would usually imply*. This is so that the signature can be derived without needing to re-encode the statement. +- 1: `DecryptionKey([u8; 32])`: The decryption key identifier which should be used to decrypt the statement's data. +- 2: `Priority(u32)`: Priority specifier. Higher priority statements should be kept around at the cost of lower priority statements if multiple statements from the same sender are competing for persistence or transport. Nodes should disregard when considering unsigned statements. 
+- 3: `Channel([u8; 32])`: The channel identifier. Only one message of a given channel should be retained at once (the one of highest priority). Nodes should disregard when considering unsigned statements. +- 4: `Topic1([u8; 32]))`: First topic identifier. +- 5: `Topic2([u8; 32]))`: Second topic identifier. +- 6: `Topic3([u8; 32]))`: Third topic identifier. +- 7: `Topic4([u8; 32]))`: Fourth topic identifier. +- 8: `Data(Vec)`: General data segment. No special meaning. + +`Proof` is defined as the SCALE-encoded enumeration: +- 0: `Sr25519 { signature: [u8; 64], signer: [u8; 32] }` +- 1: `Ed25519 { signature: [u8; 64], signer: [u8; 32] )` +- 2: `Secp256k1Ecdsa { signature: [u8; 65], signer: [u8; 33] )` +- 3: `OnChain { who: [u8; 32], block_hash: [u8; 32], event_index: u64 }` + +### Potential uses + +Potential use-cases are various and include: +- ring-signature aggregation; +- messaging; +- state-channels; +- deferral of the initial "advertising" phase of multi-party transactions; +- publication of preimage data whose hash is referenced on-chain; +- effective transferal of fee payment to a second-party. + License: Apache-2.0 From b07775fc922f4091682da94b096d53c9f69e0910 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 1 Apr 2023 13:01:50 +0200 Subject: [PATCH 27/78] Comments --- client/rpc/src/statement/mod.rs | 6 +----- client/statement-store/src/lib.rs | 27 +++++++++++++++++++++++++-- 2 files changed, 26 insertions(+), 7 deletions(-) diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index 4f4a7dc00e0a0..c249b01747fce 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -100,10 +100,6 @@ impl StatementApiServer for StatementStore { } fn remove(&self, hash: [u8; 32]) -> RpcResult<()> { - Ok(self - .store - .remove(&hash) - .map_err(|e| Error::StatementStore(e.to_string()))? - ) + Ok(self.store.remove(&hash).map_err(|e| Error::StatementStore(e.to_string()))?) } } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 76cbbcdf22bc8..9d4b3efed3e67 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -16,6 +16,29 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +// Constraint management. +// +// Each time a new statement is inserted into the store, it is first validated with the runtime +// Validation function computes `global_priority`, 'max_count' and `max_size` for a statement. +// The following constraints are then checked: +// * For a given account id, there may be at most `max_count` statements with `max_size` total data +// size. To satisfy this, statements for this account ID are removed from the store starting with +// the lowest priority until a constraint is satisfied. +// * There may not be more than `MAX_TOTAL_STATEMENTS` total statements with `MAX_TOTAL_SIZE` size. +// To satisfy this, statements are removed from the store starting with the lowest +// `global_priority` until a constraint is satisfied. +// +// When a new statement is inserted that would not satisfy constraints in the first place, no +// statements are deleted and `Ignored` result is returned. +// The order in which statements with the same priority are deleted is unspecified. +// +// Statement expiration. +// +// Each time a statement is removed from the store (Either evicted by higher priority statement or +// explicitly with the `remove` function) the statement is marked as expired. 
Expired statements +// can't be added to the store for `PURGE_AFTER` seconds. This is to prevent old statements from +// being propagated on the network. + //! Disk-backed statement store. #![warn(missing_docs)] @@ -48,7 +71,7 @@ const CURRENT_VERSION: u32 = 1; const LOG_TARGET: &str = "statement-store"; const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h -const MAX_LIVE_STATEMENTS: usize = 8192; +const MAX_TOTAL_STATEMENTS: usize = 8192; const MAX_TOTAL_SIZE: usize = 64 * 1024 * 1024; /// Suggested maintenance period. A good value to call `Store::maintain` with. @@ -184,7 +207,7 @@ enum MaybeInserted { impl Index { fn new() -> Index { - Index { max_entries: MAX_LIVE_STATEMENTS, max_size: MAX_TOTAL_SIZE, ..Default::default() } + Index { max_entries: MAX_TOTAL_STATEMENTS, max_size: MAX_TOTAL_SIZE, ..Default::default() } } fn insert_new( From 659b494927c0936109b7dd0b22c94995777bd5b7 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 2 Apr 2023 11:24:47 +0200 Subject: [PATCH 28/78] Made the store optional --- bin/node-template/node/src/service.rs | 2 +- bin/node/cli/src/service.rs | 2 +- client/service/src/builder.rs | 89 +++++++++++++++------------ client/service/src/lib.rs | 2 +- 4 files changed, 54 insertions(+), 41 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 12b2f1a7cd9fe..3dd8565aa05ff 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -144,7 +144,7 @@ pub fn new_partial( keystore_container, select_chain, transaction_pool, - statement_store, + statement_store: Some(statement_store), other: (grandpa_block_import, grandpa_link, telemetry), }) } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 7cd42fde4afb1..8c8e3942ac574 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -296,7 +296,7 @@ pub fn new_partial( select_chain, import_queue, transaction_pool, - statement_store, + statement_store: Some(statement_store), other: (rpc_extensions_builder, import_setup, rpc_setup, telemetry), }) } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 5f2a61ff4bfaa..834bca254ba2d 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -325,7 +325,7 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { /// A shared transaction pool. pub transaction_pool: Arc, /// Shared statement store. - pub statement_store: Arc, + pub statement_store: Option>, /// Builds additional [`RpcModule`]s that should be added to the server pub rpc_builder: Box Result, Error>>, @@ -461,13 +461,15 @@ where // Perform periodic statement store maintenance let store = statement_store.clone(); - spawn_handle.spawn("statement-store-notifications", Some("statement-store"), async move { - let mut interval = tokio::time::interval(sc_statement_store::MAINTENANCE_PERIOD); - loop { - interval.tick().await; - store.maintain(); - } - }); + if let Some(store) = store { + spawn_handle.spawn("statement-store-notifications", Some("statement-store"), async move { + let mut interval = tokio::time::interval(sc_statement_store::MAINTENANCE_PERIOD); + loop { + interval.tick().await; + store.maintain(); + } + }); + } // Prometheus metrics. 
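The constraint handling described in the statement-store module comment above comes down to evicting lower-priority entries until a newcomer fits, and ignoring the newcomer if that would require evicting anything of equal or higher priority. A self-contained sketch of that decision for the per-account limits (the global pass over `by_global_priority` works the same way):

// Illustrative only: decide which existing (priority, size) entries must be
// evicted so that a newcomer fits under `max_count`/`max_size`. Returns `None`
// when the newcomer should be ignored instead.
fn plan_evictions(
    mut existing: Vec<(u32, usize)>,
    new_priority: u32,
    new_len: usize,
    max_count: usize,
    max_size: usize,
) -> Option<Vec<(u32, usize)>> {
    existing.sort_by_key(|(priority, _)| *priority); // lowest priority first
    let mut count = existing.len();
    let mut size: usize = existing.iter().map(|(_, len)| *len).sum();
    let mut evicted = Vec::new();
    for (priority, len) in existing {
        if count + 1 <= max_count && size + new_len <= max_size {
            break // constraints already satisfied
        }
        if priority >= new_priority {
            return None // would evict something at least as important
        }
        count -= 1;
        size -= len;
        evicted.push((priority, len));
    }
    if count + 1 <= max_count && size + new_len <= max_size {
        Some(evicted)
    } else {
        None
    }
}

fn main() {
    // Evicting the priority-1 entry makes room for a priority-5 newcomer.
    assert_eq!(plan_evictions(vec![(1, 100), (9, 100)], 5, 100, 2, 300), Some(vec![(1, 100)]));
    // A newcomer with the lowest priority of all is ignored.
    assert_eq!(plan_evictions(vec![(1, 100), (9, 100)], 0, 100, 2, 300), None);
}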
let metrics_service = @@ -506,7 +508,7 @@ where task_manager.spawn_handle(), client.clone(), transaction_pool.clone(), - Some(statement_store.clone()), + statement_store.clone(), keystore.clone(), system_rpc_tx.clone(), &config, @@ -721,7 +723,7 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { /// A shared transaction pool. pub transaction_pool: Arc, /// A shared statement store. - pub statement_store: Arc, + pub statement_store: Option>, /// A handle for spawning tasks. pub spawn_handle: SpawnTaskHandle, /// An import queue. @@ -740,7 +742,7 @@ pub fn build_network( Arc::Hash>>, TracingUnboundedSender>, sc_network_transactions::TransactionsHandlerController<::Hash>, - sc_network_statement::StatementHandlerController, + Option, NetworkStarter, Arc>, ), @@ -919,20 +921,25 @@ where .insert(0, transactions_handler_proto.set_config()); // crate statment protocol and add it to the list of supported protocols of `network_params` - let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new( - protocol_id.clone(), - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - config.chain_spec.fork_id(), - ); - network_params - .network_config - .extra_sets - .insert(0, statement_handler_proto.set_config()); + let statement_handler_proto = if statement_store.is_some() { + let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new( + protocol_id.clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + config.chain_spec.fork_id(), + ); + network_params + .network_config + .extra_sets + .insert(0, statement_handler_proto.set_config()); + Some(statement_handler_proto) + } else { + None + }; let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); @@ -945,22 +952,28 @@ where )?; spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); - let statement_protocol_executor = { - let spawn_handle = Clone::clone(&spawn_handle); - Box::new(move |fut| { - spawn_handle.spawn("network-statement-validator", Some("networking"), fut); - }) - }; // crate statement gossip protocol and add it to the list of supported protocols of // `network_params` - let (statement_handler, statement_handler_controller) = statement_handler_proto.build( - network.clone(), - sync_service.clone(), - statement_store.clone(), - config.prometheus_config.as_ref().map(|config| &config.registry), - statement_protocol_executor, - )?; - spawn_handle.spawn("network-statement-handler", Some("networking"), statement_handler.run()); + let statement_handler_controller = if let Some(statement_store) = statement_store { + let statement_protocol_executor = { + let spawn_handle = Clone::clone(&spawn_handle); + Box::new(move |fut| { + spawn_handle.spawn("network-statement-validator", Some("networking"), fut); + }) + }; + let statement_handler_proto = statement_handler_proto.expect("statement_handler_proto is always created when statement_store is `Some`"); + let (statement_handler, statement_handler_controller) = statement_handler_proto.build( + network.clone(), + sync_service.clone(), + statement_store.clone(), + config.prometheus_config.as_ref().map(|config| &config.registry), + statement_protocol_executor, + )?; + spawn_handle.spawn("network-statement-handler", Some("networking"), statement_handler.run()); + 
Some(statement_handler_controller) + } else { + None + }; spawn_handle.spawn_blocking( "chain-sync-network-service-provider", diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 5e321813b63a5..f8a806eec267e 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -142,7 +142,7 @@ pub struct PartialComponents, /// A shared statement store. - pub statement_store: Arc, + pub statement_store: Option>, /// Everything else that needs to be passed into the main build function. pub other: Other, } From 25f3771c1429a2e9654c19b94ebc812c66986840 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 2 Apr 2023 11:32:42 +0200 Subject: [PATCH 29/78] Removed network protocol controller --- bin/node-template/node/src/service.rs | 1 - bin/node/cli/src/service.rs | 1 - client/network/statement/src/config.rs | 2 +- client/network/statement/src/lib.rs | 46 +++----------------------- client/service/src/builder.rs | 11 ++---- 5 files changed, 8 insertions(+), 53 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 3dd8565aa05ff..f4bc2c0d6204d 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -182,7 +182,6 @@ pub fn new_full(mut config: Configuration) -> Result network, system_rpc_tx, tx_handler_controller, - _statement_handler_controller, network_starter, sync_service, ) = sc_service::build_network(sc_service::BuildNetworkParams { diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 8c8e3942ac574..fb8ffd1609661 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -366,7 +366,6 @@ pub fn new_full_base( network, system_rpc_tx, tx_handler_controller, - _statement_handler_controller, network_starter, sync_service, ) = sc_service::build_network(sc_service::BuildNetworkParams { diff --git a/client/network/statement/src/config.rs b/client/network/statement/src/config.rs index d3eaba1dac0ba..159998a0fe300 100644 --- a/client/network/statement/src/config.rs +++ b/client/network/statement/src/config.rs @@ -21,7 +21,7 @@ use std::time; /// Interval at which we propagate statements; -pub(crate) const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); +pub(crate) const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(1000); /// Maximum number of known statement hashes to keep for a peer. pub(crate) const MAX_KNOWN_STATEMENTS: usize = 10240; diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index f964a1ed62b27..e4eec00042642 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -43,7 +43,7 @@ use sc_network_common::{ role::ObservedRole, sync::{SyncEvent, SyncEventStream}, }; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_statement_store::{ Hash, NetworkPriority, Statement, StatementSource, StatementStore, SubmitResult, }; @@ -164,8 +164,7 @@ impl StatementHandlerPrototype { } } - /// Turns the prototype into the actual handler. Returns a controller that allows controlling - /// the behaviour of the handler while it's running. + /// Turns the prototype into the actual handler. /// /// Important: the statements handler is initially disabled and doesn't gossip statements. /// Gossiping is enabled when major syncing is done. 
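When this handler propagates statements it tracks, per peer, which hashes that peer is already known to have and only sends the new ones. A rough sketch of that filter, with a plain `HashSet` standing in for the bounded set the handler keeps per peer:

use std::collections::HashSet;

// `insert` returns `false` for hashes the peer already knows, so duplicates
// are filtered out before sending.
fn select_for_peer(
    known_statements: &mut HashSet<[u8; 32]>,
    statements: &[([u8; 32], Vec<u8>)],
) -> Vec<([u8; 32], Vec<u8>)> {
    statements
        .iter()
        .filter(|(hash, _)| known_statements.insert(*hash))
        .cloned()
        .collect()
}

fn main() {
    let mut known = HashSet::new();
    let statements = vec![([1u8; 32], vec![0xde, 0xad]), ([1u8; 32], vec![0xde, 0xad])];
    // The duplicate hash is filtered out on the second occurrence.
    assert_eq!(select_for_peer(&mut known, &statements).len(), 1);
}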
@@ -179,10 +178,9 @@ impl StatementHandlerPrototype { statement_store: Arc, metrics_registry: Option<&Registry>, executor: Box + Send>>) + Send>, - ) -> error::Result<(StatementHandler, StatementHandlerController)> { + ) -> error::Result> { let net_event_stream = network.event_stream("statement-handler-net"); let sync_event_stream = sync.event_stream("statement-handler-sync"); - let (to_handler, from_controller) = tracing_unbounded("mpsc_statement_handler", 100_000); let (queue_sender, mut queue_receiver) = tracing_unbounded("mpsc_statement_validator", 100_000); @@ -222,7 +220,6 @@ impl StatementHandlerPrototype { sync_event_stream: sync_event_stream.fuse(), peers: HashMap::new(), statement_store, - from_controller, queue_sender, metrics: if let Some(r) = metrics_registry { Some(Metrics::register(r)?) @@ -231,38 +228,10 @@ impl StatementHandlerPrototype { }, }; - let controller = StatementHandlerController { to_handler }; - - Ok((handler, controller)) - } -} - -/// Controls the behaviour of a [`StatementHandler`] it is connected to. -pub struct StatementHandlerController { - to_handler: TracingUnboundedSender, -} - -impl StatementHandlerController { - /// You may call this when new statements are imported by the statement store. - /// - /// All statements will be fetched from the `StatementStore` and propagated to peers. - pub fn propagate_statements(&self) { - let _ = self.to_handler.unbounded_send(ToHandler::PropagateStatements); - } - - /// You must call when new a statement is imported by the statement store. - /// - /// This statement will be fetched from the `StatementStore` and propagated to peers. - pub fn propagate_statement(&self, hash: Hash) { - let _ = self.to_handler.unbounded_send(ToHandler::PropagateStatement(hash)); + Ok(handler) } } -enum ToHandler { - PropagateStatements, - PropagateStatement(Hash), -} - /// Handler for statements. Call [`StatementHandler::run`] to start the processing. pub struct StatementHandler< N: NetworkPeers + NetworkEventStream + NetworkNotification, @@ -289,7 +258,6 @@ pub struct StatementHandler< // All connected peers peers: HashMap, statement_store: Arc, - from_controller: TracingUnboundedReceiver, queue_sender: TracingUnboundedSender<(Statement, oneshot::Sender)>, /// Prometheus metrics. 
metrics: Option, @@ -341,12 +309,6 @@ where return; } } - message = self.from_controller.select_next_some() => { - match message { - ToHandler::PropagateStatement(hash) => self.propagate_statement(&hash), - ToHandler::PropagateStatements => self.propagate_statements(), - } - }, } } } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 834bca254ba2d..02c4719b46c0d 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -742,7 +742,6 @@ pub fn build_network( Arc::Hash>>, TracingUnboundedSender>, sc_network_transactions::TransactionsHandlerController<::Hash>, - Option, NetworkStarter, Arc>, ), @@ -954,7 +953,7 @@ where // crate statement gossip protocol and add it to the list of supported protocols of // `network_params` - let statement_handler_controller = if let Some(statement_store) = statement_store { + if let Some(statement_store) = statement_store { let statement_protocol_executor = { let spawn_handle = Clone::clone(&spawn_handle); Box::new(move |fut| { @@ -962,7 +961,7 @@ where }) }; let statement_handler_proto = statement_handler_proto.expect("statement_handler_proto is always created when statement_store is `Some`"); - let (statement_handler, statement_handler_controller) = statement_handler_proto.build( + let statement_handler = statement_handler_proto.build( network.clone(), sync_service.clone(), statement_store.clone(), @@ -970,10 +969,7 @@ where statement_protocol_executor, )?; spawn_handle.spawn("network-statement-handler", Some("networking"), statement_handler.run()); - Some(statement_handler_controller) - } else { - None - }; + } spawn_handle.spawn_blocking( "chain-sync-network-service-provider", @@ -1040,7 +1036,6 @@ where network, system_rpc_tx, tx_handler_controller, - statement_handler_controller, NetworkStarter(network_start_tx), sync_service.clone(), )) From 27e73cc7b8d76e78a9dde0bde2291722a23bee86 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 3 Apr 2023 10:02:15 +0200 Subject: [PATCH 30/78] fmt --- bin/node-template/node/src/service.rs | 27 +++++++++++---------------- bin/node/cli/src/service.rs | 27 +++++++++++---------------- client/service/src/builder.rs | 9 +++++++-- 3 files changed, 29 insertions(+), 34 deletions(-) diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index f4bc2c0d6204d..72f75584457bc 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -178,22 +178,17 @@ pub fn new_full(mut config: Configuration) -> Result Vec::default(), )); - let ( - network, - system_rpc_tx, - tx_handler_controller, - network_starter, - sync_service, - ) = sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - statement_store: statement_store.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - block_announce_validator_builder: None, - warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), - })?; + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + statement_store: statement_store.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), + })?; if config.offchain_worker.enabled { 
sc_service::build_offchain_workers( diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index fb8ffd1609661..db3526404cb08 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -362,22 +362,17 @@ pub fn new_full_base( Vec::default(), )); - let ( - network, - system_rpc_tx, - tx_handler_controller, - network_starter, - sync_service, - ) = sc_service::build_network(sc_service::BuildNetworkParams { - config: &config, - client: client.clone(), - transaction_pool: transaction_pool.clone(), - statement_store: statement_store.clone(), - spawn_handle: task_manager.spawn_handle(), - import_queue, - block_announce_validator_builder: None, - warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), - })?; + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = + sc_service::build_network(sc_service::BuildNetworkParams { + config: &config, + client: client.clone(), + transaction_pool: transaction_pool.clone(), + statement_store: statement_store.clone(), + spawn_handle: task_manager.spawn_handle(), + import_queue, + block_announce_validator_builder: None, + warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), + })?; if config.offchain_worker.enabled { sc_service::build_offchain_workers( diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 02c4719b46c0d..410817bf2c73b 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -960,7 +960,8 @@ where spawn_handle.spawn("network-statement-validator", Some("networking"), fut); }) }; - let statement_handler_proto = statement_handler_proto.expect("statement_handler_proto is always created when statement_store is `Some`"); + let statement_handler_proto = statement_handler_proto + .expect("statement_handler_proto is always created when statement_store is `Some`"); let statement_handler = statement_handler_proto.build( network.clone(), sync_service.clone(), @@ -968,7 +969,11 @@ where config.prometheus_config.as_ref().map(|config| &config.registry), statement_protocol_executor, )?; - spawn_handle.spawn("network-statement-handler", Some("networking"), statement_handler.run()); + spawn_handle.spawn( + "network-statement-handler", + Some("networking"), + statement_handler.run(), + ); } spawn_handle.spawn_blocking( From 0e84775b15d32e51dd37b720fe953762fdf20425 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 3 Apr 2023 14:03:49 +0200 Subject: [PATCH 31/78] Clippy fixes --- client/statement-store/src/lib.rs | 24 +++++++++++------------- frame/statement/src/lib.rs | 2 +- primitives/statement-store/src/lib.rs | 24 ++++++++++++------------ 3 files changed, 24 insertions(+), 26 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 9d4b3efed3e67..2bdf2ff625649 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -226,15 +226,15 @@ impl Index { } let key = statement.decryption_key(); if let Some(k) = &key { - self.by_dec_key.entry(k.clone()).or_default().insert(hash); + self.by_dec_key.entry(*k).or_default().insert(hash); } if nt > 0 || key.is_some() { self.statement_topics.insert(hash, (all_topics, key)); } let priority = statement.priority().unwrap_or(0); - self.entries.insert(hash, (account.clone(), global_priority, priority)); + self.entries.insert(hash, (account, global_priority, priority)); self.by_global_priority.insert( - PriorityKey { hash: hash.clone(), priority: global_priority }, + PriorityKey { hash, priority: global_priority }, 
statement.data_len(), ); self.total_size += statement.data_len(); @@ -329,7 +329,7 @@ impl Index { let mut purged = Vec::new(); self.expired.retain(|hash, timestamp| { if *timestamp + PURGE_AFTER <= current_time { - purged.push(hash.clone()); + purged.push(*hash); log::trace!(target: LOG_TARGET, "Purged statement {:?}", HexDisplay::from(hash)); false } else { @@ -341,15 +341,13 @@ impl Index { fn make_expired(&mut self, hash: &Hash, current_time: u64) -> bool { if let Some((account, global_priority, priority)) = self.entries.remove(hash) { - let key = PriorityKey { hash: hash.clone(), priority: global_priority }; + let key = PriorityKey { hash: *hash, priority: global_priority }; let len = self.by_global_priority.remove(&key).unwrap_or(0); self.total_size -= len; if let Some((topics, key)) = self.statement_topics.remove(hash) { - for t in topics { - if let Some(t) = t { - if let Some(set) = self.by_topic.get_mut(&t) { - set.remove(hash); - } + for t in topics.into_iter().flatten() { + if let Some(set) = self.by_topic.get_mut(&t) { + set.remove(hash); } } if let Some(k) = key { @@ -358,11 +356,11 @@ impl Index { } } } - self.expired.insert(hash.clone(), current_time); + self.expired.insert(*hash, current_time); if let std::collections::hash_map::Entry::Occupied(mut account_rec) = self.accounts.entry(account) { - let key = PriorityKey { hash: hash.clone(), priority }; + let key = PriorityKey { hash: *hash, priority }; if let Some((channel, len)) = account_rec.get_mut().by_priority.remove(&key) { account_rec.get_mut().data_size -= len; if let Some(channel) = channel { @@ -822,7 +820,7 @@ impl StatementStore for Store { // Validate. let at_block = if let Some(Proof::OnChain { block_hash, .. }) = statement.proof() { - Some(block_hash.clone()) + Some(*block_hash) } else { None }; diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 0c7275ad519da..711fac1780665 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -127,7 +127,7 @@ where log::debug!(target: LOG_TARGET, "Bad block hash."); return Err(InvalidStatement::BadProof) } - let account: T::AccountId = who.clone().into(); + let account: T::AccountId = (*who).into(); match frame_system::Pallet::::event_no_consensus(*event_index as usize) { Some(e) => { statement.strip_proof(); diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 053490279582a..2cd0edbaf426c 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -302,7 +302,7 @@ impl Statement { let signature = sp_core::sr25519::Signature(*signature); let public = sp_core::sr25519::Public(*signer); if signature.verify(to_sign.as_slice(), &public) { - SignatureVerificationResult::Valid(signer.clone()) + SignatureVerificationResult::Valid(*signer) } else { SignatureVerificationResult::Invalid } @@ -312,7 +312,7 @@ impl Statement { let signature = sp_core::ed25519::Signature(*signature); let public = sp_core::ed25519::Public(*signer); if signature.verify(to_sign.as_slice(), &public) { - SignatureVerificationResult::Valid(signer.clone()) + SignatureVerificationResult::Valid(*signer) } else { SignatureVerificationResult::Invalid } @@ -341,7 +341,7 @@ impl Statement { /// Returns a topic by topic index. pub fn topic(&self, index: usize) -> Option { if index < self.num_topics as usize { - Some(self.topics[index].clone()) + Some(self.topics[index]) } else { None } @@ -349,7 +349,7 @@ impl Statement { /// Returns decryption key if any. 
pub fn decryption_key(&self) -> Option { - self.decryption_key.clone() + self.decryption_key } /// Convert to internal data. @@ -379,12 +379,12 @@ impl Statement { /// Get channel, if any. pub fn channel(&self) -> Option { - self.channel.clone() + self.channel } /// Get priority, if any. pub fn priority(&self) -> Option { - self.priority.clone() + self.priority } /// Return encoded fields that can be signed to construct or verify a proof @@ -494,11 +494,11 @@ mod test { let channel = [0xcc; 32]; statement.set_proof(proof.clone()); - statement.set_decryption_key(decryption_key.clone()); + statement.set_decryption_key(decryption_key); statement.set_priority(priority); - statement.set_channel(channel.clone()); - statement.set_topic(0, topic1.clone()); - statement.set_topic(1, topic2.clone()); + statement.set_channel(channel); + statement.set_topic(0, topic1); + statement.set_topic(1, topic2); statement.set_plain_data(data.clone()); statement.set_topic(5, [0x55; 32]); @@ -509,8 +509,8 @@ mod test { Field::DecryptionKey(decryption_key.clone()), Field::Priority(priority), Field::Channel(channel), - Field::Topic1(topic1.clone()), - Field::Topic2(topic2.clone()), + Field::Topic1(topic1), + Field::Topic2(topic2), Field::Data(data.clone()), ]; From 312a07e7cacefbcf3e45c8e0baae34be47db6edd Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 3 Apr 2023 14:12:10 +0200 Subject: [PATCH 32/78] fmt --- client/statement-store/src/lib.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 2bdf2ff625649..d6e42babaeeff 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -233,10 +233,8 @@ impl Index { } let priority = statement.priority().unwrap_or(0); self.entries.insert(hash, (account, global_priority, priority)); - self.by_global_priority.insert( - PriorityKey { hash, priority: global_priority }, - statement.data_len(), - ); + self.by_global_priority + .insert(PriorityKey { hash, priority: global_priority }, statement.data_len()); self.total_size += statement.data_len(); let mut account_info = self.accounts.entry(account).or_default(); account_info.data_size += statement.data_len(); From 9c211723b29b595148cbe5b3c7fa26eb17635a5f Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 3 Apr 2023 15:17:46 +0200 Subject: [PATCH 33/78] fmt --- primitives/statement-store/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 2cd0edbaf426c..88c29f538e916 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -506,7 +506,7 @@ mod test { let fields = vec![ Field::AuthenticityProof(proof.clone()), - Field::DecryptionKey(decryption_key.clone()), + Field::DecryptionKey(decryption_key), Field::Priority(priority), Field::Channel(channel), Field::Topic1(topic1), From 0d95b2e44e61009ee32a2679212cde1bb508d85c Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 3 Apr 2023 16:17:36 +0200 Subject: [PATCH 34/78] More clippy fixes --- client/statement-store/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index d6e42babaeeff..98587659e34fe 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -1043,7 +1043,7 @@ mod tests { let mut statement = Statement::new(); statement.set_plain_data(vec![data]); for i in 0..topics.len() { - 
statement.set_topic(i, topics[i].clone()); + statement.set_topic(i, topics[i]); } let kp = sp_core::ed25519::Pair::from_string("//Alice", None).unwrap(); statement.sign_ed25519_private(&kp); From 61f6e5fed25301483334f76b32ede368fc664129 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 3 Apr 2023 16:46:15 +0200 Subject: [PATCH 35/78] More clippy fixes --- client/network/statement/src/lib.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index e4eec00042642..3d751e3ac7e24 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -114,7 +114,7 @@ impl Future for PendingStatement { let mut this = self.project(); if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { - return Poll::Ready((this.hash.clone(), import_result.ok())) + return Poll::Ready((*this.hash, import_result.ok())) } Poll::Pending @@ -403,11 +403,11 @@ where } let hash = s.hash(); - peer.known_statements.insert(hash.clone()); + peer.known_statements.insert(hash); self.network.report_peer(who, rep::ANY_STATEMENT); - match self.pending_statements_peers.entry(hash.clone()) { + match self.pending_statements_peers.entry(hash) { Entry::Vacant(entry) => { let (completion_sender, completion_receiver) = oneshot::channel(); if let Ok(()) = self.queue_sender.unbounded_send((s, completion_sender)) { @@ -447,7 +447,7 @@ where log::debug!(target: LOG_TARGET, "Propagating statement [{:?}]", hash); if let Ok(Some(statement)) = self.statement_store.statement(hash) { - self.do_propagate_statements(&[(hash.clone(), statement)]); + self.do_propagate_statements(&[(*hash, statement)]); } } @@ -462,7 +462,7 @@ where let (hashes, to_send): (Vec<_>, Vec<_>) = statements .iter() - .filter(|&(ref hash, _)| peer.known_statements.insert(hash.clone())) + .filter(|(hash, _)| peer.known_statements.insert(*hash)) .cloned() .unzip(); From d93493a383874bc1dd00b6ce83c688c5a956824c Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 3 Apr 2023 17:04:19 +0200 Subject: [PATCH 36/78] More clippy fixes --- frame/statement/src/lib.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 711fac1780665..47b059555e8c8 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -170,14 +170,12 @@ where .checked_div(&statement_cost) .unwrap_or_default() .saturated_into::() - .min(MAX_ALLOWED_STATEMENTS) - .max(MIN_ALLOWED_STATEMENTS); + .clamp(MIN_ALLOWED_STATEMENTS, MAX_ALLOWED_STATEMENTS); let max_size = balance .checked_div(&byte_cost) .unwrap_or_default() .saturated_into::() - .min(MAX_ALLOWED_BYTES) - .max(MIN_ALLOWED_BYTES); + .clamp(MIN_ALLOWED_BYTES, MAX_ALLOWED_BYTES); Ok(ValidStatement { global_priority, max_count, max_size }) } From f06ac25973f3ebda85d032e4ec3c4d9a9868d3e1 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Wed, 5 Apr 2023 15:44:54 +0200 Subject: [PATCH 37/78] Update client/statement-store/README.md Co-authored-by: cheme --- client/statement-store/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/statement-store/README.md b/client/statement-store/README.md index 54173e2c2616c..41e268f4ece0d 100644 --- a/client/statement-store/README.md +++ b/client/statement-store/README.md @@ -1,4 +1,4 @@ -Substrate statemen store implementation. +Substrate statement store implementation. 
License: GPL-3.0-or-later WITH Classpath-exception-2.0 From 3c2373d3a61b9fadb2b77407e04375b0910376ce Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Wed, 5 Apr 2023 15:49:34 +0200 Subject: [PATCH 38/78] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- primitives/statement-store/Cargo.toml | 2 +- primitives/statement-store/src/runtime_api.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/primitives/statement-store/Cargo.toml b/primitives/statement-store/Cargo.toml index 16708d95cdc9f..5aa4d833637cf 100644 --- a/primitives/statement-store/Cargo.toml +++ b/primitives/statement-store/Cargo.toml @@ -22,7 +22,7 @@ sp-api = { version = "4.0.0-dev", default-features = false, path = "../api" } sp-application-crypto = { version = "7.0.0", default-features = false, path = "../application-crypto" } sp-runtime-interface = { version = "7.0.0", default-features = false, path = "../runtime-interface" } sp-externalities = { version = "0.13.0", default-features = false, path = "../externalities" } -thiserror = {version = "1.0", optional = true } +thiserror = { version = "1.0", optional = true } log = { version = "0.4.17", optional = true } [features] diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index d9e0a680eeacb..0c1b5f0ea4d18 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -101,8 +101,8 @@ impl StatementStoreExt { } } -#[derive(Debug, Eq, PartialEq, Clone, Copy, Encode, Decode, PassByEnum)] /// Submission result. +#[derive(Debug, Eq, PartialEq, Clone, Copy, Encode, Decode, PassByEnum)] pub enum SubmitResult { /// Accepted as new. 
OkNew, From a13e2c00b8cc57402f7053f9427534204b719f22 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 16:09:56 +0200 Subject: [PATCH 39/78] Removed sstore from node-template --- Cargo.lock | 2 -- bin/node-template/node/src/service.rs | 8 +------- bin/node-template/runtime/Cargo.toml | 5 ----- bin/node-template/runtime/src/lib.rs | 17 ----------------- 4 files changed, 1 insertion(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3285ac2c15c57..c78e57af67d00 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5205,7 +5205,6 @@ dependencies = [ "pallet-aura", "pallet-balances", "pallet-grandpa", - "pallet-statement", "pallet-sudo", "pallet-template", "pallet-timestamp", @@ -5222,7 +5221,6 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-session", - "sp-statement-store", "sp-std", "sp-transaction-pool", "sp-version", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 72f75584457bc..221f778a0fa5a 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -130,12 +130,6 @@ pub fn new_partial( compatibility_mode: Default::default(), })?; - let statement_store = sc_statement_store::Store::new_shared( - config.database.path().unwrap(), - client.clone(), - config.prometheus_registry(), - )?; - Ok(sc_service::PartialComponents { client, backend, @@ -144,7 +138,7 @@ pub fn new_partial( keystore_container, select_chain, transaction_pool, - statement_store: Some(statement_store), + statement_store: None, other: (grandpa_block_import, grandpa_link, telemetry), }) } diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 647713a9396f1..8915061c17c6d 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -25,7 +25,6 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../../ frame-try-runtime = { version = "0.10.0-dev", default-features = false, path = "../../../frame/try-runtime", optional = true } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../../../frame/timestamp" } pallet-transaction-payment = { version = "4.0.0-dev", default-features = false, path = "../../../frame/transaction-payment" } -pallet-statement = { version = "4.0.0-dev", default-features = false, path = "../../../frame/statement" } frame-executive = { version = "4.0.0-dev", default-features = false, path = "../../../frame/executive" } sp-api = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/api" } sp-block-builder = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/block-builder"} @@ -38,7 +37,6 @@ sp-runtime = { version = "7.0.0", default-features = false, path = "../../../pri sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } sp-std = { version = "5.0.0", default-features = false, path = "../../../primitives/std" } sp-transaction-pool = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/transaction-pool" } -sp-statement-store = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/statement-store" } sp-version = { version = "5.0.0", default-features = false, path = "../../../primitives/version" } # Used for the node template's RPCs @@ -72,7 +70,6 @@ std = [ "pallet-balances/std", "pallet-grandpa/std", "pallet-sudo/std", - "pallet-statement/std", "pallet-template/std", "pallet-timestamp/std", 
"pallet-transaction-payment-rpc-runtime-api/std", @@ -88,7 +85,6 @@ std = [ "sp-session/std", "sp-std/std", "sp-transaction-pool/std", - "sp-statement-store/std", "sp-version/std", "substrate-wasm-builder", ] @@ -111,7 +107,6 @@ try-runtime = [ "pallet-aura/try-runtime", "pallet-balances/try-runtime", "pallet-grandpa/try-runtime", - "pallet-statement/try-runtime", "pallet-sudo/try-runtime", "pallet-template/try-runtime", "pallet-timestamp/try-runtime", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 95455ee4d7ea6..4eb95d18969fe 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -272,13 +272,6 @@ parameter_types! { pub StatementByteCost: Balance = 10; } -impl pallet_statement::Config for Runtime { - type RuntimeEvent = RuntimeEvent; - type Currency = Balances; - type StatementCost = StatementCost; - type ByteCost = StatementByteCost; -} - /// Configure the pallet-template in pallets/template. impl pallet_template::Config for Runtime { type RuntimeEvent = RuntimeEvent; @@ -299,7 +292,6 @@ construct_runtime!( Balances: pallet_balances, TransactionPayment: pallet_transaction_payment, Sudo: pallet_sudo, - Statement: pallet_statement, // Include the custom logic from the pallet-template in the runtime. TemplateModule: pallet_template, } @@ -412,15 +404,6 @@ impl_runtime_apis! { } } - impl sp_statement_store::runtime_api::ValidateStatement for Runtime { - fn validate_statement( - source: sp_statement_store::runtime_api::StatementSource, - statement: sp_statement_store::Statement, - ) -> Result { - Statement::validate_statement(source, statement) - } - } - impl sp_offchain::OffchainWorkerApi for Runtime { fn offchain_worker(header: &::Header) { Executive::offchain_worker(header) From ac499ab76f87356161587b4a42f7c0aa717be4c5 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 16:27:51 +0200 Subject: [PATCH 40/78] Sort out data path --- bin/node/cli/src/service.rs | 2 +- client/cli/src/config.rs | 3 ++- client/cli/src/runner.rs | 6 ++++-- client/service/src/config.rs | 6 ++++-- client/service/test/src/lib.rs | 3 ++- client/statement-store/src/lib.rs | 3 +-- 6 files changed, 14 insertions(+), 9 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index db3526404cb08..c11cadce80b95 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -283,7 +283,7 @@ pub fn new_partial( }; let statement_store = sc_statement_store::Store::new_shared( - config.database.path().unwrap(), + &config.data_path, client.clone(), config.prometheus_registry(), )?; diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 063b2c39839f4..469ff89b64965 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -526,6 +526,7 @@ pub trait CliConfiguration: Sized { )?, keystore, database: self.database_config(&config_dir, database_cache_size, database)?, + data_path: config_dir, trie_cache_maximum_size: self.trie_cache_maximum_size()?, state_pruning: self.state_pruning()?, blocks_pruning: self.blocks_pruning()?, @@ -558,7 +559,7 @@ pub trait CliConfiguration: Sized { max_runtime_instances, announce_block: self.announce_block()?, role, - base_path: Some(base_path), + base_path: base_path, informant_output_format: Default::default(), runtime_cache_size, }) diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index a8b75f2665aea..2653489d083cb 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -259,6 +259,7 @@ mod tests { fn 
create_runner() -> Runner { let runtime = build_runtime().unwrap(); + let root = PathBuf::from("db"); let runner = Runner::new( Configuration { impl_name: "spec".into(), @@ -268,7 +269,7 @@ mod tests { transaction_pool: Default::default(), network: NetworkConfiguration::new_memory(), keystore: sc_service::config::KeystoreConfig::InMemory, - database: sc_client_db::DatabaseSource::ParityDb { path: PathBuf::from("db") }, + database: sc_client_db::DatabaseSource::ParityDb { path: root.clone() }, trie_cache_maximum_size: None, state_pruning: None, blocks_pruning: sc_client_db::BlocksPruning::KeepAll, @@ -310,7 +311,8 @@ mod tests { tracing_receiver: Default::default(), max_runtime_instances: 8, announce_block: true, - base_path: None, + base_path: sc_service::BasePath::new(root.clone()), + data_path: root, informant_output_format: Default::default(), runtime_cache_size: 2, }, diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 6550fcdd8ef4f..add7f019a96f5 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -140,8 +140,10 @@ pub struct Configuration { pub max_runtime_instances: usize, /// Announce block automatically after they have been imported pub announce_block: bool, - /// Base path of the configuration - pub base_path: Option, + /// Data path root for the configured chain. + pub data_path: PathBuf, + /// Base path of the configuration. This is shared between chains. + pub base_path: BasePath, /// Configuration of the output format that the informant uses. pub informant_output_format: sc_informant::OutputFormat, /// Maximum number of different runtime versions that can be cached. diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 8a2e5050bd5d3..3be5ac13fa4fe 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -274,7 +274,8 @@ fn node_config< tracing_receiver: Default::default(), max_runtime_instances: 8, announce_block: true, - base_path: Some(BasePath::new(root)), + base_path: BasePath::new(root.clone()), + data_path: root, informant_output_format: Default::default(), runtime_cache_size: 2, } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 98587659e34fe..ff686c8ef79b6 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -534,8 +534,7 @@ impl Store { Client::Api: ValidateStatement, { let mut path: std::path::PathBuf = path.into(); - path.pop(); - path.push("statement"); + path.push("statements"); let mut config = parity_db::Options::with_columns(&path, col::COUNT); From e562b6543712bdeaca0c25bf925bd43ed84e9c35 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 16:44:22 +0200 Subject: [PATCH 41/78] Added offline check --- client/network/statement/src/lib.rs | 4 ++-- client/statement-store/src/lib.rs | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index 3d751e3ac7e24..f9fe971caaa9d 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -365,10 +365,10 @@ where continue } // Accept statements only when node is not major syncing - if self.sync.is_major_syncing() { + if self.sync.is_major_syncing() || self.sync.is_offline() { log::trace!( target: LOG_TARGET, - "{remote}: Ignoring statements while major syncing" + "{remote}: Ignoring statements while major syncing or offline" ); continue } diff --git a/client/statement-store/src/lib.rs 
b/client/statement-store/src/lib.rs index ff686c8ef79b6..aeb980db5e1b7 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -75,7 +75,6 @@ const MAX_TOTAL_STATEMENTS: usize = 8192; const MAX_TOTAL_SIZE: usize = 64 * 1024 * 1024; /// Suggested maintenance period. A good value to call `Store::maintain` with. -#[allow(dead_code)] pub const MAINTENANCE_PERIOD: std::time::Duration = std::time::Duration::from_secs(30); mod col { From 06f33767d8a0d22e1d40e03329e924fb4a08e860 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 16:52:06 +0200 Subject: [PATCH 42/78] Removed dispatch_statement --- frame/statement/src/lib.rs | 9 --------- 1 file changed, 9 deletions(-) diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 47b059555e8c8..c6d33f27ea70d 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -100,7 +100,6 @@ pub mod pallet { fn offchain_worker(now: BlockNumberFor) { log::trace!(target: LOG_TARGET, "Collecting statements at #{:?}", now); Pallet::::collect_statements(); - Pallet::::dispatch_statements(); } } } @@ -203,12 +202,4 @@ where } } } - - fn dispatch_statements() { - let all_statements = sp_statement_store::runtime_api::io::dump(); - for (hash, _statement) in all_statements { - // TODO: Custom statement handling - log::trace!(target: LOG_TARGET, "Handling statement #{:?}", hash); - } - } } From 167c4ad3e54e9dd08f9c638e18b7d35d26d69d2c Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 16:55:24 +0200 Subject: [PATCH 43/78] Renamed into_generic --- primitives/application-crypto/src/lib.rs | 4 ++-- primitives/statement-store/src/lib.rs | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/primitives/application-crypto/src/lib.rs b/primitives/application-crypto/src/lib.rs index 220da84598f94..80dfdda6d8568 100644 --- a/primitives/application-crypto/src/lib.rs +++ b/primitives/application-crypto/src/lib.rs @@ -319,7 +319,7 @@ macro_rules! app_crypto_public_common { impl Public { /// Convert into wrapped generic public key type. - pub fn into_generic(self) -> $public { + pub fn into_inner(self) -> $public { self.0 } } @@ -487,7 +487,7 @@ macro_rules! app_crypto_signature_common { impl Signature { /// Convert into wrapped generic signature type. 
- pub fn into_generic(self) -> $sig { + pub fn into_inner(self) -> $sig { self.0 } } diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 88c29f538e916..cd8180743b33c 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -224,8 +224,8 @@ impl Statement { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { let proof = Proof::Sr25519 { - signature: signature.into_generic().into(), - signer: key.clone().into_generic().into(), + signature: signature.into_inner().into(), + signer: key.clone().into_inner().into(), }; self.set_proof(proof); true @@ -248,8 +248,8 @@ impl Statement { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { let proof = Proof::Ed25519 { - signature: signature.into_generic().into(), - signer: key.clone().into_generic().into(), + signature: signature.into_inner().into(), + signer: key.clone().into_inner().into(), }; self.set_proof(proof); true @@ -272,8 +272,8 @@ impl Statement { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { let proof = Proof::Secp256k1Ecdsa { - signature: signature.into_generic().into(), - signer: key.clone().into_generic().0, + signature: signature.into_inner().into(), + signer: key.clone().into_inner().0, }; self.set_proof(proof); true From 4c840c0f64ee4e86f2943fd9a4e1ca552deb3da5 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 17:00:05 +0200 Subject: [PATCH 44/78] Fixed commit placement --- primitives/statement-store/src/runtime_api.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 0c1b5f0ea4d18..16bf09a2e5318 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -92,8 +92,8 @@ sp_externalities::decl_extension! { pub struct StatementStoreExt(std::sync::Arc); } +// Host extensions for the runtime. #[cfg(feature = "std")] -/// Host extensions for the runtime. impl StatementStoreExt { /// Create new instance of externalities extensions. pub fn new(store: std::sync::Arc) -> Self { From 03906c8ce58f127749a4f4a85c58cdbe72267567 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 17:15:02 +0200 Subject: [PATCH 45/78] Use HashSet for tracking peers/statements --- client/network/statement/src/lib.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index f9fe971caaa9d..2595e338a9a96 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -48,7 +48,7 @@ use sp_statement_store::{ Hash, NetworkPriority, Statement, StatementSource, StatementStore, SubmitResult, }; use std::{ - collections::{hash_map::Entry, HashMap}, + collections::{hash_map::Entry, HashMap, HashSet}, iter, num::NonZeroUsize, pin::Pin, @@ -76,6 +76,8 @@ mod rep { pub const GOOD_STATEMENT: Rep = Rep::new(1 << 7, "Good statement"); /// Reputation change when a peer sends us a bad statement. pub const BAD_STATEMENT: Rep = Rep::new(-(1 << 12), "Bad statement"); + /// Reputation change when a peer sends us a duplicate statement. 
+ pub const DUPLICATE_STATEMENT: Rep = Rep::new(-(1 << 7), "Duplicate statement"); /// Reputation change when a peer sends us particularly useful statement pub const EXCELLENT_STATEMENT: Rep = Rep::new(1 << 8, "High priority statement"); } @@ -246,7 +248,7 @@ pub struct StatementHandler< /// these peers using the statement hash while the statement is /// imported. This prevents that we import the same statement /// multiple times concurrently. - pending_statements_peers: HashMap>, + pending_statements_peers: HashMap>, /// Network service to use to send messages and manage peers. network: N, /// Syncing service. @@ -413,11 +415,16 @@ where if let Ok(()) = self.queue_sender.unbounded_send((s, completion_sender)) { self.pending_statements .push(PendingStatement { validation: completion_receiver, hash }); - entry.insert(vec![who]); + let mut set = HashSet::new(); + set.insert(who); + entry.insert(set); } }, Entry::Occupied(mut entry) => { - entry.get_mut().push(who); + if !(entry.get_mut().insert(who)) { + // Already received this from the same peer. + self.network.report_peer(who, rep::DUPLICATE_STATEMENT); + } }, } } From 206488dd3268bf09ab18d6f8ae6b36b107190408 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 17:29:58 +0200 Subject: [PATCH 46/78] fmt --- client/cli/src/config.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 469ff89b64965..de3f5ed5cb8da 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -559,7 +559,7 @@ pub trait CliConfiguration: Sized { max_runtime_instances, announce_block: self.announce_block()?, role, - base_path: base_path, + base_path, informant_output_format: Default::default(), runtime_cache_size, }) From 26257837cdaeac68aed42ab3615d80602d48efe5 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 19:38:00 +0200 Subject: [PATCH 47/78] Use ExtendedHostFunctions --- Cargo.lock | 2 +- bin/node-template/node/src/command.rs | 2 +- bin/node/cli/src/command.rs | 2 +- bin/node/executor/Cargo.toml | 1 + bin/node/executor/src/lib.rs | 5 ++++- client/executor/Cargo.toml | 1 - client/executor/src/lib.rs | 8 +------- client/executor/src/native_executor.rs | 7 +++++-- 8 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 310551a9ca98d..77803bcbf9672 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5074,6 +5074,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-state-machine", + "sp-statement-store", "sp-tracing", "sp-trie", "wat", @@ -8864,7 +8865,6 @@ dependencies = [ "sp-runtime", "sp-runtime-interface", "sp-state-machine", - "sp-statement-store", "sp-tracing", "sp-trie", "sp-version", diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index fa53200c6349e..e121db820f2a3 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -191,7 +191,7 @@ pub fn run() -> sc_cli::Result<()> { Ok(( cmd.run::::ExtendHostFunctions, >, _>(Some(info_provider)), task_manager, diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index fd12214823ac1..b38b25d8fb3ad 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -246,7 +246,7 @@ pub fn run() -> Result<()> { Ok(( cmd.run::::ExtendHostFunctions, >, _>(Some(info_provider)), task_manager, diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index cb661f536f7b7..fd3c85faaacee 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ 
-24,6 +24,7 @@ sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } sp-state-machine = { version = "0.13.0", path = "../../../primitives/state-machine" } sp-tracing = { version = "6.0.0", path = "../../../primitives/tracing" } sp-trie = { version = "7.0.0", path = "../../../primitives/trie" } +sp-statement-store = { version = "4.0.0-dev", path = "../../../primitives/statement-store" } [dev-dependencies] criterion = "0.4.0" diff --git a/bin/node/executor/src/lib.rs b/bin/node/executor/src/lib.rs index 4e3ec9a0b34d5..3557a16740b8a 100644 --- a/bin/node/executor/src/lib.rs +++ b/bin/node/executor/src/lib.rs @@ -25,7 +25,10 @@ pub use sc_executor::NativeElseWasmExecutor; pub struct ExecutorDispatch; impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { - type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + type ExtendHostFunctions = ( + frame_benchmarking::benchmarking::HostFunctions, + sp_statement_store::runtime_api::HostFunctions, + ); fn dispatch(method: &str, data: &[u8]) -> Option> { kitchensink_runtime::api::dispatch(method, data) diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 7f27814053606..21a9bd70dde65 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -32,7 +32,6 @@ sp-runtime-interface = { version = "7.0.0", path = "../../primitives/runtime-int sp-trie = { version = "7.0.0", path = "../../primitives/trie" } sp-version = { version = "5.0.0", path = "../../primitives/version" } sp-wasm-interface = { version = "7.0.0", path = "../../primitives/wasm-interface" } -sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } [dev-dependencies] array-bytes = "4.1" diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index 5e6eaeb58418c..e5bae474e9e25 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -62,12 +62,6 @@ pub trait RuntimeVersionOf { ) -> error::Result; } -/// The host functions Substrate provides for the Wasm runtime environment. -/// -/// All these host functions will be callable from inside the Wasm environment. -pub type HostFunctions = - (sp_io::SubstrateHostFunctions, sp_statement_store::runtime_api::HostFunctions); - #[cfg(test)] mod tests { use super::*; @@ -80,7 +74,7 @@ mod tests { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let executor = WasmExecutor::::new( + let executor = WasmExecutor::::new( WasmExecutionMethod::Interpreted, Some(8), 8, diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 59fde1d98a639..c72cf3c9c91df 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -534,7 +534,8 @@ pub struct NativeElseWasmExecutor { /// Native runtime version info. native_version: NativeVersion, /// Fallback wasm executor. - wasm: WasmExecutor>, + wasm: + WasmExecutor>, } impl NativeElseWasmExecutor { @@ -571,7 +572,9 @@ impl NativeElseWasmExecutor { /// Create a new instance using the given [`WasmExecutor`]. 
pub fn new_with_wasm_executor( - executor: WasmExecutor>, + executor: WasmExecutor< + ExtendedHostFunctions, + >, ) -> Self { Self { native_version: D::native_version(), wasm: executor } } From a7f652a592167efedd563a26322f09d00dfdec67 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 19:48:34 +0200 Subject: [PATCH 48/78] Fixed benches --- bin/node/cli/benches/block_production.rs | 3 ++- bin/node/cli/benches/transaction_pool.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/node/cli/benches/block_production.rs b/bin/node/cli/benches/block_production.rs index c7f0cd20efd5b..7e45277484abf 100644 --- a/bin/node/cli/benches/block_production.rs +++ b/bin/node/cli/benches/block_production.rs @@ -108,7 +108,8 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { max_runtime_instances: 8, runtime_cache_size: 2, announce_block: true, - base_path: Some(base_path), + data_path: base_path.path().into(), + base_path, informant_output_format: Default::default(), wasm_runtime_overrides: None, }; diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs index 7488ec03363e7..055c3028e0805 100644 --- a/bin/node/cli/benches/transaction_pool.rs +++ b/bin/node/cli/benches/transaction_pool.rs @@ -102,7 +102,8 @@ fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { max_runtime_instances: 8, runtime_cache_size: 2, announce_block: true, - base_path: Some(base_path), + data_path: base_path.path().into(), + base_path, informant_output_format: Default::default(), wasm_runtime_overrides: None, }; From 0bf748e8674f7ab25715ad6ff6cec52d81fe7a60 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 5 Apr 2023 20:03:07 +0200 Subject: [PATCH 49/78] Tweaks --- bin/node-template/runtime/src/lib.rs | 5 ----- client/statement-store/src/lib.rs | 4 ++++ 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 4eb95d18969fe..50bcb67cb4790 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -267,11 +267,6 @@ impl pallet_sudo::Config for Runtime { type RuntimeCall = RuntimeCall; } -parameter_types! { - pub StatementCost: Balance = 1000; - pub StatementByteCost: Balance = 10; -} - /// Configure the pallet-template in pallets/template. impl pallet_template::Config for Runtime { type RuntimeEvent = RuntimeEvent; diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index aeb980db5e1b7..72de0355d4d3c 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -579,6 +579,8 @@ impl Store { Ok(store) } + /// Create memory index from the data. + // This may be moved to a background thread if it slows startup too much. fn populate(&self) -> Result<()> { { let mut index = self.index.write(); @@ -703,6 +705,7 @@ impl Store { } impl StatementStore for Store { + /// Return all statements SCALE-encoded. fn dump_encoded(&self) -> Result)>> { let index = self.index.read(); let mut result = Vec::with_capacity(index.entries.len()); @@ -906,6 +909,7 @@ impl StatementStore for Store { } } + /// Remove a statement by hash. 
fn remove(&self, hash: &Hash) -> Result<()> { let current_time = self.timestamp(); { From 8c883a796cef93bedf7f8d71ebad3dd773408f0b Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Mon, 17 Apr 2023 14:23:29 +0200 Subject: [PATCH 50/78] Apply suggestions from code review Co-authored-by: cheme --- client/statement-store/src/lib.rs | 4 ++-- frame/statement/src/lib.rs | 2 +- primitives/statement-store/src/lib.rs | 6 +++--- primitives/statement-store/src/runtime_api.rs | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 72de0355d4d3c..7ec59d6724346 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -109,7 +109,7 @@ struct StatementsForAccount { impl PartialOrd for PriorityKey { fn partial_cmp(&self, other: &Self) -> Option { - Some(self.priority.cmp(&other.priority).then_with(|| self.hash.cmp(&other.hash))) + Some(self.cmp(other)) } } @@ -430,7 +430,7 @@ impl Index { priority: channel_record.priority, }; if let Some((_channel, len)) = account_rec.by_priority.get(&key) { - would_free_size = *len; + would_free_size += *len; evicted.insert(channel_record.hash); } } diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index c6d33f27ea70d..20ccc13e982bb 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -112,7 +112,7 @@ where ::RuntimeEvent: TryInto>, sp_statement_store::BlockHash: From<::Hash>, { - /// Validate a statement against current state. This is supposed ti be called by the statement + /// Validate a statement against current state. This is supposed to be called by the statement /// store on the host side. pub fn validate_statement( _source: StatementSource, diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index cd8180743b33c..6c2d726d24150 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -128,9 +128,9 @@ impl Proof { } } -#[derive(Encode, Decode, TypeInfo, sp_core::RuntimeDebug, Clone, PartialEq, Eq)] -/// Statement attributes. Each statement is a list of 0 or more fields. Fields may only appear in +/// Statement attributes. Each statement is a list of 0 or more fields. Fields may only appear once and in /// the order declared here. +#[derive(Encode, Decode, TypeInfo, sp_core::RuntimeDebug, Clone, PartialEq, Eq)] #[repr(u8)] pub enum Field { /// Statement proof. @@ -153,8 +153,8 @@ pub enum Field { Data(Vec) = 8, } -#[derive(TypeInfo, sp_core::RuntimeDebug, PassByCodec, Clone, PartialEq, Eq, Default)] /// Statement structure. +#[derive(TypeInfo, sp_core::RuntimeDebug, PassByCodec, Clone, PartialEq, Eq, Default)] pub struct Statement { proof: Option, decryption_key: Option, diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 16bf09a2e5318..cb8c0a43690eb 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -35,7 +35,7 @@ pub struct ValidStatement { /// Max total data size for this account, as calculated by the runtime. pub max_size: u32, /// Global priority value. This is used to prioritize statements on the global scale. - /// If the global loimit of messages is reached, the statement with the lowest priority will be + /// If the global limit of messages is reached, the statement with the lowest priority will be /// removed first. 
pub global_priority: u32, } From 6c15ce96836ac7aaf1502b83590d40e2e1dee7a3 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 17 Apr 2023 16:09:55 +0200 Subject: [PATCH 51/78] Fixed priority mixup --- client/statement-store/src/lib.rs | 32 +++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 7ec59d6724346..f836471701edd 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -263,26 +263,26 @@ impl Index { self.expired.contains_key(hash) } - fn iter( + fn iterate_with( &self, key: Option, - topics: &[Topic], + match_all_topics: &[Topic], mut f: impl FnMut(&Hash) -> Result<()>, ) -> Result<()> { let empty = HashSet::new(); let mut sets: [&HashSet; 4] = [∅ 4]; - if topics.len() > 4 { + if match_all_topics.len() > 4 { return Ok(()) } - for (i, t) in topics.iter().enumerate() { + for (i, t) in match_all_topics.iter().enumerate() { let set = self.by_topic.get(t); if set.map(|s| s.len()).unwrap_or(0) == 0 { - // At least one of the topics does not exist in the index. + // At least one of the match_all_topics does not exist in the index. return Ok(()) } sets[i] = set.expect("Function returns if set is None"); } - let sets = &mut sets[0..topics.len()]; + let sets = &mut sets[0..match_all_topics.len()]; if sets.is_empty() && key.is_none() { // Iterate all entries for h in self.entries.keys() { @@ -475,10 +475,10 @@ impl Index { continue } - if entry.priority >= priority { + if entry.priority >= validation.global_priority { log::debug!( target: LOG_TARGET, - "Ignored message due global to constraints {:?} {} < {}", + "Ignored message due to global constraints {:?} {} < {}", HexDisplay::from(&hash), priority, entry.priority, @@ -492,7 +492,7 @@ impl Index { for h in &evicted { self.make_expired(h, current_time); } - self.insert_new(hash, *account, priority, statement); + self.insert_new(hash, *account, validation.global_priority, statement); MaybeInserted::Inserted(evicted) } } @@ -581,6 +581,8 @@ impl Store { /// Create memory index from the data. // This may be moved to a background thread if it slows startup too much. + // This function should only be used on startup. There should be no other DB operations when + // iterating the index. fn populate(&self) -> Result<()> { { let mut index = self.index.write(); @@ -603,6 +605,12 @@ impl Store { statement_with_meta.meta.global_priority, &statement_with_meta.statement, ); + } else { + log::debug!( + target: LOG_TARGET, + "Error decoding statement loaded from the DB: {:?}", + HexDisplay::from(&hash) + ); } } true @@ -638,7 +646,7 @@ impl Store { ) -> Result> { let mut result = Vec::new(); let index = self.index.read(); - index.iter(key, match_all_topics, |hash| { + index.iterate_with(key, match_all_topics, |hash| { match self.db.get(col::STATEMENTS, hash).map_err(|e| Error::Db(e.to_string()))? 
{ Some(entry) => { if let Ok(statement) = StatementWithMeta::decode(&mut entry.as_slice()) { @@ -1216,10 +1224,10 @@ mod tests { assert_eq!(store.index.read().entries.len(), 4); // Should be over the global size limit - assert_eq!(store.submit(statement(4, 1, None, 700), source), ignored); + assert_eq!(store.submit(statement(1, 1, None, 700), source), ignored); // Should be over the global count limit store.index.write().max_entries = 4; - assert_eq!(store.submit(statement(4, 1, None, 100), source), ignored); + assert_eq!(store.submit(statement(1, 1, None, 100), source), ignored); // Should evict statement from account 1 assert_eq!(store.submit(statement(4, 6, None, 100), source), ok); assert_eq!(store.index.read().expired.len(), 7); From f383ab6ed757cfee4364e96b18371182ddf2ff02 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 17 Apr 2023 16:16:55 +0200 Subject: [PATCH 52/78] Rename --- client/statement-store/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index f836471701edd..9ee234f509399 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -123,7 +123,7 @@ impl Ord for PriorityKey { struct Index { by_topic: HashMap>, by_dec_key: HashMap>, - statement_topics: HashMap; 4], Option)>, + topics_and_keys: HashMap; 4], Option)>, entries: HashMap, /* Statement hash -> (Account id, * global_priority, priority) */ expired: HashMap, // Value is expiration timestamp. @@ -228,7 +228,7 @@ impl Index { self.by_dec_key.entry(*k).or_default().insert(hash); } if nt > 0 || key.is_some() { - self.statement_topics.insert(hash, (all_topics, key)); + self.topics_and_keys.insert(hash, (all_topics, key)); } let priority = statement.priority().unwrap_or(0); self.entries.insert(hash, (account, global_priority, priority)); @@ -341,7 +341,7 @@ impl Index { let key = PriorityKey { hash: *hash, priority: global_priority }; let len = self.by_global_priority.remove(&key).unwrap_or(0); self.total_size -= len; - if let Some((topics, key)) = self.statement_topics.remove(hash) { + if let Some((topics, key)) = self.topics_and_keys.remove(hash) { for t in topics.into_iter().flatten() { if let Some(set) = self.by_topic.get_mut(&t) { set.remove(hash); From a978b2a227193b6e45063676234ce7e6cd2eff49 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 18 Apr 2023 11:38:24 +0200 Subject: [PATCH 53/78] newtypes for priorities --- client/statement-store/src/lib.rs | 64 +++++++++++++++++-------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 9ee234f509399..ac96ec9ad1a80 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -85,50 +85,55 @@ mod col { pub const COUNT: u8 = 3; } +#[derive(Eq, PartialEq, Debug, Ord, PartialOrd, Clone, Copy)] +struct Priority(u32); +#[derive(Eq, PartialEq, Debug, Ord, PartialOrd, Clone, Copy, Encode, Decode)] +struct GlobalPriority(u32); + #[derive(PartialEq, Eq)] -struct PriorityKey { +struct PriorityKey
<P> { hash: Hash, - priority: u32, + priority: P, +} + +impl<P: Ord> PartialOrd for PriorityKey<P> { + fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl<P: Ord> Ord for PriorityKey<P>
{ + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.priority.cmp(&other.priority).then_with(|| self.hash.cmp(&other.hash)) + } } #[derive(PartialEq, Eq)] struct ChannelEntry { hash: Hash, - priority: u32, + priority: Priority, } #[derive(Default)] struct StatementsForAccount { // Statements ordered by priority. - by_priority: BTreeMap, usize)>, + by_priority: BTreeMap, (Option, usize)>, // Channel to statement map. Only one statement per channel is allowed. channels: HashMap, // Sum of all `Data` field sizes. data_size: usize, } -impl PartialOrd for PriorityKey { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for PriorityKey { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.priority.cmp(&other.priority).then_with(|| self.hash.cmp(&other.hash)) - } -} - #[derive(Default)] struct Index { by_topic: HashMap>, by_dec_key: HashMap>, topics_and_keys: HashMap; 4], Option)>, - entries: HashMap, /* Statement hash -> (Account id, + entries: HashMap, /* Statement hash -> (Account id, * global_priority, priority) */ expired: HashMap, // Value is expiration timestamp. accounts: HashMap, - by_global_priority: BTreeMap, + by_global_priority: BTreeMap, usize>, max_entries: usize, max_size: usize, total_size: usize, @@ -184,7 +189,7 @@ pub struct Store { #[derive(Encode, Decode, Clone)] struct StatementMeta { - global_priority: u32, + global_priority: GlobalPriority, } #[derive(Encode, Decode)] @@ -213,7 +218,7 @@ impl Index { &mut self, hash: Hash, account: AccountId, - global_priority: u32, + global_priority: GlobalPriority, statement: &Statement, ) { let mut all_topics = [None; 4]; @@ -230,7 +235,7 @@ impl Index { if nt > 0 || key.is_some() { self.topics_and_keys.insert(hash, (all_topics, key)); } - let priority = statement.priority().unwrap_or(0); + let priority = Priority(statement.priority().unwrap_or(0)); self.entries.insert(hash, (account, global_priority, priority)); self.by_global_priority .insert(PriorityKey { hash, priority: global_priority }, statement.data_len()); @@ -396,7 +401,7 @@ impl Index { let mut evicted = HashSet::new(); let mut would_free_size = 0; - let priority = statement.priority().unwrap_or(0); + let priority = Priority(statement.priority().unwrap_or(0)); let (max_size, max_count) = (validation.max_size as usize, validation.max_count as usize); // It may happen that we can't delete enough lower priority messages // to satisfy size constraints. We check for that before deleting anything, @@ -408,7 +413,7 @@ impl Index { // Trying to replace channel message with lower priority log::debug!( target: LOG_TARGET, - "Ignored lower priority channel message: {:?} {} <= {}", + "Ignored lower priority channel message: {:?} {:?} <= {:?}", HexDisplay::from(&hash), priority, channel_record.priority, @@ -419,7 +424,7 @@ impl Index { // below. log::debug!( target: LOG_TARGET, - "Replacing higher priority channel message: {:?} ({}) > {:?} ({})", + "Replacing higher priority channel message: {:?} ({:?}) > {:?} ({:?})", HexDisplay::from(&hash), priority, HexDisplay::from(&channel_record.hash), @@ -451,7 +456,7 @@ impl Index { if entry.priority >= priority { log::debug!( target: LOG_TARGET, - "Ignored message due to constraints {:?} {} < {}", + "Ignored message due to constraints {:?} {:?} < {:?}", HexDisplay::from(&hash), priority, entry.priority, @@ -462,6 +467,7 @@ impl Index { would_free_size += len; } } + let global_priority = GlobalPriority(validation.global_priority); // Now check global constraints as well. 
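// `by_global_priority` is a BTreeMap keyed by (global priority, hash), so this scan visits
// existing statements from the lowest global priority upwards, accumulating how much space
// evicting them would free until the global size and count limits would be satisfied for the
// incoming statement. If that point can only be reached by evicting an entry whose global
// priority is equal to or higher than the incoming statement's, the new statement is ignored
// instead.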
for (entry, len) in self.by_global_priority.iter() { if (self.total_size - would_free_size + statement_len <= self.max_size) && @@ -475,10 +481,10 @@ impl Index { continue } - if entry.priority >= validation.global_priority { + if entry.priority >= global_priority { log::debug!( target: LOG_TARGET, - "Ignored message due to global constraints {:?} {} < {}", + "Ignored message due to global constraints {:?} {:?} < {:?}", HexDisplay::from(&hash), priority, entry.priority, @@ -492,7 +498,7 @@ impl Index { for h in &evicted { self.make_expired(h, current_time); } - self.insert_new(hash, *account, validation.global_priority, statement); + self.insert_new(hash, *account, global_priority, statement); MaybeInserted::Inserted(evicted) } } @@ -857,7 +863,7 @@ impl StatementStore for Store { }; let statement_with_meta = StatementWithMeta { - meta: StatementMeta { global_priority: validation.global_priority }, + meta: StatementMeta { global_priority: GlobalPriority(validation.global_priority) }, statement, }; From 83f343f45954f20709a5ed30e83e1e66e1655f1f Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 18 Apr 2023 11:46:15 +0200 Subject: [PATCH 54/78] Added MAX_TOPICS --- client/statement-store/src/lib.rs | 10 +++++----- primitives/statement-store/src/lib.rs | 7 +++++-- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index ac96ec9ad1a80..001c7241d5146 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -46,7 +46,7 @@ mod metrics; -pub use sp_statement_store::{Error, StatementStore}; +pub use sp_statement_store::{Error, StatementStore, MAX_TOPICS}; use metrics::MetricsLink as PrometheusMetrics; use parking_lot::RwLock; @@ -128,7 +128,7 @@ struct StatementsForAccount { struct Index { by_topic: HashMap>, by_dec_key: HashMap>, - topics_and_keys: HashMap; 4], Option)>, + topics_and_keys: HashMap; MAX_TOPICS], Option)>, entries: HashMap, /* Statement hash -> (Account id, * global_priority, priority) */ expired: HashMap, // Value is expiration timestamp. @@ -221,7 +221,7 @@ impl Index { global_priority: GlobalPriority, statement: &Statement, ) { - let mut all_topics = [None; 4]; + let mut all_topics = [None; MAX_TOPICS]; let mut nt = 0; while let Some(t) = statement.topic(nt) { self.by_topic.entry(t).or_default().insert(hash); @@ -275,8 +275,8 @@ impl Index { mut f: impl FnMut(&Hash) -> Result<()>, ) -> Result<()> { let empty = HashSet::new(); - let mut sets: [&HashSet; 4] = [∅ 4]; - if match_all_topics.len() > 4 { + let mut sets: [&HashSet; MAX_TOPICS] = [∅ MAX_TOPICS]; + if match_all_topics.len() > MAX_TOPICS { return Ok(()) } for (i, t) in match_all_topics.iter().enumerate() { diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 6c2d726d24150..e85a012e785af 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -41,6 +41,9 @@ pub type AccountId = [u8; 32]; /// Statement channel. pub type Channel = [u8; 32]; +/// Total number of topic fields allowed. +pub const MAX_TOPICS: usize = 4; + #[cfg(feature = "std")] pub use store_api::{ Error, NetworkPriority, Result, StatementSource, StatementStore, SubmitResult, @@ -161,7 +164,7 @@ pub struct Statement { channel: Option, priority: Option, num_topics: u8, - topics: [Topic; 4], + topics: [Topic; MAX_TOPICS], data: Option>, } @@ -414,7 +417,7 @@ impl Statement { /// Set topic by index. 
pub fn set_topic(&mut self, index: usize, topic: Topic) { - if index < 4 { + if index < MAX_TOPICS { self.topics[index] = topic; self.num_topics = self.num_topics.max(index as u8 + 1); } From 94b10a77c007a4e592c18a3d6497c5d9529f829b Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 18 Apr 2023 12:33:37 +0200 Subject: [PATCH 55/78] Fixed key filtering logic --- client/statement-store/src/lib.rs | 128 +++++++++++++++--------------- 1 file changed, 62 insertions(+), 66 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 001c7241d5146..fc2a11b9e3c40 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -127,7 +127,7 @@ struct StatementsForAccount { #[derive(Default)] struct Index { by_topic: HashMap>, - by_dec_key: HashMap>, + by_dec_key: HashMap, HashSet>, topics_and_keys: HashMap; MAX_TOPICS], Option)>, entries: HashMap, /* Statement hash -> (Account id, * global_priority, priority) */ @@ -229,9 +229,7 @@ impl Index { nt += 1; } let key = statement.decryption_key(); - if let Some(k) = &key { - self.by_dec_key.entry(*k).or_default().insert(hash); - } + self.by_dec_key.entry(key).or_default().insert(hash); if nt > 0 || key.is_some() { self.topics_and_keys.insert(hash, (all_topics, key)); } @@ -275,52 +273,35 @@ impl Index { mut f: impl FnMut(&Hash) -> Result<()>, ) -> Result<()> { let empty = HashSet::new(); - let mut sets: [&HashSet; MAX_TOPICS] = [∅ MAX_TOPICS]; + let mut sets: [&HashSet; MAX_TOPICS + 1] = [∅ MAX_TOPICS + 1]; if match_all_topics.len() > MAX_TOPICS { return Ok(()) } + let key_set = self.by_dec_key.get(&key); + if key_set.map(|s| s.len()).unwrap_or(0) == 0 { + // Key does not exist in the index. + return Ok(()) + } + sets[0] = key_set.expect("Function returns if key_set is None"); for (i, t) in match_all_topics.iter().enumerate() { let set = self.by_topic.get(t); if set.map(|s| s.len()).unwrap_or(0) == 0 { // At least one of the match_all_topics does not exist in the index. return Ok(()) } - sets[i] = set.expect("Function returns if set is None"); + sets[i + 1] = set.expect("Function returns if set is None"); } - let sets = &mut sets[0..match_all_topics.len()]; - if sets.is_empty() && key.is_none() { - // Iterate all entries - for h in self.entries.keys() { - log::trace!(target: LOG_TARGET, "Iterating: {:?}", HexDisplay::from(h)); - f(h)? - } - } else { - // Start with the smallest topic set or the key set. - sets.sort_by_key(|s| s.len()); - if let Some(key) = key { - let key_set = - if let Some(set) = self.by_dec_key.get(&key) { set } else { return Ok(()) }; - for item in key_set { - if sets.iter().all(|set| set.contains(item)) { - log::trace!( - target: LOG_TARGET, - "Iterating by key: {:?}", - HexDisplay::from(item) - ); - f(item)? - } - } - } else { - for item in sets[0] { - if sets[1..].iter().all(|set| set.contains(item)) { - log::trace!( - target: LOG_TARGET, - "Iterating by topic: {:?}", - HexDisplay::from(item) - ); - f(item)? - } - } + let sets = &mut sets[0..match_all_topics.len() + 1]; + // Start with the smallest topic set or the key set. + sets.sort_by_key(|s| s.len()); + for item in sets[0] { + if sets[1..].iter().all(|set| set.contains(item)) { + log::trace!( + target: LOG_TARGET, + "Iterating by topic/key: statement {:?}", + HexDisplay::from(item) + ); + f(item)? 
} } Ok(()) @@ -352,10 +333,8 @@ impl Index { set.remove(hash); } } - if let Some(k) = key { - if let Some(set) = self.by_dec_key.get_mut(&k) { - set.remove(hash); - } + if let Some(set) = self.by_dec_key.get_mut(&key) { + set.remove(hash); } } self.expired.insert(*hash, current_time); @@ -955,7 +934,7 @@ mod tests { use sp_statement_store::{ runtime_api::{InvalidStatement, ValidStatement, ValidateStatement}, AccountId, Channel, NetworkPriority, Proof, SignatureVerificationResult, Statement, - StatementSource, StatementStore, SubmitResult, Topic, + StatementSource, StatementStore, SubmitResult, Topic, DecryptionKey, }; type Extrinsic = sp_runtime::OpaqueExtrinsic; @@ -1052,15 +1031,18 @@ mod tests { } fn signed_statement(data: u8) -> Statement { - signed_statement_with_topics(data, &[]) + signed_statement_with_topics(data, &[], None) } - fn signed_statement_with_topics(data: u8, topics: &[Topic]) -> Statement { + fn signed_statement_with_topics(data: u8, topics: &[Topic], dec_key: Option) -> Statement { let mut statement = Statement::new(); statement.set_plain_data(vec![data]); for i in 0..topics.len() { statement.set_topic(i, topics[i]); } + if let Some(key) = dec_key { + statement.set_decryption_key(key); + } let kp = sp_core::ed25519::Pair::from_string("//Alice", None).unwrap(); statement.sign_ed25519_private(&kp); statement @@ -1072,6 +1054,12 @@ mod tests { topic } + fn dec_key(data: u64) -> DecryptionKey { + let mut dec_key: DecryptionKey = Default::default(); + dec_key[0..8].copy_from_slice(&data.to_le_bytes()); + dec_key + } + fn account(id: u64) -> AccountId { let mut account: AccountId = Default::default(); account[0..8].copy_from_slice(&id.to_le_bytes()); @@ -1149,39 +1137,47 @@ mod tests { } #[test] - fn search_by_topic() { + fn search_by_topic_and_key() { let (store, _temp) = test_store(); let statement0 = signed_statement(0); - let statement1 = signed_statement_with_topics(1, &[topic(0)]); - let statement2 = signed_statement_with_topics(2, &[topic(0), topic(1)]); - let statement3 = signed_statement_with_topics(3, &[topic(0), topic(1), topic(2)]); + let statement1 = signed_statement_with_topics(1, &[topic(0)], None); + let statement2 = signed_statement_with_topics(2, &[topic(0), topic(1)], Some(dec_key(2))); + let statement3 = signed_statement_with_topics(3, &[topic(0), topic(1), topic(2)], None); let statement4 = - signed_statement_with_topics(4, &[topic(0), topic(42), topic(2), topic(3)]); + signed_statement_with_topics(4, &[topic(0), topic(42), topic(2), topic(3)], None); let statements = vec![statement0, statement1, statement2, statement3, statement4]; for s in &statements { store.submit(s.clone(), StatementSource::Network); } - let assert_topics = |topics: &[u64], expected: &[u8]| { + let assert_topics = |topics: &[u64], key: Option, expected: &[u8]| { + let key = key.map(dec_key); let topics: Vec<_> = topics.iter().map(|t| topic(*t)).collect(); let mut got_vals: Vec<_> = - store.broadcasts(&topics).unwrap().into_iter().map(|d| d[0]).collect(); + if let Some(key) = key { + store.posted(&topics, key).unwrap().into_iter().map(|d| d[0]).collect() + } else { + store.broadcasts(&topics).unwrap().into_iter().map(|d| d[0]).collect() + }; got_vals.sort(); assert_eq!(expected.to_vec(), got_vals); }; - assert_topics(&[], &[0, 1, 2, 3, 4]); - assert_topics(&[0], &[1, 2, 3, 4]); - assert_topics(&[1], &[2, 3]); - assert_topics(&[2], &[3, 4]); - assert_topics(&[3], &[4]); - assert_topics(&[42], &[4]); - - assert_topics(&[0, 1], &[2, 3]); - assert_topics(&[1, 2], &[3]); - 
assert_topics(&[99], &[]); - assert_topics(&[0, 99], &[]); - assert_topics(&[0, 1, 2, 3, 42], &[]); + assert_topics(&[], None, &[0, 1, 3, 4]); + assert_topics(&[], Some(2), &[2]); + assert_topics(&[0], None, &[1, 3, 4]); + assert_topics(&[1], None, &[3]); + assert_topics(&[2], None, &[3, 4]); + assert_topics(&[3], None, &[4]); + assert_topics(&[42], None, &[4]); + + assert_topics(&[0, 1], None, &[3]); + assert_topics(&[0, 1], Some(2), &[2]); + assert_topics(&[0, 1, 99], Some(2), &[]); + assert_topics(&[1, 2], None, &[3]); + assert_topics(&[99], None, &[]); + assert_topics(&[0, 99], None, &[]); + assert_topics(&[0, 1, 2, 3, 42], None, &[]); } #[test] From 1c22fef76e714ce797dea846b41a63445dc7f0ff Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 18 Apr 2023 12:42:30 +0200 Subject: [PATCH 56/78] Remove empty entrie --- client/statement-store/src/lib.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index fc2a11b9e3c40..d76d05b22e05f 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -329,12 +329,18 @@ impl Index { self.total_size -= len; if let Some((topics, key)) = self.topics_and_keys.remove(hash) { for t in topics.into_iter().flatten() { - if let Some(set) = self.by_topic.get_mut(&t) { - set.remove(hash); + if let std::collections::hash_map::Entry::Occupied(mut set) = self.by_topic.entry(t) { + set.get_mut().remove(hash); + if set.get().is_empty() { + set.remove_entry(); + } } } - if let Some(set) = self.by_dec_key.get_mut(&key) { - set.remove(hash); + if let std::collections::hash_map::Entry::Occupied(mut set) = self.by_dec_key.entry(key) { + set.get_mut().remove(hash); + if set.get().is_empty() { + set.remove_entry(); + } } } self.expired.insert(*hash, current_time); From f08810b5093b740ec6456d52ac77b8e2e57eaa59 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 18 Apr 2023 13:02:23 +0200 Subject: [PATCH 57/78] Removed prefix from signing --- primitives/statement-store/src/lib.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index e85a012e785af..5f9f989769e5f 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -194,7 +194,7 @@ impl Decode for Statement { impl Encode for Statement { fn encode(&self) -> Vec { - self.encoded(true) + self.encoded(false) } } @@ -392,7 +392,7 @@ impl Statement { /// Return encoded fields that can be signed to construct or verify a proof fn signature_material(&self) -> Vec { - self.encoded(false) + self.encoded(true) } /// Return a copy of this statement with proof removed @@ -433,10 +433,10 @@ impl Statement { self.data = Some(data) } - fn encoded(&self, with_proof: bool) -> Vec { + fn encoded(&self, for_signing: bool) -> Vec { // Encoding matches that of Vec. Basically this just means accepting that there // will be a prefix of vector length. 
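// When encoding the signature material (`for_signing` is true), the proof field is skipped and
// no field-count prefix is written, so the signature covers only the remaining statement fields.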
- let num_fields = if with_proof && self.proof.is_some() { 1 } else { 0 } + + let num_fields = if !for_signing && self.proof.is_some() { 1 } else { 0 } + if self.decryption_key.is_some() { 1 } else { 0 } + if self.priority.is_some() { 1 } else { 0 } + if self.channel.is_some() { 1 } else { 0 } + @@ -444,10 +444,10 @@ impl Statement { self.num_topics as u32; let mut output = Vec::new(); - let compact_len = codec::Compact::(num_fields); - compact_len.encode_to(&mut output); + if !for_signing { + let compact_len = codec::Compact::(num_fields); + compact_len.encode_to(&mut output); - if with_proof { if let Some(proof) = &self.proof { 0u8.encode_to(&mut output); proof.encode_to(&mut output); From 7849cc882dbc44f2e9631b341b1ab5ffc12fce67 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 18 Apr 2023 14:02:53 +0200 Subject: [PATCH 58/78] More documentation --- frame/statement/src/lib.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 20ccc13e982bb..e4e8ec0f77e36 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -22,6 +22,16 @@ //! ## Overview //! //! The Statement pallet provides means to create and validate statements for the statement store. +//! +//! For each statement validation function calculates the following three values based on the +//! statement author balance: +//! `max_count`: Maximum number of statements allowed for the author (signer) of this statement. +//! `max_size`: Maximum total size of statements allowed for the author (signer) of this statement. +//! `global_priority`: A numerical value that defines the order in which statements are evicted when +//! the statement store hits global constraints. This is simply balance divided by `StatementCost`. +//! +//! This pallet also contains an offchain worker that turns on-chain statement events into +//! statements. These statements are placed in the store and propagated over the network. #![cfg_attr(not(feature = "std"), no_std)] From 64ac7f54ef566f62281c4f710630bb5c647fcc57 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 18 Apr 2023 14:03:14 +0200 Subject: [PATCH 59/78] fmt --- client/statement-store/src/lib.rs | 34 +++++++++++++++++---------- primitives/statement-store/src/lib.rs | 4 ++-- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index d76d05b22e05f..54ae46d8b6aa0 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -129,8 +129,9 @@ struct Index { by_topic: HashMap>, by_dec_key: HashMap, HashSet>, topics_and_keys: HashMap; MAX_TOPICS], Option)>, - entries: HashMap, /* Statement hash -> (Account id, - * global_priority, priority) */ + entries: HashMap, /* Statement hash -> (Account + * id, + * global_priority, priority) */ expired: HashMap, // Value is expiration timestamp. 
accounts: HashMap, by_global_priority: BTreeMap, usize>, @@ -329,14 +330,18 @@ impl Index { self.total_size -= len; if let Some((topics, key)) = self.topics_and_keys.remove(hash) { for t in topics.into_iter().flatten() { - if let std::collections::hash_map::Entry::Occupied(mut set) = self.by_topic.entry(t) { + if let std::collections::hash_map::Entry::Occupied(mut set) = + self.by_topic.entry(t) + { set.get_mut().remove(hash); if set.get().is_empty() { set.remove_entry(); } } } - if let std::collections::hash_map::Entry::Occupied(mut set) = self.by_dec_key.entry(key) { + if let std::collections::hash_map::Entry::Occupied(mut set) = + self.by_dec_key.entry(key) + { set.get_mut().remove(hash); if set.get().is_empty() { set.remove_entry(); @@ -939,8 +944,8 @@ mod tests { use sp_core::Pair; use sp_statement_store::{ runtime_api::{InvalidStatement, ValidStatement, ValidateStatement}, - AccountId, Channel, NetworkPriority, Proof, SignatureVerificationResult, Statement, - StatementSource, StatementStore, SubmitResult, Topic, DecryptionKey, + AccountId, Channel, DecryptionKey, NetworkPriority, Proof, SignatureVerificationResult, + Statement, StatementSource, StatementStore, SubmitResult, Topic, }; type Extrinsic = sp_runtime::OpaqueExtrinsic; @@ -1040,7 +1045,11 @@ mod tests { signed_statement_with_topics(data, &[], None) } - fn signed_statement_with_topics(data: u8, topics: &[Topic], dec_key: Option) -> Statement { + fn signed_statement_with_topics( + data: u8, + topics: &[Topic], + dec_key: Option, + ) -> Statement { let mut statement = Statement::new(); statement.set_plain_data(vec![data]); for i in 0..topics.len() { @@ -1159,12 +1168,11 @@ mod tests { let assert_topics = |topics: &[u64], key: Option, expected: &[u8]| { let key = key.map(dec_key); let topics: Vec<_> = topics.iter().map(|t| topic(*t)).collect(); - let mut got_vals: Vec<_> = - if let Some(key) = key { - store.posted(&topics, key).unwrap().into_iter().map(|d| d[0]).collect() - } else { - store.broadcasts(&topics).unwrap().into_iter().map(|d| d[0]).collect() - }; + let mut got_vals: Vec<_> = if let Some(key) = key { + store.posted(&topics, key).unwrap().into_iter().map(|d| d[0]).collect() + } else { + store.broadcasts(&topics).unwrap().into_iter().map(|d| d[0]).collect() + }; got_vals.sort(); assert_eq!(expected.to_vec(), got_vals); }; diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 5f9f989769e5f..14def6b128834 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -131,8 +131,8 @@ impl Proof { } } -/// Statement attributes. Each statement is a list of 0 or more fields. Fields may only appear once and in -/// the order declared here. +/// Statement attributes. Each statement is a list of 0 or more fields. Fields may only appear once +/// and in the order declared here. 
#[derive(Encode, Decode, TypeInfo, sp_core::RuntimeDebug, Clone, PartialEq, Eq)] #[repr(u8)] pub enum Field { From 21dd02081a5fb250c4176e843516c7a25033198a Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 19 Apr 2023 12:44:02 +0200 Subject: [PATCH 60/78] Moved store setup from sc-service to node --- Cargo.lock | 4 +- bin/node-template/node/src/service.rs | 4 -- bin/node/cli/Cargo.toml | 3 ++ bin/node/cli/src/service.rs | 70 +++++++++++++++++++++----- bin/node/rpc/Cargo.toml | 1 + bin/node/rpc/src/lib.rs | 7 ++- client/service/Cargo.toml | 2 - client/service/src/builder.rs | 71 --------------------------- client/service/src/error.rs | 3 -- client/service/src/lib.rs | 3 -- 10 files changed, 69 insertions(+), 99 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 768680b7f3d5e..82c8d18303fd2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5028,6 +5028,7 @@ dependencies = [ "sc-keystore", "sc-network", "sc-network-common", + "sc-network-statement", "sc-network-sync", "sc-rpc", "sc-service", @@ -5161,6 +5162,7 @@ dependencies = [ "sp-consensus-babe", "sp-keystore", "sp-runtime", + "sp-statement-store", "substrate-frame-rpc-system", "substrate-state-trie-migration-rpc", ] @@ -9496,14 +9498,12 @@ dependencies = [ "sc-network-bitswap", "sc-network-common", "sc-network-light", - "sc-network-statement", "sc-network-sync", "sc-network-transactions", "sc-offchain", "sc-rpc", "sc-rpc-server", "sc-rpc-spec-v2", - "sc-statement-store", "sc-storage-monitor", "sc-sysinfo", "sc-telemetry", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index f15c200987a82..723d1db3eea37 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -133,7 +133,6 @@ pub fn new_partial( keystore_container, select_chain, transaction_pool, - statement_store: None, other: (grandpa_block_import, grandpa_link, telemetry), }) } @@ -148,7 +147,6 @@ pub fn new_full(mut config: Configuration) -> Result keystore_container, select_chain, transaction_pool, - statement_store, other: (block_import, grandpa_link, mut telemetry), } = new_partial(&config)?; @@ -172,7 +170,6 @@ pub fn new_full(mut config: Configuration) -> Result config: &config, client: client.clone(), transaction_pool: transaction_pool.clone(), - statement_store: statement_store.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, block_announce_validator_builder: None, @@ -212,7 +209,6 @@ pub fn new_full(mut config: Configuration) -> Result keystore: keystore_container.keystore(), task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), - statement_store: statement_store.clone(), rpc_builder: rpc_extensions_builder, backend, system_rpc_tx, diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index e89546534b03e..4bf11136b4d6f 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -70,6 +70,7 @@ sc-statement-store = { version = "4.0.0-dev", path = "../../../client/statement- sc-network = { version = "0.10.0-dev", path = "../../../client/network" } sc-network-common = { version = "0.10.0-dev", path = "../../../client/network/common" } sc-network-sync = { version = "0.10.0-dev", path = "../../../client/network/sync" } +sc-network-statement = { version = "0.10.0-dev", path = "../../../client/network/statement" } sc-consensus-slots = { version = "0.10.0-dev", path = "../../../client/consensus/slots" } sc-consensus-babe = { version = "0.10.0-dev", path = "../../../client/consensus/babe" } grandpa = { version = "0.10.0-dev", package = 
"sc-consensus-grandpa", path = "../../../client/consensus/grandpa" } @@ -104,6 +105,8 @@ node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } serde_json = "1.0.85" +tokio = { version = "1.22.0", features = ["macros", "time", "parking_lot"] } + [dev-dependencies] sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-client-db = { version = "0.10.0-dev", path = "../../../client/db" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index ecb28b1c3c187..761f6ca7adcd2 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -35,6 +35,7 @@ use sc_network::{event::Event, NetworkEventStream, NetworkService}; use sc_network_common::sync::warp::WarpSyncParams; use sc_network_sync::SyncingService; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; +use sc_statement_store::Store as StatementStore; use sc_telemetry::{Telemetry, TelemetryWorker}; use sp_api::ProvideRuntimeApi; use sp_core::crypto::Pair; @@ -148,6 +149,7 @@ pub fn new_partial( ), grandpa::SharedVoterState, Option, + Arc, ), >, ServiceError, @@ -227,6 +229,12 @@ pub fn new_partial( let import_setup = (block_import, grandpa_link, babe_link); + let statement_store = sc_statement_store::Store::new_shared( + &config.data_path, + client.clone(), + config.prometheus_registry(), + ).map_err(|e| ServiceError::Other(format!("Statement store error: {:?}", e)))?; + let (rpc_extensions_builder, rpc_setup) = { let (_, grandpa_link, _) = &import_setup; @@ -247,6 +255,7 @@ pub fn new_partial( let chain_spec = config.chain_spec.cloned_box(); let rpc_backend = backend.clone(); + let rpc_statement_store = statement_store.clone(); let rpc_extensions_builder = move |deny_unsafe, subscription_executor| { let deps = node_rpc::FullDeps { client: client.clone(), @@ -265,6 +274,7 @@ pub fn new_partial( subscription_executor, finality_provider: finality_proof_provider.clone(), }, + statement_store: rpc_statement_store.clone(), }; node_rpc::create_full(deps, rpc_backend.clone()).map_err(Into::into) @@ -273,12 +283,6 @@ pub fn new_partial( (rpc_extensions_builder, shared_voter_state2) }; - let statement_store = sc_statement_store::Store::new_shared( - &config.data_path, - client.clone(), - config.prometheus_registry(), - )?; - Ok(sc_service::PartialComponents { client, backend, @@ -287,8 +291,7 @@ pub fn new_partial( select_chain, import_queue, transaction_pool, - statement_store: Some(statement_store), - other: (rpc_extensions_builder, import_setup, rpc_setup, telemetry), + other: (rpc_extensions_builder, import_setup, rpc_setup, telemetry, statement_store), }) } @@ -332,8 +335,7 @@ pub fn new_full_base( keystore_container, select_chain, transaction_pool, - statement_store, - other: (rpc_builder, import_setup, rpc_setup, mut telemetry), + other: (rpc_builder, import_setup, rpc_setup, mut telemetry, statement_store), } = new_partial(&config)?; let shared_voter_state = rpc_setup; @@ -343,6 +345,20 @@ pub fn new_full_base( &config.chain_spec, ); + let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new( + config.protocol_id().clone(), + client + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), + config.chain_spec.fork_id(), + ); + config + .network + .extra_sets + .push(statement_handler_proto.set_config()); + config .network .extra_sets @@ -358,7 +374,6 @@ pub 
fn new_full_base( config: &config, client: client.clone(), transaction_pool: transaction_pool.clone(), - statement_store: statement_store.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, block_announce_validator_builder: None, @@ -394,7 +409,6 @@ pub fn new_full_base( system_rpc_tx, tx_handler_controller, sync_service: sync_service.clone(), - statement_store, telemetry: telemetry.as_mut(), })?; @@ -536,7 +550,7 @@ pub fn new_full_base( sync: Arc::new(sync_service.clone()), telemetry: telemetry.as_ref().map(|x| x.handle()), voting_rule: grandpa::VotingRulesBuilder::default().build(), - prometheus_registry, + prometheus_registry: prometheus_registry.clone(), shared_voter_state, }; @@ -549,6 +563,36 @@ pub fn new_full_base( ); } + // Perform periodic statement store maintenance + let store = statement_store.clone(); + task_manager.spawn_handle().spawn("statement-store-notifications", Some("statement-store"), async move { + let mut interval = tokio::time::interval(sc_statement_store::MAINTENANCE_PERIOD); + loop { + interval.tick().await; + store.maintain(); + } + }); + + // Spawn statement protocol worker + let statement_protocol_executor = { + let spawn_handle = Clone::clone(&task_manager.spawn_handle()); + Box::new(move |fut| { + spawn_handle.spawn("network-statement-validator", Some("networking"), fut); + }) + }; + let statement_handler = statement_handler_proto.build( + network.clone(), + sync_service.clone(), + statement_store.clone(), + prometheus_registry.as_ref(), + statement_protocol_executor, + )?; + task_manager.spawn_handle().spawn( + "network-statement-handler", + Some("networking"), + statement_handler.run(), + ); + network_starter.start_network(); Ok(NewFullBase { task_manager, diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 724efbe9a5721..8a336242cd267 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -35,5 +35,6 @@ sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/c sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } sp-keystore = { version = "0.13.0", path = "../../../primitives/keystore" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } +sp-statement-store = { version = "4.0.0-dev", path = "../../../primitives/statement-store" } substrate-frame-rpc-system = { version = "4.0.0-dev", path = "../../../utils/frame/rpc/system" } substrate-state-trie-migration-rpc = { version = "4.0.0-dev", path = "../../../utils/frame/rpc/state-trie-migration-rpc/" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 5f61fdcd55d97..fd15b6beb9073 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -88,6 +88,8 @@ pub struct FullDeps { pub babe: BabeDeps, /// GRANDPA specific dependencies. pub grandpa: GrandpaDeps, + /// Shared statement store reference. + pub statement_store: Arc, } /// Instantiate all Full RPC extensions. 
@@ -120,12 +122,13 @@ where use sc_consensus_grandpa_rpc::{Grandpa, GrandpaApiServer}; use sc_rpc::dev::{Dev, DevApiServer}; use sc_rpc_spec_v2::chain_spec::{ChainSpec, ChainSpecApiServer}; + use sc_rpc::statement::StatementApiServer; use sc_sync_state_rpc::{SyncState, SyncStateApiServer}; use substrate_frame_rpc_system::{System, SystemApiServer}; use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; let mut io = RpcModule::new(()); - let FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa } = deps; + let FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa, statement_store } = deps; let BabeDeps { keystore, babe_worker_handle } = babe; let GrandpaDeps { @@ -169,6 +172,8 @@ where io.merge(StateMigration::new(client.clone(), backend, deny_unsafe).into_rpc())?; io.merge(Dev::new(client, deny_unsafe).into_rpc())?; + let statement_store = sc_rpc::statement::StatementStore::new(statement_store, deny_unsafe).into_rpc(); + io.merge(statement_store)?; Ok(io) } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 4739dbead3248..b4ce3bbbb7f1c 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -53,14 +53,12 @@ sc-network-common = { version = "0.10.0-dev", path = "../network/common" } sc-network-light = { version = "0.10.0-dev", path = "../network/light" } sc-network-sync = { version = "0.10.0-dev", path = "../network/sync" } sc-network-transactions = { version = "0.10.0-dev", path = "../network/transactions" } -sc-network-statement = { version = "0.10.0-dev", path = "../network/statement" } sc-chain-spec = { version = "4.0.0-dev", path = "../chain-spec" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } sc-client-db = { version = "0.10.0-dev", default-features = false, path = "../db" } codec = { package = "parity-scale-codec", version = "3.2.2" } sc-executor = { version = "0.10.0-dev", path = "../executor" } -sc-statement-store = { version = "4.0.0-dev", path = "../statement-store" } sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sp-transaction-pool = { version = "4.0.0-dev", path = "../../primitives/transaction-pool" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../transaction-pool/api" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 471f71ce19c18..a877b87caff3e 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -55,12 +55,10 @@ use sc_rpc::{ chain::ChainApiServer, offchain::OffchainApiServer, state::{ChildStateApiServer, StateApiServer}, - statement::StatementApiServer, system::SystemApiServer, DenyUnsafe, SubscriptionTaskExecutor, }; use sc_rpc_spec_v2::{chain_head::ChainHeadApiServer, transaction::TransactionApiServer}; -use sc_statement_store::Store as StatementStore; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; use sc_transaction_pool_api::MaintainedTransactionPool; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; @@ -347,8 +345,6 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub keystore: KeystorePtr, /// A shared transaction pool. pub transaction_pool: Arc, - /// Shared statement store. 
- pub statement_store: Option>, /// Builds additional [`RpcModule`]s that should be added to the server pub rpc_builder: Box Result, Error>>, @@ -434,7 +430,6 @@ where backend, keystore, transaction_pool, - statement_store, rpc_builder, network, system_rpc_tx, @@ -482,18 +477,6 @@ where ), ); - // Perform periodic statement store maintenance - let store = statement_store.clone(); - if let Some(store) = store { - spawn_handle.spawn("statement-store-notifications", Some("statement-store"), async move { - let mut interval = tokio::time::interval(sc_statement_store::MAINTENANCE_PERIOD); - loop { - interval.tick().await; - store.maintain(); - } - }); - } - // Prometheus metrics. let metrics_service = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { @@ -531,7 +514,6 @@ where task_manager.spawn_handle(), client.clone(), transaction_pool.clone(), - statement_store.clone(), keystore.clone(), system_rpc_tx.clone(), &config, @@ -631,7 +613,6 @@ fn gen_rpc_module( spawn_handle: SpawnTaskHandle, client: Arc, transaction_pool: Arc, - statement_store: Option>, keystore: KeystorePtr, system_rpc_tx: TracingUnboundedSender>, config: &Configuration, @@ -715,10 +696,6 @@ where rpc_api.merge(offchain).map_err(|e| Error::Application(e.into()))?; } - if let Some(store) = statement_store { - let store = sc_rpc::statement::StatementStore::new(store, deny_unsafe).into_rpc(); - rpc_api.merge(store).map_err(|e| Error::Application(e.into()))?; - } // Part of the RPC v2 spec. rpc_api.merge(transaction_v2).map_err(|e| Error::Application(e.into()))?; @@ -745,8 +722,6 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { pub client: Arc, /// A shared transaction pool. pub transaction_pool: Arc, - /// A shared statement store. - pub statement_store: Option>, /// A handle for spawning tasks. pub spawn_handle: SpawnTaskHandle, /// An import queue. 
@@ -788,7 +763,6 @@ where config, client, transaction_pool, - statement_store, spawn_handle, import_queue, block_announce_validator_builder, @@ -942,26 +916,6 @@ where .extra_sets .insert(0, transactions_handler_proto.set_config()); - // crate statment protocol and add it to the list of supported protocols of `network_params` - - let statement_handler_proto = if statement_store.is_some() { - let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new( - protocol_id.clone(), - client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), - config.chain_spec.fork_id(), - ); - network_params - .network_config - .extra_sets - .insert(0, statement_handler_proto.set_config()); - Some(statement_handler_proto) - } else { - None - }; let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); let network_mut = sc_network::NetworkWorker::new(network_params)?; let network = network_mut.service().clone(); @@ -974,31 +928,6 @@ where )?; spawn_handle.spawn("network-transactions-handler", Some("networking"), tx_handler.run()); - // crate statement gossip protocol and add it to the list of supported protocols of - // `network_params` - if let Some(statement_store) = statement_store { - let statement_protocol_executor = { - let spawn_handle = Clone::clone(&spawn_handle); - Box::new(move |fut| { - spawn_handle.spawn("network-statement-validator", Some("networking"), fut); - }) - }; - let statement_handler_proto = statement_handler_proto - .expect("statement_handler_proto is always created when statement_store is `Some`"); - let statement_handler = statement_handler_proto.build( - network.clone(), - sync_service.clone(), - statement_store.clone(), - config.prometheus_config.as_ref().map(|config| &config.registry), - statement_protocol_executor, - )?; - spawn_handle.spawn( - "network-statement-handler", - Some("networking"), - statement_handler.run(), - ); - } - spawn_handle.spawn_blocking( "chain-sync-network-service-provider", Some("networking"), diff --git a/client/service/src/error.rs b/client/service/src/error.rs index be0e5aae15e85..c871342c771eb 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -60,9 +60,6 @@ pub enum Error { #[error("Application")] Application(#[from] Box), - #[error(transparent)] - StatementStore(#[from] sc_statement_store::Error), - #[error("Other: {0}")] Other(String), } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 9c8d5cd7de4d9..c0c7c537c64dc 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -85,7 +85,6 @@ pub use sc_network_transactions::config::{TransactionImport, TransactionImportFu pub use sc_rpc::{ RandomIntegerSubscriptionId, RandomStringSubscriptionId, RpcSubscriptionIdProvider, }; -pub use sc_statement_store::Store as StatementStore; pub use sc_tracing::TracingReceiver; pub use sc_transaction_pool::Options as TransactionPoolOptions; pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool}; @@ -141,8 +140,6 @@ pub struct PartialComponents, - /// A shared statement store. - pub statement_store: Option>, /// Everything else that needs to be passed into the main build function. 
pub other: Other, } From d72290caa3b35732491d7f4050502b8313b72318 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 19 Apr 2023 13:12:58 +0200 Subject: [PATCH 61/78] Handle maintenance task in sc-statement-store --- Cargo.lock | 1 + bin/node/cli/Cargo.toml | 2 -- bin/node/cli/src/service.rs | 11 +---------- client/statement-store/Cargo.toml | 1 + client/statement-store/src/lib.rs | 17 ++++++++++++++--- 5 files changed, 17 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82c8d18303fd2..168d330ec460a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9606,6 +9606,7 @@ dependencies = [ "sp-tracing", "substrate-prometheus-endpoint", "tempfile", + "tokio", ] [[package]] diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 4bf11136b4d6f..f80f1a5ea63e4 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -105,8 +105,6 @@ node-inspect = { version = "0.9.0-dev", optional = true, path = "../inspect" } try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } serde_json = "1.0.85" -tokio = { version = "1.22.0", features = ["macros", "time", "parking_lot"] } - [dev-dependencies] sc-keystore = { version = "4.0.0-dev", path = "../../../client/keystore" } sc-client-db = { version = "0.10.0-dev", path = "../../../client/db" } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 761f6ca7adcd2..7ad9335fb1908 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -233,6 +233,7 @@ pub fn new_partial( &config.data_path, client.clone(), config.prometheus_registry(), + &task_manager.spawn_handle(), ).map_err(|e| ServiceError::Other(format!("Statement store error: {:?}", e)))?; let (rpc_extensions_builder, rpc_setup) = { @@ -563,16 +564,6 @@ pub fn new_full_base( ); } - // Perform periodic statement store maintenance - let store = statement_store.clone(); - task_manager.spawn_handle().spawn("statement-store-notifications", Some("statement-store"), async move { - let mut interval = tokio::time::interval(sc_statement_store::MAINTENANCE_PERIOD); - loop { - interval.tick().await; - store.maintain(); - } - }); - // Spawn statement protocol worker let statement_protocol_executor = { let spawn_handle = Clone::clone(&task_manager.spawn_handle()); diff --git a/client/statement-store/Cargo.toml b/client/statement-store/Cargo.toml index d259456df125c..d9c9f238ddc0d 100644 --- a/client/statement-store/Cargo.toml +++ b/client/statement-store/Cargo.toml @@ -20,6 +20,7 @@ futures-timer = "3.0.2" log = "0.4.17" parking_lot = "0.12.1" parity-db = "0.4.6" +tokio = { version = "1.22.0", features = ["time"] } sp-statement-store = { version = "4.0.0-dev", path = "../../primitives/statement-store" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.10.0-dev", path = "../../utils/prometheus" } sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 54ae46d8b6aa0..7cd244a211919 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -53,7 +53,7 @@ use parking_lot::RwLock; use prometheus_endpoint::Registry as PrometheusRegistry; use sp_api::ProvideRuntimeApi; use sp_blockchain::HeaderBackend; -use sp_core::{hexdisplay::HexDisplay, Decode, Encode}; +use sp_core::{hexdisplay::HexDisplay, traits::SpawnNamed, Decode, Encode}; use sp_runtime::traits::Block as BlockT; use sp_statement_store::{ runtime_api::{InvalidStatement, 
StatementSource, ValidStatement, ValidateStatement}, @@ -74,8 +74,7 @@ const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h const MAX_TOTAL_STATEMENTS: usize = 8192; const MAX_TOTAL_SIZE: usize = 64 * 1024 * 1024; -/// Suggested maintenance period. A good value to call `Store::maintain` with. -pub const MAINTENANCE_PERIOD: std::time::Duration = std::time::Duration::from_secs(30); +const MAINTENANCE_PERIOD: std::time::Duration = std::time::Duration::from_secs(30); mod col { pub const META: u8 = 0; @@ -499,6 +498,7 @@ impl Store { path: &std::path::Path, client: Arc, prometheus: Option<&PrometheusRegistry>, + task_spawner: &dyn SpawnNamed, ) -> Result> where Block: BlockT, @@ -513,6 +513,17 @@ impl Store { { let store = Arc::new(Self::new(path, client.clone(), prometheus)?); client.execution_extensions().register_statement_store(store.clone()); + + // Perform periodic statement store maintenance + let worker_store = store.clone(); + task_spawner.spawn("statement-store-notifications", Some("statement-store"), Box::pin(async move { + let mut interval = tokio::time::interval(MAINTENANCE_PERIOD); + loop { + interval.tick().await; + worker_store.maintain(); + } + })); + Ok(store) } From 16e469e6f26070c7abbd3544b9746f14ca6807a7 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 19 Apr 2023 13:17:08 +0200 Subject: [PATCH 62/78] Use statement iterator --- frame/system/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 72a66e9f6805e..8031dc6449325 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -1447,7 +1447,7 @@ impl Pallet { /// Should only be called if you know what you are doing and outside of the runtime block /// execution else it can have a large impact on the PoV size of a block. pub fn event_no_consensus(index: usize) -> Option { - Events::::get().get(index).map(|e| e.event.clone()) + Self::read_events_no_consensus().nth(index).map(|e| e.event.clone()) } /// Get the current events deposited by the runtime. From 8d18b5e8f28f6efe8f206e3aa7b34662e67ccf02 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 19 Apr 2023 13:20:05 +0200 Subject: [PATCH 63/78] Renamed runtime API mod --- frame/statement/src/lib.rs | 2 +- primitives/statement-store/src/runtime_api.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index e4e8ec0f77e36..305df553999ba 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -208,7 +208,7 @@ where }; statement.set_proof(proof); } - sp_statement_store::runtime_api::io::submit_statement(statement); + sp_statement_store::runtime_api::statement_store::submit_statement(statement); } } } diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index cb8c0a43690eb..f2572cdb45c98 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -118,11 +118,11 @@ pub enum SubmitResult { /// Export functions for the WASM host. #[cfg(feature = "std")] -pub type HostFunctions = (io::HostFunctions,); +pub type HostFunctions = (statement_store::HostFunctions,); /// Host interface #[runtime_interface] -pub trait Io { +pub trait StatementStore { /// Submit a new new statement. The statement will be broadcast to the network. /// This is meant to be used by the offchain worker. 
fn submit_statement(&mut self, statement: Statement) -> SubmitResult { From 9d3add26d6fc56b3efc13b892fe3fc3c584d8a5c Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 19 Apr 2023 13:20:34 +0200 Subject: [PATCH 64/78] fmt --- bin/node/cli/src/service.rs | 16 +++++++--------- bin/node/rpc/src/lib.rs | 20 ++++++++++++++++---- client/statement-store/src/lib.rs | 18 +++++++++++------- 3 files changed, 34 insertions(+), 20 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 7ad9335fb1908..c8846ff63576e 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -234,7 +234,8 @@ pub fn new_partial( client.clone(), config.prometheus_registry(), &task_manager.spawn_handle(), - ).map_err(|e| ServiceError::Other(format!("Statement store error: {:?}", e)))?; + ) + .map_err(|e| ServiceError::Other(format!("Statement store error: {:?}", e)))?; let (rpc_extensions_builder, rpc_setup) = { let (_, grandpa_link, _) = &import_setup; @@ -349,16 +350,13 @@ pub fn new_full_base( let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new( config.protocol_id().clone(), client - .block_hash(0u32.into()) - .ok() - .flatten() - .expect("Genesis block exists; qed"), + .block_hash(0u32.into()) + .ok() + .flatten() + .expect("Genesis block exists; qed"), config.chain_spec.fork_id(), ); - config - .network - .extra_sets - .push(statement_handler_proto.set_config()); + config.network.extra_sets.push(statement_handler_proto.set_config()); config .network diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index fd15b6beb9073..5ab96bf1c7064 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -120,15 +120,26 @@ where use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; use sc_consensus_babe_rpc::{Babe, BabeApiServer}; use sc_consensus_grandpa_rpc::{Grandpa, GrandpaApiServer}; - use sc_rpc::dev::{Dev, DevApiServer}; + use sc_rpc::{ + dev::{Dev, DevApiServer}, + statement::StatementApiServer, + }; use sc_rpc_spec_v2::chain_spec::{ChainSpec, ChainSpecApiServer}; - use sc_rpc::statement::StatementApiServer; use sc_sync_state_rpc::{SyncState, SyncStateApiServer}; use substrate_frame_rpc_system::{System, SystemApiServer}; use substrate_state_trie_migration_rpc::{StateMigration, StateMigrationApiServer}; let mut io = RpcModule::new(()); - let FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa, statement_store } = deps; + let FullDeps { + client, + pool, + select_chain, + chain_spec, + deny_unsafe, + babe, + grandpa, + statement_store, + } = deps; let BabeDeps { keystore, babe_worker_handle } = babe; let GrandpaDeps { @@ -172,7 +183,8 @@ where io.merge(StateMigration::new(client.clone(), backend, deny_unsafe).into_rpc())?; io.merge(Dev::new(client, deny_unsafe).into_rpc())?; - let statement_store = sc_rpc::statement::StatementStore::new(statement_store, deny_unsafe).into_rpc(); + let statement_store = + sc_rpc::statement::StatementStore::new(statement_store, deny_unsafe).into_rpc(); io.merge(statement_store)?; Ok(io) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 7cd244a211919..ce5d84f3041f5 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -516,13 +516,17 @@ impl Store { // Perform periodic statement store maintenance let worker_store = store.clone(); - task_spawner.spawn("statement-store-notifications", Some("statement-store"), Box::pin(async move { - let mut interval = 
tokio::time::interval(MAINTENANCE_PERIOD); - loop { - interval.tick().await; - worker_store.maintain(); - } - })); + task_spawner.spawn( + "statement-store-notifications", + Some("statement-store"), + Box::pin(async move { + let mut interval = tokio::time::interval(MAINTENANCE_PERIOD); + loop { + interval.tick().await; + worker_store.maintain(); + } + }), + ); Ok(store) } From cd7c9dc9020d4b546717aa09cef94c752aa7ad62 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 19 Apr 2023 14:32:35 +0200 Subject: [PATCH 65/78] Remove dump_encoded --- client/rpc/src/statement/mod.rs | 5 +++-- client/statement-store/src/lib.rs | 24 --------------------- primitives/statement-store/src/store_api.rs | 3 --- 3 files changed, 3 insertions(+), 29 deletions(-) diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index c249b01747fce..440c7d82caa18 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -19,6 +19,7 @@ //! Substrate statement store API. use jsonrpsee::core::{async_trait, RpcResult}; +use codec::Encode; /// Re-export the API for backward compatibility. pub use sc_rpc_api::statement::{error::Error, StatementApiServer}; use sc_rpc_api::DenyUnsafe; @@ -48,8 +49,8 @@ impl StatementApiServer for StatementStore { self.deny_unsafe.check_if_safe()?; let statements = - self.store.dump_encoded().map_err(|e| Error::StatementStore(e.to_string()))?; - Ok(statements.into_iter().map(|(_, s)| s.into()).collect()) + self.store.dump().map_err(|e| Error::StatementStore(e.to_string()))?; + Ok(statements.into_iter().map(|(_, s)| s.encode().into()).collect()) } fn broadcasts(&self, match_all_topics: Vec<[u8; 32]>) -> RpcResult> { diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index ce5d84f3041f5..696c7e5fcbea4 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -262,10 +262,6 @@ impl Index { self.expired.insert(hash, timestamp); } - fn is_expired(&self, hash: &Hash) -> bool { - self.expired.contains_key(hash) - } - fn iterate_with( &self, key: Option, @@ -724,26 +720,6 @@ impl Store { } impl StatementStore for Store { - /// Return all statements SCALE-encoded. - fn dump_encoded(&self) -> Result)>> { - let index = self.index.read(); - let mut result = Vec::with_capacity(index.entries.len()); - for h in self.index.read().entries.keys() { - let encoded = self.db.get(col::STATEMENTS, h).map_err(|e| Error::Db(e.to_string()))?; - if let Some(encoded) = encoded { - if let Ok(entry) = StatementWithMeta::decode(&mut encoded.as_slice()) { - entry.statement.using_encoded(|statement| { - let hash = sp_statement_store::hash_encoded(statement); - if !self.index.read().is_expired(&hash) { - result.push((hash, entry.statement.encode())); - } - }); - } - } - } - Ok(result) - } - /// Return all statements. fn dump(&self) -> Result> { let index = self.index.read(); diff --git a/primitives/statement-store/src/store_api.rs b/primitives/statement-store/src/store_api.rs index 77c9017618d39..7040ffa56b6cd 100644 --- a/primitives/statement-store/src/store_api.rs +++ b/primitives/statement-store/src/store_api.rs @@ -63,9 +63,6 @@ pub type Result = std::result::Result; /// Statement store API. pub trait StatementStore: Send + Sync { - /// Return all statements, SCALE-encoded. - fn dump_encoded(&self) -> Result)>>; - /// Return all statements. 
fn dump(&self) -> Result>; From 38d893d2e70473d88ac6de786bb3049f5f6841e5 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 24 Apr 2023 12:32:51 +0200 Subject: [PATCH 66/78] fmt --- client/rpc/src/statement/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index 440c7d82caa18..f4b0152b0eb46 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -18,8 +18,8 @@ //! Substrate statement store API. -use jsonrpsee::core::{async_trait, RpcResult}; use codec::Encode; +use jsonrpsee::core::{async_trait, RpcResult}; /// Re-export the API for backward compatibility. pub use sc_rpc_api::statement::{error::Error, StatementApiServer}; use sc_rpc_api::DenyUnsafe; @@ -48,8 +48,7 @@ impl StatementApiServer for StatementStore { fn dump(&self) -> RpcResult> { self.deny_unsafe.check_if_safe()?; - let statements = - self.store.dump().map_err(|e| Error::StatementStore(e.to_string()))?; + let statements = self.store.dump().map_err(|e| Error::StatementStore(e.to_string()))?; Ok(statements.into_iter().map(|(_, s)| s.encode().into()).collect()) } From fdb43c5468462612cbe877a086c31c75d9e8b2c3 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Mon, 24 Apr 2023 14:24:06 +0200 Subject: [PATCH 67/78] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- bin/node/cli/src/service.rs | 2 +- client/network/statement/src/lib.rs | 56 +++++-------------- client/statement-store/src/lib.rs | 11 ++-- primitives/statement-store/src/lib.rs | 16 +++++- primitives/statement-store/src/runtime_api.rs | 4 +- primitives/statement-store/src/store_api.rs | 2 +- 6 files changed, 37 insertions(+), 54 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index c8846ff63576e..1f967c9be99cd 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -564,7 +564,7 @@ pub fn new_full_base( // Spawn statement protocol worker let statement_protocol_executor = { - let spawn_handle = Clone::clone(&task_manager.spawn_handle()); + let spawn_handle = task_manager.spawn_handle(); Box::new(move |fut| { spawn_handle.spawn("network-statement-validator", Some("networking"), fut); }) diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index 2595e338a9a96..0c55bb2159454 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -28,7 +28,7 @@ use crate::config::*; use codec::{Decode, Encode}; -use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered}; +use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered, FutureExt}; use libp2p::{multiaddr, PeerId}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ @@ -102,31 +102,10 @@ impl Metrics { } } -#[pin_project::pin_project] -struct PendingStatement { - #[pin] - validation: StatementImportFuture, - hash: Hash, -} - -impl Future for PendingStatement { - type Output = (Hash, Option); - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - let mut this = self.project(); - - if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { - return Poll::Ready((*this.hash, import_result.ok())) - } - - Poll::Pending - } -} /// Prototype for a [`StatementHandler`]. 
pub struct StatementHandlerPrototype { protocol_name: ProtocolName, - fallback_protocol_names: Vec, } impl StatementHandlerPrototype { @@ -142,11 +121,9 @@ impl StatementHandlerPrototype { } else { format!("/{}/statement/1", array_bytes::bytes2hex("", genesis_hash)) }; - let legacy_protocol_name = format!("/{}/statement/1", protocol_id.as_ref()); Self { protocol_name: protocol_name.into(), - fallback_protocol_names: iter::once(legacy_protocol_name.into()).collect(), } } @@ -154,7 +131,7 @@ impl StatementHandlerPrototype { pub fn set_config(&self) -> NonDefaultSetConfig { NonDefaultSetConfig { notifications_protocol: self.protocol_name.clone(), - fallback_names: self.fallback_protocol_names.clone(), + fallback_names: Vec::new(), max_notification_size: MAX_STATEMENT_SIZE, handshake: None, set_config: SetConfig { @@ -179,7 +156,7 @@ impl StatementHandlerPrototype { sync: S, statement_store: Arc, metrics_registry: Option<&Registry>, - executor: Box + Send>>) + Send>, + executor: impl sp_core::traits::SpawnNamed, ) -> error::Result> { let net_event_stream = network.event_stream("statement-handler-net"); let sync_event_stream = sync.event_stream("statement-handler-sync"); @@ -196,7 +173,7 @@ impl StatementHandlerPrototype { None => return, Some((statement, completion)) => { let result = store.submit(statement, StatementSource::Network); - if let Err(_) = completion.send(result) { + if completion.send(result).is_err() { log::debug!( target: LOG_TARGET, "Error sending validation completion" @@ -243,7 +220,7 @@ pub struct StatementHandler< /// Interval at which we call `propagate_statements`. propagate_timeout: stream::Fuse + Send>>>, /// Pending statements verification tasks. - pending_statements: FuturesUnordered, + pending_statements: FuturesUnordered)> + Send>>, /// As multiple peers can send us the same statement, we group /// these peers using the statement hash while the statement is /// imported. This prevents that we import the same statement @@ -367,7 +344,7 @@ where continue } // Accept statements only when node is not major syncing - if self.sync.is_major_syncing() || self.sync.is_offline() { + if self.sync.is_major_syncing() { log::trace!( target: LOG_TARGET, "{remote}: Ignoring statements while major syncing or offline" @@ -412,16 +389,14 @@ where match self.pending_statements_peers.entry(hash) { Entry::Vacant(entry) => { let (completion_sender, completion_receiver) = oneshot::channel(); - if let Ok(()) = self.queue_sender.unbounded_send((s, completion_sender)) { + if self.queue_sender.unbounded_send((s, completion_sender)).is_ok() { self.pending_statements - .push(PendingStatement { validation: completion_receiver, hash }); - let mut set = HashSet::new(); - set.insert(who); - entry.insert(set); + .push(async move { let res = completion_receiver.await; (hash, res.ok()) }.boxed()); + entry.insert(HashSet::from_iter([who])); } }, Entry::Occupied(mut entry) => { - if !(entry.get_mut().insert(who)) { + if !entry.get_mut().insert(who) { // Already received this from the same peer. 
self.network.report_peer(who, rep::DUPLICATE_STATEMENT); } @@ -462,18 +437,17 @@ where let mut propagated_statements = 0; for (who, peer) in self.peers.iter_mut() { - // never send statements to the light node + // never send statements to light nodes if matches!(peer.role, ObservedRole::Light) { continue } - let (hashes, to_send): (Vec<_>, Vec<_>) = statements + let to_send = statements .iter() - .filter(|(hash, _)| peer.known_statements.insert(*hash)) - .cloned() - .unzip(); + .filter_map(|(hash, stmt)| peer.known_statements.insert(*hash).then(|| stmt)) + .collect::>(); - propagated_statements += hashes.len(); + propagated_statements += to_send.len(); if !to_send.is_empty() { log::trace!(target: LOG_TARGET, "Sending {} statements to {}", to_send.len(), who); diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 696c7e5fcbea4..9d9c7c3100fc5 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -162,10 +162,7 @@ where // Validate against the finalized state. self.client.info().finalized_hash }); - match api.validate_statement(block, source, statement) { - Ok(r) => r, - Err(_) => Err(InvalidStatement::InternalError), - } + api.validate_statement(block, source, statement).map_err(|_| InvalidStatement::InternalError) } } @@ -274,14 +271,14 @@ impl Index { return Ok(()) } let key_set = self.by_dec_key.get(&key); - if key_set.map(|s| s.len()).unwrap_or(0) == 0 { + if key_set.map_or(0, |s| s.len()) == 0 { // Key does not exist in the index. return Ok(()) } sets[0] = key_set.expect("Function returns if key_set is None"); for (i, t) in match_all_topics.iter().enumerate() { let set = self.by_topic.get(t); - if set.map(|s| s.len()).unwrap_or(0) == 0 { + if set.map_or(0, |s| s.len()) == 0 { // At least one of the match_all_topics does not exist in the index. return Ok(()) } @@ -513,7 +510,7 @@ impl Store { // Perform periodic statement store maintenance let worker_store = store.clone(); task_spawner.spawn( - "statement-store-notifications", + "statement-store-maintenance", Some("statement-store"), Box::pin(async move { let mut interval = tokio::time::interval(MAINTENANCE_PERIOD); diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 14def6b128834..881f547da0b88 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -223,6 +223,10 @@ impl Statement { } /// Sign with a key that matches given public key in the keystore. + /// + /// Returns `true` if signing worked (private key present etc). + /// + /// NOTE: This can only be called from the runtime. pub fn sign_sr25519_public(&mut self, key: &sr25519::Public) -> bool { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { @@ -247,6 +251,10 @@ impl Statement { } /// Sign with a key that matches given public key in the keystore. + /// + /// Returns `true` if signing worked (private key present etc). + /// + /// NOTE: This can only be called from the runtime. pub fn sign_ed25519_public(&mut self, key: &ed25519::Public) -> bool { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { @@ -271,6 +279,10 @@ impl Statement { } /// Sign with a key that matches given public key in the keystore. + /// + /// Returns `true` if signing worked (private key present etc). + /// + /// NOTE: This can only be called from the runtime. 
pub fn sign_ecdsa_public(&mut self, key: &ecdsa::Public) -> bool { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { @@ -395,8 +407,8 @@ impl Statement { self.encoded(true) } - /// Return a copy of this statement with proof removed - pub fn strip_proof(&mut self) { + /// Remove the proof of this statement. + pub fn remove_proof(&mut self) { self.proof = None; } diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index f2572cdb45c98..7de50b82110a2 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -60,8 +60,8 @@ pub enum StatementSource { Chain, /// Statement has been received from the gossip network. Network, - /// Statement has been submitted over the RPC api. - Rpc, + /// Statement has been submitted over the local api. + Local, } impl StatementSource { diff --git a/primitives/statement-store/src/store_api.rs b/primitives/statement-store/src/store_api.rs index 7040ffa56b6cd..321c87f3ab063 100644 --- a/primitives/statement-store/src/store_api.rs +++ b/primitives/statement-store/src/store_api.rs @@ -64,7 +64,7 @@ pub type Result = std::result::Result; /// Statement store API. pub trait StatementStore: Send + Sync { /// Return all statements. - fn dump(&self) -> Result>; + fn statements(&self) -> Result>; /// Get statement by hash. fn statement(&self, hash: &Hash) -> Result>; From 74e49451e91404b4a24b5e2e679f6c79e36460b8 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Mon, 24 Apr 2023 14:24:55 +0200 Subject: [PATCH 68/78] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- primitives/statement-store/src/lib.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 881f547da0b88..69cd935383ad1 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -283,6 +283,10 @@ impl Statement { /// Returns `true` if signing worked (private key present etc). /// /// NOTE: This can only be called from the runtime. + /// + /// Returns `true` if signing worked (private key present etc). + /// + /// NOTE: This can only be called from the runtime. 
pub fn sign_ecdsa_public(&mut self, key: &ecdsa::Public) -> bool { let to_sign = self.signature_material(); if let Some(signature) = key.sign(&to_sign) { From 4f1ac7631a6baeaeb29bdf78a6c36d3d9dbbfe90 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 24 Apr 2023 14:55:13 +0200 Subject: [PATCH 69/78] Fixed build after applying review suggestions --- bin/node/cli/src/service.rs | 1 - client/network/statement/src/lib.rs | 10 ++++------ client/rpc/src/statement/mod.rs | 4 ++-- client/statement-store/src/lib.rs | 12 ++++++------ frame/statement/src/lib.rs | 2 +- primitives/statement-store/src/lib.rs | 2 +- primitives/statement-store/src/runtime_api.rs | 6 +++--- 7 files changed, 17 insertions(+), 20 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 1f967c9be99cd..5a627886d56aa 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -348,7 +348,6 @@ pub fn new_full_base( ); let statement_handler_proto = sc_network_statement::StatementHandlerPrototype::new( - config.protocol_id().clone(), client .block_hash(0u32.into()) .ok() diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index 0c55bb2159454..75dc2848401e6 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -32,7 +32,7 @@ use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered, FutureExt} use libp2p::{multiaddr, PeerId}; use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; use sc_network::{ - config::{NonDefaultSetConfig, NonReservedPeerMode, ProtocolId, SetConfig}, + config::{NonDefaultSetConfig, NonReservedPeerMode, SetConfig}, error, event::Event, types::ProtocolName, @@ -53,7 +53,6 @@ use std::{ num::NonZeroUsize, pin::Pin, sync::Arc, - task::Poll, }; pub mod config; @@ -111,7 +110,6 @@ pub struct StatementHandlerPrototype { impl StatementHandlerPrototype { /// Create a new instance. pub fn new>( - protocol_id: ProtocolId, genesis_hash: Hash, fork_id: Option<&str>, ) -> Self { @@ -156,7 +154,7 @@ impl StatementHandlerPrototype { sync: S, statement_store: Arc, metrics_registry: Option<&Registry>, - executor: impl sp_core::traits::SpawnNamed, + executor: impl Fn(Pin + Send>>) + Send, ) -> error::Result> { let net_event_stream = network.event_stream("statement-handler-net"); let sync_event_stream = sync.event_stream("statement-handler-sync"); @@ -220,7 +218,7 @@ pub struct StatementHandler< /// Interval at which we call `propagate_statements`. propagate_timeout: stream::Fuse + Send>>>, /// Pending statements verification tasks. - pending_statements: FuturesUnordered)> + Send>>, + pending_statements: FuturesUnordered)> + Send>>>, /// As multiple peers can send us the same statement, we group /// these peers using the statement hash while the statement is /// imported. 
This prevents that we import the same statement @@ -469,7 +467,7 @@ where } log::debug!(target: LOG_TARGET, "Propagating statements"); - if let Ok(statements) = self.statement_store.dump() { + if let Ok(statements) = self.statement_store.statements() { self.do_propagate_statements(&statements); } } diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index f4b0152b0eb46..05a8e2df393ce 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -48,7 +48,7 @@ impl StatementApiServer for StatementStore { fn dump(&self) -> RpcResult> { self.deny_unsafe.check_if_safe()?; - let statements = self.store.dump().map_err(|e| Error::StatementStore(e.to_string()))?; + let statements = self.store.statements().map_err(|e| Error::StatementStore(e.to_string()))?; Ok(statements.into_iter().map(|(_, s)| s.encode().into()).collect()) } @@ -87,7 +87,7 @@ impl StatementApiServer for StatementStore { } fn submit(&self, encoded: Bytes) -> RpcResult<()> { - match self.store.submit_encoded(&encoded, StatementSource::Rpc) { + match self.store.submit_encoded(&encoded, StatementSource::Local) { SubmitResult::New(_) | SubmitResult::Known => Ok(()), // `KnownExpired` should not happen. Expired statements submitted with // `StatementSource::Rpc` should be renewed. diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 9d9c7c3100fc5..decc9ad99703d 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -162,7 +162,7 @@ where // Validate against the finalized state. self.client.info().finalized_hash }); - api.validate_statement(block, source, statement).map_err(|_| InvalidStatement::InternalError) + api.validate_statement(block, source, statement).map_err(|_| InvalidStatement::InternalError)? } } @@ -718,7 +718,7 @@ impl Store { impl StatementStore for Store { /// Return all statements. 
- fn dump(&self) -> Result> { + fn statements(&self) -> Result> { let index = self.index.read(); let mut result = Vec::with_capacity(index.entries.len()); for h in self.index.read().entries.keys() { @@ -1125,7 +1125,7 @@ mod tests { store.submit(statement2.clone(), StatementSource::Network), SubmitResult::New(NetworkPriority::High) ); - assert_eq!(store.dump().unwrap().len(), 3); + assert_eq!(store.statements().unwrap().len(), 3); assert_eq!(store.broadcasts(&[]).unwrap().len(), 3); assert_eq!(store.statement(&statement1.hash()).unwrap(), Some(statement1.clone())); drop(store); @@ -1134,7 +1134,7 @@ mod tests { let mut path: std::path::PathBuf = temp.path().into(); path.push("db"); let store = Store::new(&path, client, None).unwrap(); - assert_eq!(store.dump().unwrap().len(), 3); + assert_eq!(store.statements().unwrap().len(), 3); assert_eq!(store.broadcasts(&[]).unwrap().len(), 3); assert_eq!(store.statement(&statement1.hash()).unwrap(), Some(statement1)); } @@ -1244,7 +1244,7 @@ mod tests { ]; expected_statements.sort(); let mut statements: Vec<_> = - store.dump().unwrap().into_iter().map(|(hash, _)| hash).collect(); + store.statements().unwrap().into_iter().map(|(hash, _)| hash).collect(); statements.sort(); assert_eq!(expected_statements, statements); } @@ -1271,7 +1271,7 @@ mod tests { let mut path: std::path::PathBuf = temp.path().into(); path.push("db"); let store = Store::new(&path, client, None).unwrap(); - assert_eq!(store.dump().unwrap().len(), 0); + assert_eq!(store.statements().unwrap().len(), 0); assert_eq!(store.index.read().expired.len(), 0); } } diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 305df553999ba..570b947011a2d 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -139,7 +139,7 @@ where let account: T::AccountId = (*who).into(); match frame_system::Pallet::::event_no_consensus(*event_index as usize) { Some(e) => { - statement.strip_proof(); + statement.remove_proof(); if let Ok(Event::NewStatement { account: a, statement: s }) = e.try_into() { if a != account || s != statement { log::debug!(target: LOG_TARGET, "Event data mismatch"); diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 69cd935383ad1..d08900425da43 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -574,7 +574,7 @@ mod test { statement.set_proof(Proof::Sr25519 { signature: [0u8; 64], signer: [0u8; 32] }); assert_eq!(statement.verify_signature(), SignatureVerificationResult::Invalid); - statement.strip_proof(); + statement.remove_proof(); assert_eq!(statement.verify_signature(), SignatureVerificationResult::NoSignature); } } diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index 7de50b82110a2..d8f84ff116ab8 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -69,7 +69,7 @@ impl StatementSource { /// expiration date. pub fn can_be_resubmitted(&self) -> bool { match self { - StatementSource::Chain | StatementSource::Rpc => true, + StatementSource::Chain | StatementSource::Local => true, StatementSource::Network => false, } } @@ -143,9 +143,9 @@ pub trait StatementStore { } /// Return all statements. 
- fn dump(&mut self) -> Vec<(Hash, Statement)> { + fn statements(&mut self) -> Vec<(Hash, Statement)> { if let Some(StatementStoreExt(store)) = self.extension::() { - store.dump().unwrap_or_default() + store.statements().unwrap_or_default() } else { Vec::default() } From 2f2d4f8193f66894bc0c3d315d8d14c421022260 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 25 Apr 2023 08:12:33 +0200 Subject: [PATCH 70/78] License exceptions --- scripts/ci/deny.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/ci/deny.toml b/scripts/ci/deny.toml index f932875937606..91822c831cc19 100644 --- a/scripts/ci/deny.toml +++ b/scripts/ci/deny.toml @@ -75,6 +75,7 @@ exceptions = [ { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-sync" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-test" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-transactions" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-network-statement" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-offchain" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-peerset" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-proposer-metrics" }, @@ -86,6 +87,7 @@ exceptions = [ { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-service-test" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-state-db" }, + { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-statement-store" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-storage-monitor" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-sysinfo" }, { allow = ["GPL-3.0 WITH Classpath-exception-2.0"], name = "sc-telemetry" }, From 75ee1bf0f862d6f3aaf4019033f7148c6ef0a07b Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 25 Apr 2023 08:12:47 +0200 Subject: [PATCH 71/78] fmt --- client/network/statement/src/lib.rs | 22 +++++++++++----------- client/rpc/src/statement/mod.rs | 3 ++- client/statement-store/src/lib.rs | 3 ++- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index 75dc2848401e6..e5256d3420c59 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -101,7 +101,6 @@ impl Metrics { } } - /// Prototype for a [`StatementHandler`]. pub struct StatementHandlerPrototype { protocol_name: ProtocolName, @@ -109,10 +108,7 @@ pub struct StatementHandlerPrototype { impl StatementHandlerPrototype { /// Create a new instance. - pub fn new>( - genesis_hash: Hash, - fork_id: Option<&str>, - ) -> Self { + pub fn new>(genesis_hash: Hash, fork_id: Option<&str>) -> Self { let genesis_hash = genesis_hash.as_ref(); let protocol_name = if let Some(fork_id) = fork_id { format!("/{}/{}/statement/1", array_bytes::bytes2hex("", genesis_hash), fork_id) @@ -120,9 +116,7 @@ impl StatementHandlerPrototype { format!("/{}/statement/1", array_bytes::bytes2hex("", genesis_hash)) }; - Self { - protocol_name: protocol_name.into(), - } + Self { protocol_name: protocol_name.into() } } /// Returns the configuration of the set to put in the network configuration. @@ -218,7 +212,8 @@ pub struct StatementHandler< /// Interval at which we call `propagate_statements`. propagate_timeout: stream::Fuse + Send>>>, /// Pending statements verification tasks. 
- pending_statements: FuturesUnordered)> + Send>>>, + pending_statements: + FuturesUnordered)> + Send>>>, /// As multiple peers can send us the same statement, we group /// these peers using the statement hash while the statement is /// imported. This prevents that we import the same statement @@ -388,8 +383,13 @@ where Entry::Vacant(entry) => { let (completion_sender, completion_receiver) = oneshot::channel(); if self.queue_sender.unbounded_send((s, completion_sender)).is_ok() { - self.pending_statements - .push(async move { let res = completion_receiver.await; (hash, res.ok()) }.boxed()); + self.pending_statements.push( + async move { + let res = completion_receiver.await; + (hash, res.ok()) + } + .boxed(), + ); entry.insert(HashSet::from_iter([who])); } }, diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index 05a8e2df393ce..0959777a6d49c 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -48,7 +48,8 @@ impl StatementApiServer for StatementStore { fn dump(&self) -> RpcResult> { self.deny_unsafe.check_if_safe()?; - let statements = self.store.statements().map_err(|e| Error::StatementStore(e.to_string()))?; + let statements = + self.store.statements().map_err(|e| Error::StatementStore(e.to_string()))?; Ok(statements.into_iter().map(|(_, s)| s.encode().into()).collect()) } diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index decc9ad99703d..d95175b246f31 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -162,7 +162,8 @@ where // Validate against the finalized state. self.client.info().finalized_hash }); - api.validate_statement(block, source, statement).map_err(|_| InvalidStatement::InternalError)? + api.validate_statement(block, source, statement) + .map_err(|_| InvalidStatement::InternalError)? } } From b50119852adb7fff6ec810d3db72e62bf0a7d854 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 25 Apr 2023 08:27:36 +0200 Subject: [PATCH 72/78] Store options --- bin/node/cli/src/service.rs | 1 + client/statement-store/src/lib.rs | 66 +++++++++++++++++++++---------- 2 files changed, 46 insertions(+), 21 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 5a627886d56aa..b704bf0290ddc 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -231,6 +231,7 @@ pub fn new_partial( let statement_store = sc_statement_store::Store::new_shared( &config.data_path, + Default::default(), client.clone(), config.prometheus_registry(), &task_manager.spawn_handle(), diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index d95175b246f31..7f1ac2b6c6eaa 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -36,8 +36,8 @@ // // Each time a statement is removed from the store (Either evicted by higher priority statement or // explicitly with the `remove` function) the statement is marked as expired. Expired statements -// can't be added to the store for `PURGE_AFTER` seconds. This is to prevent old statements from -// being propagated on the network. +// can't be added to the store for `Options::purge_after_sec` seconds. This is to prevent old +// statements from being propagated on the network. //! Disk-backed statement store. 
@@ -70,9 +70,9 @@ const CURRENT_VERSION: u32 = 1; const LOG_TARGET: &str = "statement-store"; -const PURGE_AFTER: u64 = 2 * 24 * 60 * 60; //48h -const MAX_TOTAL_STATEMENTS: usize = 8192; -const MAX_TOTAL_SIZE: usize = 64 * 1024 * 1024; +const DEFAULT_PURGE_AFTER_SEC: u64 = 2 * 24 * 60 * 60; //48h +const DEFAULT_MAX_TOTAL_STATEMENTS: usize = 8192; +const DEFAULT_MAX_TOTAL_SIZE: usize = 64 * 1024 * 1024; const MAINTENANCE_PERIOD: std::time::Duration = std::time::Duration::from_secs(30); @@ -123,6 +123,28 @@ struct StatementsForAccount { data_size: usize, } +/// Store configuration +pub struct Options { + /// Maximum statement allowed in the store. Once this limit is reached lower-priority + /// statements may be evicted. + max_total_statements: usize, + /// Maximum total data size allowed in the store. Once this limit is reached lower-priority + /// statements may be evicted. + max_total_size: usize, + /// Number of seconds for which removed statements won't be allowed to be added back in. + purge_after_sec: u64, +} + +impl Default for Options { + fn default() -> Self { + Options { + max_total_statements: DEFAULT_MAX_TOTAL_STATEMENTS, + max_total_size: DEFAULT_MAX_TOTAL_SIZE, + purge_after_sec: DEFAULT_PURGE_AFTER_SEC, + } + } +} + #[derive(Default)] struct Index { by_topic: HashMap>, @@ -134,8 +156,7 @@ struct Index { expired: HashMap, // Value is expiration timestamp. accounts: HashMap, by_global_priority: BTreeMap, usize>, - max_entries: usize, - max_size: usize, + options: Options, total_size: usize, } @@ -208,8 +229,8 @@ enum MaybeInserted { } impl Index { - fn new() -> Index { - Index { max_entries: MAX_TOTAL_STATEMENTS, max_size: MAX_TOTAL_SIZE, ..Default::default() } + fn new(options: Options) -> Index { + Index { options, ..Default::default() } } fn insert_new( @@ -305,7 +326,7 @@ impl Index { // Purge previously expired messages. let mut purged = Vec::new(); self.expired.retain(|hash, timestamp| { - if *timestamp + PURGE_AFTER <= current_time { + if *timestamp + self.options.purge_after_sec <= current_time { purged.push(*hash); log::trace!(target: LOG_TARGET, "Purged statement {:?}", HexDisplay::from(hash)); false @@ -453,8 +474,9 @@ impl Index { let global_priority = GlobalPriority(validation.global_priority); // Now check global constraints as well. for (entry, len) in self.by_global_priority.iter() { - if (self.total_size - would_free_size + statement_len <= self.max_size) && - self.by_global_priority.len() + 1 - evicted.len() <= self.max_entries + if (self.total_size - would_free_size + statement_len <= self.options.max_total_size) && + self.by_global_priority.len() + 1 - evicted.len() <= + self.options.max_total_statements { // Satisfied break @@ -490,6 +512,7 @@ impl Store { /// Create a new shared store instance. There should only be one per process. pub fn new_shared( path: &std::path::Path, + options: Options, client: Arc, prometheus: Option<&PrometheusRegistry>, task_spawner: &dyn SpawnNamed, @@ -505,7 +528,7 @@ impl Store { + 'static, Client::Api: ValidateStatement, { - let store = Arc::new(Self::new(path, client.clone(), prometheus)?); + let store = Arc::new(Self::new(path, options, client.clone(), prometheus)?); client.execution_extensions().register_statement_store(store.clone()); // Perform periodic statement store maintenance @@ -528,6 +551,7 @@ impl Store { /// Create a new instance. 
fn new( path: &std::path::Path, + options: Options, client: Arc, prometheus: Option<&PrometheusRegistry>, ) -> Result @@ -575,7 +599,7 @@ impl Store { let store = Store { db, - index: RwLock::new(Index::new()), + index: RwLock::new(Index::new(options)), validate_fn, time_override: None, metrics: PrometheusMetrics::new(prometheus), @@ -1026,7 +1050,7 @@ mod tests { let client = std::sync::Arc::new(TestClient); let mut path: std::path::PathBuf = temp_dir.path().into(); path.push("db"); - let store = Store::new(&path, client, None).unwrap(); + let store = Store::new(&path, Default::default(), client, None).unwrap(); (store, temp_dir) // return order is important. Store must be dropped before TempDir } @@ -1134,7 +1158,7 @@ mod tests { let client = std::sync::Arc::new(TestClient); let mut path: std::path::PathBuf = temp.path().into(); path.push("db"); - let store = Store::new(&path, client, None).unwrap(); + let store = Store::new(&path, Default::default(), client, None).unwrap(); assert_eq!(store.statements().unwrap().len(), 3); assert_eq!(store.broadcasts(&[]).unwrap().len(), 3); assert_eq!(store.statement(&statement1.hash()).unwrap(), Some(statement1)); @@ -1187,7 +1211,7 @@ mod tests { fn constraints() { let (store, _temp) = test_store(); - store.index.write().max_size = 3000; + store.index.write().options.max_total_size = 3000; let source = StatementSource::Network; let ok = SubmitResult::New(NetworkPriority::High); let ignored = SubmitResult::Ignored; @@ -1231,7 +1255,7 @@ mod tests { // Should be over the global size limit assert_eq!(store.submit(statement(1, 1, None, 700), source), ignored); // Should be over the global count limit - store.index.write().max_entries = 4; + store.index.write().options.max_total_statements = 4; assert_eq!(store.submit(statement(1, 1, None, 100), source), ignored); // Should evict statement from account 1 assert_eq!(store.submit(statement(4, 6, None, 100), source), ok); @@ -1252,7 +1276,7 @@ mod tests { #[test] fn expired_statements_are_purged() { - use super::PURGE_AFTER; + use super::DEFAULT_PURGE_AFTER_SEC; let (mut store, temp) = test_store(); let mut statement = statement(1, 1, Some(3), 100); store.set_time(0); @@ -1263,7 +1287,7 @@ mod tests { assert_eq!(store.index.read().entries.len(), 0); assert_eq!(store.index.read().by_global_priority.len(), 0); assert_eq!(store.index.read().accounts.len(), 0); - store.set_time(PURGE_AFTER + 1); + store.set_time(DEFAULT_PURGE_AFTER_SEC + 1); store.maintain(); assert_eq!(store.index.read().expired.len(), 0); drop(store); @@ -1271,7 +1295,7 @@ mod tests { let client = std::sync::Arc::new(TestClient); let mut path: std::path::PathBuf = temp.path().into(); path.push("db"); - let store = Store::new(&path, client, None).unwrap(); + let store = Store::new(&path, Default::default(), client, None).unwrap(); assert_eq!(store.statements().unwrap().len(), 0); assert_eq!(store.index.read().expired.len(), 0); } From 692f58aac869c7067159d33ad1e874e3aea3be16 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 25 Apr 2023 10:57:39 +0200 Subject: [PATCH 73/78] Moved pallet consts to config trait --- bin/node/runtime/src/lib.rs | 8 ++++++++ frame/statement/src/lib.rs | 26 +++++++++++++++++++------- frame/statement/src/mock.rs | 9 +++++++++ 3 files changed, 36 insertions(+), 7 deletions(-) diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 1d4b052be3492..884a4a27e8599 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1756,6 +1756,10 @@ impl frame_benchmarking_pallet_pov::Config 
for Runtime { parameter_types! { pub StatementCost: Balance = 1 * DOLLARS; pub StatementByteCost: Balance = 100 * MILLICENTS; + pub const MinAllowedStatements: u32 = 4; + pub const MaxAllowedStatements: u32 = 10; + pub const MinAllowedBytes: u32 = 1024; + pub const MaxAllowedBytes: u32 = 4096; } impl pallet_statement::Config for Runtime { @@ -1763,6 +1767,10 @@ impl pallet_statement::Config for Runtime { type Currency = Balances; type StatementCost = StatementCost; type ByteCost = StatementByteCost; + type MinAllowedStatements = MinAllowedStatements; + type MaxAllowedStatements = MaxAllowedStatements; + type MinAllowedBytes = MinAllowedBytes; + type MaxAllowedBytes = MaxAllowedBytes; } construct_runtime!( diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 570b947011a2d..411c030d23928 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -55,11 +55,6 @@ pub use pallet::*; const LOG_TARGET: &str = "runtime::statement"; -const MIN_ALLOWED_STATEMENTS: u32 = 4; -const MAX_ALLOWED_STATEMENTS: u32 = 10; -const MIN_ALLOWED_BYTES: u32 = 1024; -const MAX_ALLOWED_BYTES: u32 = 4096; - #[frame_support::pallet] pub mod pallet { use super::*; @@ -81,8 +76,21 @@ pub mod pallet { /// Min balance for priority statements. #[pallet::constant] type StatementCost: Get>; + /// Cost of data byte used for priority calculation. #[pallet::constant] type ByteCost: Get>; + /// Minimum number of statements allowed per account. + #[pallet::constant] + type MinAllowedStatements: Get; + /// Maximum number of statements allowed per account. + #[pallet::constant] + type MaxAllowedStatements: Get; + /// Minimum data bytes allowed per account. + #[pallet::constant] + type MinAllowedBytes: Get; + /// Maximum data bytes allowed per account. + #[pallet::constant] + type MaxAllowedBytes: Get; } #[pallet::pallet] @@ -173,18 +181,22 @@ where let byte_cost = T::ByteCost::get(); let priority_cost = statement_cost; let balance = >>::balance(&account); + let min_allowed_statements = T::MinAllowedStatements::get(); + let max_allowed_statements = T::MaxAllowedStatements::get(); + let min_allowed_bytes = T::MinAllowedBytes::get(); + let max_allowed_bytes = T::MaxAllowedBytes::get(); let global_priority = balance.checked_div(&priority_cost).unwrap_or_default().saturated_into(); let max_count = balance .checked_div(&statement_cost) .unwrap_or_default() .saturated_into::() - .clamp(MIN_ALLOWED_STATEMENTS, MAX_ALLOWED_STATEMENTS); + .clamp(min_allowed_statements, max_allowed_statements); let max_size = balance .checked_div(&byte_cost) .unwrap_or_default() .saturated_into::() - .clamp(MIN_ALLOWED_BYTES, MAX_ALLOWED_BYTES); + .clamp(min_allowed_bytes, max_allowed_bytes); Ok(ValidStatement { global_priority, max_count, max_size }) } diff --git a/frame/statement/src/mock.rs b/frame/statement/src/mock.rs index 9759e2ee41b0d..f4d9360c9a6c0 100644 --- a/frame/statement/src/mock.rs +++ b/frame/statement/src/mock.rs @@ -35,6 +35,11 @@ use sp_runtime::{ type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; +pub const MIN_ALLOWED_STATEMENTS: u32 = 4; +pub const MAX_ALLOWED_STATEMENTS: u32 = 10; +pub const MIN_ALLOWED_BYTES: u32 = 1024; +pub const MAX_ALLOWED_BYTES: u32 = 4096; + frame_support::construct_runtime!( pub enum Test where Block = Block, @@ -99,6 +104,10 @@ impl Config for Test { type Currency = Balances; type StatementCost = ConstU64<1000>; type ByteCost = ConstU64<2>; + type MinAllowedStatements = ConstU32; + type 
MaxAllowedStatements = ConstU32; + type MinAllowedBytes = ConstU32; + type MaxAllowedBytes = ConstU32; } pub fn new_test_ext() -> sp_io::TestExternalities { From 704d53a5c6adc7ddfbc1db426a42cbe5c0bf869a Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 25 Apr 2023 11:26:50 +0200 Subject: [PATCH 74/78] Removed global priority --- client/statement-store/src/lib.rs | 142 ++++++------------ frame/statement/src/lib.rs | 7 +- frame/statement/src/tests.rs | 8 +- primitives/statement-store/src/runtime_api.rs | 4 - 4 files changed, 49 insertions(+), 112 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 7f1ac2b6c6eaa..202b129a5f4d0 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -86,22 +86,20 @@ mod col { #[derive(Eq, PartialEq, Debug, Ord, PartialOrd, Clone, Copy)] struct Priority(u32); -#[derive(Eq, PartialEq, Debug, Ord, PartialOrd, Clone, Copy, Encode, Decode)] -struct GlobalPriority(u32); #[derive(PartialEq, Eq)] -struct PriorityKey

<P> {
+struct PriorityKey {
 	hash: Hash,
-	priority: P,
+	priority: Priority,
 }
 
-impl<P: Ord> PartialOrd for PriorityKey<P> {
+impl PartialOrd for PriorityKey {
 	fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
 		Some(self.cmp(other))
 	}
 }
 
-impl<P: Ord> Ord for PriorityKey<P>

{ +impl Ord for PriorityKey { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.priority.cmp(&other.priority).then_with(|| self.hash.cmp(&other.hash)) } @@ -116,7 +114,7 @@ struct ChannelEntry { #[derive(Default)] struct StatementsForAccount { // Statements ordered by priority. - by_priority: BTreeMap, (Option, usize)>, + by_priority: BTreeMap, usize)>, // Channel to statement map. Only one statement per channel is allowed. channels: HashMap, // Sum of all `Data` field sizes. @@ -150,12 +148,9 @@ struct Index { by_topic: HashMap>, by_dec_key: HashMap, HashSet>, topics_and_keys: HashMap; MAX_TOPICS], Option)>, - entries: HashMap, /* Statement hash -> (Account - * id, - * global_priority, priority) */ + entries: HashMap, expired: HashMap, // Value is expiration timestamp. accounts: HashMap, - by_global_priority: BTreeMap, usize>, options: Options, total_size: usize, } @@ -206,17 +201,6 @@ pub struct Store { metrics: PrometheusMetrics, } -#[derive(Encode, Decode, Clone)] -struct StatementMeta { - global_priority: GlobalPriority, -} - -#[derive(Encode, Decode)] -struct StatementWithMeta { - meta: StatementMeta, - statement: Statement, -} - enum IndexQuery { Unknown, Exists, @@ -237,7 +221,6 @@ impl Index { &mut self, hash: Hash, account: AccountId, - global_priority: GlobalPriority, statement: &Statement, ) { let mut all_topics = [None; MAX_TOPICS]; @@ -253,9 +236,7 @@ impl Index { self.topics_and_keys.insert(hash, (all_topics, key)); } let priority = Priority(statement.priority().unwrap_or(0)); - self.entries.insert(hash, (account, global_priority, priority)); - self.by_global_priority - .insert(PriorityKey { hash, priority: global_priority }, statement.data_len()); + self.entries.insert(hash, (account, priority, statement.data_len())); self.total_size += statement.data_len(); let mut account_info = self.accounts.entry(account).or_default(); account_info.data_size += statement.data_len(); @@ -338,9 +319,7 @@ impl Index { } fn make_expired(&mut self, hash: &Hash, current_time: u64) -> bool { - if let Some((account, global_priority, priority)) = self.entries.remove(hash) { - let key = PriorityKey { hash: *hash, priority: global_priority }; - let len = self.by_global_priority.remove(&key).unwrap_or(0); + if let Some((account, priority, len)) = self.entries.remove(hash) { self.total_size -= len; if let Some((topics, key)) = self.topics_and_keys.remove(hash) { for t in topics.into_iter().flatten() { @@ -471,39 +450,25 @@ impl Index { would_free_size += len; } } - let global_priority = GlobalPriority(validation.global_priority); // Now check global constraints as well. 
- for (entry, len) in self.by_global_priority.iter() { - if (self.total_size - would_free_size + statement_len <= self.options.max_total_size) && - self.by_global_priority.len() + 1 - evicted.len() <= - self.options.max_total_statements - { - // Satisfied - break - } - if evicted.contains(&entry.hash) { - // Already accounted for above - continue - } - - if entry.priority >= global_priority { - log::debug!( - target: LOG_TARGET, - "Ignored message due to global constraints {:?} {:?} < {:?}", - HexDisplay::from(&hash), - priority, - entry.priority, - ); - return MaybeInserted::Ignored - } - evicted.insert(entry.hash); - would_free_size += len; + if !((self.total_size - would_free_size + statement_len <= self.options.max_total_size) && + self.entries.len() + 1 - evicted.len() <= + self.options.max_total_statements) + { + log::debug!( + target: LOG_TARGET, + "Ignored statement {} because the store is full (size={}, count={})", + HexDisplay::from(&hash), + self.total_size, + self.entries.len(), + ); + return MaybeInserted::Ignored } for h in &evicted { self.make_expired(h, current_time); } - self.insert_new(hash, *account, global_priority, statement); + self.insert_new(hash, *account, statement); MaybeInserted::Inserted(evicted) } } @@ -618,21 +583,19 @@ impl Store { self.db .iter_column_while(col::STATEMENTS, |item| { let statement = item.value; - if let Ok(statement_with_meta) = - StatementWithMeta::decode(&mut statement.as_slice()) + if let Ok(statement) = Statement::decode(&mut statement.as_slice()) { - let hash = statement_with_meta.statement.hash(); + let hash = statement.hash(); log::trace!( target: LOG_TARGET, "Statement loaded {:?}", HexDisplay::from(&hash) ); - if let Some(account_id) = statement_with_meta.statement.account_id() { + if let Some(account_id) = statement.account_id() { index.insert_new( hash, account_id, - statement_with_meta.meta.global_priority, - &statement_with_meta.statement, + &statement, ); } else { log::debug!( @@ -678,8 +641,8 @@ impl Store { index.iterate_with(key, match_all_topics, |hash| { match self.db.get(col::STATEMENTS, hash).map_err(|e| Error::Db(e.to_string()))? { Some(entry) => { - if let Ok(statement) = StatementWithMeta::decode(&mut entry.as_slice()) { - if let Some(data) = f(statement.statement) { + if let Ok(statement) = Statement::decode(&mut entry.as_slice()) { + if let Some(data) = f(statement) { result.push(data); } } else { @@ -749,9 +712,9 @@ impl StatementStore for Store { for h in self.index.read().entries.keys() { let encoded = self.db.get(col::STATEMENTS, h).map_err(|e| Error::Db(e.to_string()))?; if let Some(encoded) = encoded { - if let Ok(entry) = StatementWithMeta::decode(&mut encoded.as_slice()) { - let hash = entry.statement.hash(); - result.push((hash, entry.statement)); + if let Ok(statement) = Statement::decode(&mut encoded.as_slice()) { + let hash = statement.hash(); + result.push((hash, statement)); } } } @@ -773,9 +736,8 @@ impl StatementStore for Store { HexDisplay::from(hash) ); Some( - StatementWithMeta::decode(&mut entry.as_slice()) + Statement::decode(&mut entry.as_slice()) .map_err(|e| Error::Decode(e.to_string()))? 
- .statement, ) }, None => { @@ -865,11 +827,6 @@ impl StatementStore for Store { return SubmitResult::InternalError(Error::Runtime), }; - let statement_with_meta = StatementWithMeta { - meta: StatementMeta { global_priority: GlobalPriority(validation.global_priority) }, - statement, - }; - let current_time = self.timestamp(); let mut commit = Vec::new(); { @@ -877,7 +834,7 @@ impl StatementStore for Store { let evicted = match index.insert( hash, - &statement_with_meta.statement, + &statement, &account_id, &validation, current_time, @@ -886,7 +843,7 @@ impl StatementStore for Store { MaybeInserted::Inserted(evicted) => evicted, }; - commit.push((col::STATEMENTS, hash.to_vec(), Some(statement_with_meta.encode()))); + commit.push((col::STATEMENTS, hash.to_vec(), Some(statement.encode()))); for hash in evicted { commit.push((col::STATEMENTS, hash.to_vec(), None)); commit.push((col::EXPIRED, hash.to_vec(), Some((hash, current_time).encode()))); @@ -896,17 +853,13 @@ impl StatementStore for Store { target: LOG_TARGET, "Statement validation failed: database error {}, {:?}", e, - statement_with_meta.statement + statement ); return SubmitResult::InternalError(Error::Db(e.to_string())) } } // Release index lock self.metrics.report(|metrics| metrics.submitted_statements.inc()); - let network_priority = if validation.global_priority > 0 { - NetworkPriority::High - } else { - NetworkPriority::Low - }; + let network_priority = NetworkPriority::High; log::trace!(target: LOG_TARGET, "Statement submitted: {:?}", HexDisplay::from(&hash)); SubmitResult::New(network_priority) } @@ -991,19 +944,19 @@ mod tests { ) -> std::result::Result { use crate::tests::account; match statement.verify_signature() { - SignatureVerificationResult::Valid(_) => Ok(ValidStatement{global_priority: 10, max_count: 100, max_size: 1000}), + SignatureVerificationResult::Valid(_) => Ok(ValidStatement{max_count: 100, max_size: 1000}), SignatureVerificationResult::Invalid => Err(InvalidStatement::BadProof), SignatureVerificationResult::NoSignature => { if let Some(Proof::OnChain { block_hash, .. 
}) = statement.proof() { if block_hash == &CORRECT_BLOCK_HASH { - let (global_priority, max_count, max_size) = match statement.account_id() { - Some(a) if a == account(1) => (10, 1, 1000), - Some(a) if a == account(2) => (20, 2, 1000), - Some(a) if a == account(3) => (30, 3, 1000), - Some(a) if a == account(4) => (40, 4, 1000), - _ => (0, 2, 2000), + let (max_count, max_size) = match statement.account_id() { + Some(a) if a == account(1) => (1, 1000), + Some(a) if a == account(2) => (2, 1000), + Some(a) if a == account(3) => (3, 1000), + Some(a) if a == account(4) => (4, 1000), + _ => (2, 2000), }; - Ok(ValidStatement{ global_priority, max_count, max_size }) + Ok(ValidStatement{ max_count, max_size }) } else { Err(InvalidStatement::BadProof) } @@ -1128,7 +1081,7 @@ mod tests { let unsigned = statement(0, 1, None, 0); assert_eq!( store.submit(unsigned, StatementSource::Network), - SubmitResult::New(NetworkPriority::Low) + SubmitResult::New(NetworkPriority::High) ); } @@ -1257,15 +1210,13 @@ mod tests { // Should be over the global count limit store.index.write().options.max_total_statements = 4; assert_eq!(store.submit(statement(1, 1, None, 100), source), ignored); - // Should evict statement from account 1 - assert_eq!(store.submit(statement(4, 6, None, 100), source), ok); - assert_eq!(store.index.read().expired.len(), 7); let mut expected_statements = vec![ + statement(1, 2, Some(1), 600).hash(), statement(2, 4, None, 1000).hash(), statement(3, 4, Some(3), 300).hash(), statement(3, 5, None, 500).hash(), - statement(4, 6, None, 100).hash(), + //statement(4, 6, None, 100).hash(), ]; expected_statements.sort(); let mut statements: Vec<_> = @@ -1285,7 +1236,6 @@ mod tests { assert_eq!(store.index.read().entries.len(), 1); store.remove(&statement.hash()).unwrap(); assert_eq!(store.index.read().entries.len(), 0); - assert_eq!(store.index.read().by_global_priority.len(), 0); assert_eq!(store.index.read().accounts.len(), 0); store.set_time(DEFAULT_PURGE_AFTER_SEC + 1); store.maintain(); diff --git a/frame/statement/src/lib.rs b/frame/statement/src/lib.rs index 411c030d23928..c68dac2d29722 100644 --- a/frame/statement/src/lib.rs +++ b/frame/statement/src/lib.rs @@ -27,8 +27,6 @@ //! statement author balance: //! `max_count`: Maximum number of statements allowed for the author (signer) of this statement. //! `max_size`: Maximum total size of statements allowed for the author (signer) of this statement. -//! `global_priority`: A numerical value that defines the order in which statements are evicted when -//! the statement store hits global constraints. This is simply balance divided by `StatementCost`. //! //! This pallet also contains an offchain worker that turns on-chain statement events into //! statements. These statements are placed in the store and propagated over the network. 
@@ -179,14 +177,11 @@ where }; let statement_cost = T::StatementCost::get(); let byte_cost = T::ByteCost::get(); - let priority_cost = statement_cost; let balance = >>::balance(&account); let min_allowed_statements = T::MinAllowedStatements::get(); let max_allowed_statements = T::MaxAllowedStatements::get(); let min_allowed_bytes = T::MinAllowedBytes::get(); let max_allowed_bytes = T::MaxAllowedBytes::get(); - let global_priority = - balance.checked_div(&priority_cost).unwrap_or_default().saturated_into(); let max_count = balance .checked_div(&statement_cost) .unwrap_or_default() @@ -198,7 +193,7 @@ where .saturated_into::() .clamp(min_allowed_bytes, max_allowed_bytes); - Ok(ValidStatement { global_priority, max_count, max_size }) + Ok(ValidStatement { max_count, max_size }) } /// Submit a statement event. The statement will be picked up by the offchain worker and diff --git a/frame/statement/src/tests.rs b/frame/statement/src/tests.rs index 35b94d0da869a..c290d1325f0dd 100644 --- a/frame/statement/src/tests.rs +++ b/frame/statement/src/tests.rs @@ -37,7 +37,6 @@ fn sign_and_validate_no_balance() { let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!( Ok(ValidStatement { - global_priority: 0, max_count: MIN_ALLOWED_STATEMENTS, max_size: MIN_ALLOWED_BYTES }), @@ -50,7 +49,6 @@ fn sign_and_validate_no_balance() { let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!( Ok(ValidStatement { - global_priority: 0, max_count: MIN_ALLOWED_STATEMENTS, max_size: MIN_ALLOWED_BYTES }), @@ -63,7 +61,6 @@ fn sign_and_validate_no_balance() { let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!( Ok(ValidStatement { - global_priority: 0, max_count: MIN_ALLOWED_STATEMENTS, max_size: MIN_ALLOWED_BYTES }), @@ -79,7 +76,7 @@ fn validate_with_balance() { let mut statement = Statement::new(); statement.sign_sr25519_private(&pair); let result = Pallet::::validate_statement(StatementSource::Chain, statement); - assert_eq!(Ok(ValidStatement { global_priority: 6, max_count: 6, max_size: 3000 }), result); + assert_eq!(Ok(ValidStatement { max_count: 6, max_size: 3000 }), result); let pair = sp_core::sr25519::Pair::from_string("//Charlie", None).unwrap(); let mut statement = Statement::new(); @@ -87,7 +84,6 @@ fn validate_with_balance() { let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!( Ok(ValidStatement { - global_priority: 500, max_count: MAX_ALLOWED_STATEMENTS, max_size: MAX_ALLOWED_BYTES }), @@ -133,7 +129,7 @@ fn validate_event() { block_hash: parent_hash.into(), }); let result = Pallet::::validate_statement(StatementSource::Chain, statement.clone()); - assert_eq!(Ok(ValidStatement { global_priority: 6, max_count: 6, max_size: 3000 }), result); + assert_eq!(Ok(ValidStatement { max_count: 6, max_size: 3000 }), result); // Use wrong event index statement.set_proof(Proof::OnChain { diff --git a/primitives/statement-store/src/runtime_api.rs b/primitives/statement-store/src/runtime_api.rs index d8f84ff116ab8..13f88bc977e9e 100644 --- a/primitives/statement-store/src/runtime_api.rs +++ b/primitives/statement-store/src/runtime_api.rs @@ -34,10 +34,6 @@ pub struct ValidStatement { pub max_count: u32, /// Max total data size for this account, as calculated by the runtime. pub max_size: u32, - /// Global priority value. This is used to prioritize statements on the global scale. 
- /// If the global limit of messages is reached, the statement with the lowest priority will be - /// removed first. - pub global_priority: u32, } /// An reason for an invalid statement. From 71b39cf81d7d487688a407a8720762b18b1861f9 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 25 Apr 2023 13:18:05 +0200 Subject: [PATCH 75/78] Validate fields when decoding --- client/statement-store/src/lib.rs | 36 ++++++++------------------ frame/statement/src/tests.rs | 20 +++------------ primitives/statement-store/src/lib.rs | 37 ++++++++++++++++++++++++++- 3 files changed, 50 insertions(+), 43 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 202b129a5f4d0..086ad0fae7e23 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -217,12 +217,7 @@ impl Index { Index { options, ..Default::default() } } - fn insert_new( - &mut self, - hash: Hash, - account: AccountId, - statement: &Statement, - ) { + fn insert_new(&mut self, hash: Hash, account: AccountId, statement: &Statement) { let mut all_topics = [None; MAX_TOPICS]; let mut nt = 0; while let Some(t) = statement.topic(nt) { @@ -452,8 +447,7 @@ impl Index { } // Now check global constraints as well. if !((self.total_size - would_free_size + statement_len <= self.options.max_total_size) && - self.entries.len() + 1 - evicted.len() <= - self.options.max_total_statements) + self.entries.len() + 1 - evicted.len() <= self.options.max_total_statements) { log::debug!( target: LOG_TARGET, @@ -583,8 +577,7 @@ impl Store { self.db .iter_column_while(col::STATEMENTS, |item| { let statement = item.value; - if let Ok(statement) = Statement::decode(&mut statement.as_slice()) - { + if let Ok(statement) = Statement::decode(&mut statement.as_slice()) { let hash = statement.hash(); log::trace!( target: LOG_TARGET, @@ -592,11 +585,7 @@ impl Store { HexDisplay::from(&hash) ); if let Some(account_id) = statement.account_id() { - index.insert_new( - hash, - account_id, - &statement, - ); + index.insert_new(hash, account_id, &statement); } else { log::debug!( target: LOG_TARGET, @@ -737,7 +726,7 @@ impl StatementStore for Store { ); Some( Statement::decode(&mut entry.as_slice()) - .map_err(|e| Error::Decode(e.to_string()))? 
+ .map_err(|e| Error::Decode(e.to_string()))?, ) }, None => { @@ -832,16 +821,11 @@ impl StatementStore for Store { { let mut index = self.index.write(); - let evicted = match index.insert( - hash, - &statement, - &account_id, - &validation, - current_time, - ) { - MaybeInserted::Ignored => return SubmitResult::Ignored, - MaybeInserted::Inserted(evicted) => evicted, - }; + let evicted = + match index.insert(hash, &statement, &account_id, &validation, current_time) { + MaybeInserted::Ignored => return SubmitResult::Ignored, + MaybeInserted::Inserted(evicted) => evicted, + }; commit.push((col::STATEMENTS, hash.to_vec(), Some(statement.encode()))); for hash in evicted { diff --git a/frame/statement/src/tests.rs b/frame/statement/src/tests.rs index c290d1325f0dd..51103caca60fa 100644 --- a/frame/statement/src/tests.rs +++ b/frame/statement/src/tests.rs @@ -36,10 +36,7 @@ fn sign_and_validate_no_balance() { statement.sign_sr25519_private(&pair); let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!( - Ok(ValidStatement { - max_count: MIN_ALLOWED_STATEMENTS, - max_size: MIN_ALLOWED_BYTES - }), + Ok(ValidStatement { max_count: MIN_ALLOWED_STATEMENTS, max_size: MIN_ALLOWED_BYTES }), result ); @@ -48,10 +45,7 @@ fn sign_and_validate_no_balance() { statement.sign_ed25519_private(&pair); let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!( - Ok(ValidStatement { - max_count: MIN_ALLOWED_STATEMENTS, - max_size: MIN_ALLOWED_BYTES - }), + Ok(ValidStatement { max_count: MIN_ALLOWED_STATEMENTS, max_size: MIN_ALLOWED_BYTES }), result ); @@ -60,10 +54,7 @@ fn sign_and_validate_no_balance() { statement.sign_ecdsa_private(&pair); let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!( - Ok(ValidStatement { - max_count: MIN_ALLOWED_STATEMENTS, - max_size: MIN_ALLOWED_BYTES - }), + Ok(ValidStatement { max_count: MIN_ALLOWED_STATEMENTS, max_size: MIN_ALLOWED_BYTES }), result ); }); @@ -83,10 +74,7 @@ fn validate_with_balance() { statement.sign_sr25519_private(&pair); let result = Pallet::::validate_statement(StatementSource::Chain, statement); assert_eq!( - Ok(ValidStatement { - max_count: MAX_ALLOWED_STATEMENTS, - max_size: MAX_ALLOWED_BYTES - }), + Ok(ValidStatement { max_count: MAX_ALLOWED_STATEMENTS, max_size: MAX_ALLOWED_BYTES }), result ); }); diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index d08900425da43..488f5d9b7ed2f 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -156,6 +156,14 @@ pub enum Field { Data(Vec) = 8, } +impl Field { + fn discriminant(&self) -> u8 { + // This is safe for repr(u8) + // see https://doc.rust-lang.org/reference/items/enumerations.html#pointer-casting + unsafe { *(self as *const Self as *const u8) } + } +} + /// Statement structure. #[derive(TypeInfo, sp_core::RuntimeDebug, PassByCodec, Clone, PartialEq, Eq, Default)] pub struct Statement { @@ -173,9 +181,14 @@ impl Decode for Statement { // Encoding matches that of Vec. Basically this just means accepting that there // will be a prefix of vector length. 
let num_fields: codec::Compact = Decode::decode(input)?; + let mut tag = 0; let mut statement = Statement::new(); - for _ in 0..num_fields.into() { + for i in 0..num_fields.into() { let field: Field = Decode::decode(input)?; + if i > 0 && field.discriminant() <= tag { + return Err("Invalid field order or duplicate fields".into()) + } + tag = field.discriminant(); match field { Field::AuthenticityProof(p) => statement.set_proof(p), Field::DecryptionKey(key) => statement.set_decryption_key(key), @@ -541,6 +554,28 @@ mod test { assert_eq!(decoded, statement); } + #[test] + fn decode_checks_fields() { + let topic1 = [0x01; 32]; + let topic2 = [0x02; 32]; + let priority = 999; + + let fields = vec![ + Field::Priority(priority), + Field::Topic1(topic1), + Field::Topic1(topic1), + Field::Topic2(topic2), + ] + .encode(); + + assert!(Statement::decode(&mut fields.as_slice()).is_err()); + + let fields = + vec![Field::Topic1(topic1), Field::Priority(priority), Field::Topic2(topic2)].encode(); + + assert!(Statement::decode(&mut fields.as_slice()).is_err()); + } + #[test] fn sign_and_verify() { let mut statement = Statement::new(); From a798b08e1c1de7f7c808cb156066c7f06e1d086c Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 25 Apr 2023 16:04:59 +0200 Subject: [PATCH 76/78] Limit validation channel size --- Cargo.lock | 2 +- client/network/statement/Cargo.toml | 2 +- client/network/statement/src/lib.rs | 38 +++++++++++++++++++---------- 3 files changed, 27 insertions(+), 15 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 24b9f02add407..6d34f1f187c91 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9237,6 +9237,7 @@ name = "sc-network-statement" version = "0.10.0-dev" dependencies = [ "array-bytes", + "async-channel", "futures", "libp2p", "log", @@ -9245,7 +9246,6 @@ dependencies = [ "sc-network", "sc-network-common", "sc-peerset", - "sc-utils", "sp-consensus", "sp-runtime", "sp-statement-store", diff --git a/client/network/statement/Cargo.toml b/client/network/statement/Cargo.toml index 52d05b941420f..36d8cb077210d 100644 --- a/client/network/statement/Cargo.toml +++ b/client/network/statement/Cargo.toml @@ -14,6 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] array-bytes = "4.1" +async-channel = "1.8.0" codec = { package = "parity-scale-codec", version = "3.2.2", features = ["derive"] } futures = "0.3.21" libp2p = "0.50.0" @@ -23,7 +24,6 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0. 
sc-network-common = { version = "0.10.0-dev", path = "../common" } sc-network = { version = "0.10.0-dev", path = "../" } sc-peerset = { version = "4.0.0-dev", path = "../../peerset" } -sc-utils = { version = "4.0.0-dev", path = "../../utils" } sp-runtime = { version = "7.0.0", path = "../../../primitives/runtime" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sp-statement-store = { version = "4.0.0-dev", path = "../../../primitives/statement-store" } diff --git a/client/network/statement/src/lib.rs b/client/network/statement/src/lib.rs index e5256d3420c59..02cbab27a6a15 100644 --- a/client/network/statement/src/lib.rs +++ b/client/network/statement/src/lib.rs @@ -43,7 +43,6 @@ use sc_network_common::{ role::ObservedRole, sync::{SyncEvent, SyncEventStream}, }; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_statement_store::{ Hash, NetworkPriority, Statement, StatementSource, StatementStore, SubmitResult, }; @@ -152,8 +151,7 @@ impl StatementHandlerPrototype { ) -> error::Result> { let net_event_stream = network.event_stream("statement-handler-net"); let sync_event_stream = sync.event_stream("statement-handler-sync"); - let (queue_sender, mut queue_receiver) = - tracing_unbounded("mpsc_statement_validator", 100_000); + let (queue_sender, mut queue_receiver) = async_channel::bounded(100_000); let store = statement_store.clone(); executor( @@ -230,7 +228,7 @@ pub struct StatementHandler< // All connected peers peers: HashMap, statement_store: Arc, - queue_sender: TracingUnboundedSender<(Statement, oneshot::Sender)>, + queue_sender: async_channel::Sender<(Statement, oneshot::Sender)>, /// Prometheus metrics. metrics: Option, } @@ -382,15 +380,29 @@ where match self.pending_statements_peers.entry(hash) { Entry::Vacant(entry) => { let (completion_sender, completion_receiver) = oneshot::channel(); - if self.queue_sender.unbounded_send((s, completion_sender)).is_ok() { - self.pending_statements.push( - async move { - let res = completion_receiver.await; - (hash, res.ok()) - } - .boxed(), - ); - entry.insert(HashSet::from_iter([who])); + match self.queue_sender.try_send((s, completion_sender)) { + Ok(()) => { + self.pending_statements.push( + async move { + let res = completion_receiver.await; + (hash, res.ok()) + } + .boxed(), + ); + entry.insert(HashSet::from_iter([who])); + }, + Err(async_channel::TrySendError::Full(_)) => { + log::debug!( + target: LOG_TARGET, + "Dropped statement because validation channel is full", + ); + }, + Err(async_channel::TrySendError::Closed(_)) => { + log::trace!( + target: LOG_TARGET, + "Dropped statement because validation channel is closed", + ); + }, } }, Entry::Occupied(mut entry) => { From 7af3b5af9489f726c135a7441a6191dac80c0167 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 25 Apr 2023 16:09:06 +0200 Subject: [PATCH 77/78] Made a comment into module doc --- client/statement-store/src/lib.rs | 50 +++++++++++++++++-------------- 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 086ad0fae7e23..179f3a77df443 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -16,30 +16,34 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -// Constraint management. 
-// -// Each time a new statement is inserted into the store, it is first validated with the runtime -// Validation function computes `global_priority`, 'max_count' and `max_size` for a statement. -// The following constraints are then checked: -// * For a given account id, there may be at most `max_count` statements with `max_size` total data -// size. To satisfy this, statements for this account ID are removed from the store starting with -// the lowest priority until a constraint is satisfied. -// * There may not be more than `MAX_TOTAL_STATEMENTS` total statements with `MAX_TOTAL_SIZE` size. -// To satisfy this, statements are removed from the store starting with the lowest -// `global_priority` until a constraint is satisfied. -// -// When a new statement is inserted that would not satisfy constraints in the first place, no -// statements are deleted and `Ignored` result is returned. -// The order in which statements with the same priority are deleted is unspecified. -// -// Statement expiration. -// -// Each time a statement is removed from the store (Either evicted by higher priority statement or -// explicitly with the `remove` function) the statement is marked as expired. Expired statements -// can't be added to the store for `Options::purge_after_sec` seconds. This is to prevent old -// statements from being propagated on the network. - //! Disk-backed statement store. +//! +//! This module contains an implementation of `sp_statement_store::StatementStore` which is backed +//! by a database. +//! +//! Constraint management. +//! +//! Each time a new statement is inserted into the store, it is first validated with the runtime +//! Validation function computes `global_priority`, 'max_count' and `max_size` for a statement. +//! The following constraints are then checked: +//! * For a given account id, there may be at most `max_count` statements with `max_size` total data +//! size. To satisfy this, statements for this account ID are removed from the store starting with +//! the lowest priority until a constraint is satisfied. +//! * There may not be more than `MAX_TOTAL_STATEMENTS` total statements with `MAX_TOTAL_SIZE` size. +//! To satisfy this, statements are removed from the store starting with the lowest +//! `global_priority` until a constraint is satisfied. +//! +//! When a new statement is inserted that would not satisfy constraints in the first place, no +//! statements are deleted and `Ignored` result is returned. +//! The order in which statements with the same priority are deleted is unspecified. +//! +//! Statement expiration. +//! +//! Each time a statement is removed from the store (Either evicted by higher priority statement or +//! explicitly with the `remove` function) the statement is marked as expired. Expired statements +//! can't be added to the store for `Options::purge_after_sec` seconds. This is to prevent old +//! statements from being propagated on the network. 
+ #![warn(missing_docs)] #![warn(unused_extern_crates)] From 47a8e6b67a448004fbdc8461677b6ba88d45d1e8 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 25 Apr 2023 16:28:34 +0200 Subject: [PATCH 78/78] Removed submit_encoded --- client/rpc/src/statement/mod.rs | 6 ++++-- client/statement-store/src/lib.rs | 18 ++---------------- primitives/statement-store/README.md | 2 +- primitives/statement-store/src/lib.rs | 5 ++++- primitives/statement-store/src/store_api.rs | 3 --- 5 files changed, 11 insertions(+), 23 deletions(-) diff --git a/client/rpc/src/statement/mod.rs b/client/rpc/src/statement/mod.rs index 0959777a6d49c..b4f432bbbb0e3 100644 --- a/client/rpc/src/statement/mod.rs +++ b/client/rpc/src/statement/mod.rs @@ -18,7 +18,7 @@ //! Substrate statement store API. -use codec::Encode; +use codec::{Decode, Encode}; use jsonrpsee::core::{async_trait, RpcResult}; /// Re-export the API for backward compatibility. pub use sc_rpc_api::statement::{error::Error, StatementApiServer}; @@ -88,7 +88,9 @@ impl StatementApiServer for StatementStore { } fn submit(&self, encoded: Bytes) -> RpcResult<()> { - match self.store.submit_encoded(&encoded, StatementSource::Local) { + let statement = Decode::decode(&mut &*encoded) + .map_err(|e| Error::StatementStore(format!("Eror decoding statement: {:?}", e)))?; + match self.store.submit(statement, StatementSource::Local) { SubmitResult::New(_) | SubmitResult::Known => Ok(()), // `KnownExpired` should not happen. Expired statements submitted with // `StatementSource::Rpc` should be renewed. diff --git a/client/statement-store/src/lib.rs b/client/statement-store/src/lib.rs index 179f3a77df443..2e2bb3bd3b430 100644 --- a/client/statement-store/src/lib.rs +++ b/client/statement-store/src/lib.rs @@ -44,7 +44,6 @@ //! can't be added to the store for `Options::purge_after_sec` seconds. This is to prevent old //! statements from being propagated on the network. - #![warn(missing_docs)] #![warn(unused_extern_crates)] @@ -473,6 +472,7 @@ impl Index { impl Store { /// Create a new shared store instance. There should only be one per process. + /// `path` will be used to open a statement database or create a new one if it does not exist. pub fn new_shared( path: &std::path::Path, options: Options, @@ -512,6 +512,7 @@ impl Store { } /// Create a new instance. + /// `path` will be used to open a statement database or create a new one if it does not exist. fn new( path: &std::path::Path, options: Options, @@ -852,21 +853,6 @@ impl StatementStore for Store { SubmitResult::New(network_priority) } - /// Submit a SCALE-encoded statement. - fn submit_encoded(&self, mut statement: &[u8], source: StatementSource) -> SubmitResult { - match Statement::decode(&mut statement) { - Ok(decoded) => self.submit(decoded, source), - Err(e) => { - log::debug!( - target: LOG_TARGET, - "Error decoding submitted statement. Failed with: {}", - e - ); - SubmitResult::Bad("Bad SCALE encoding") - }, - } - } - /// Remove a statement by hash. fn remove(&self, hash: &Hash) -> Result<()> { let current_time = self.timestamp(); diff --git a/primitives/statement-store/README.md b/primitives/statement-store/README.md index 7dc56748e74f2..fb88aaa4ecd9c 100644 --- a/primitives/statement-store/README.md +++ b/primitives/statement-store/README.md @@ -10,7 +10,7 @@ Each field is effectively a key/value pair. Fields must be sorted and the same f Formally, `Statement` is equivalent to the type `Vec` and `Field` is the SCALE-encoded enumeration: - 0: `AuthenticityProof(Proof)`: The signature of the message. 
For cryptography where the public key cannot be derived from the signature together with the message data, then this will also include the signer's public key. The message data is all fields of the messages fields except the signature concatenated together *without the length prefix that a `Vec` would usually imply*. This is so that the signature can be derived without needing to re-encode the statement. -- 1: `DecryptionKey([u8; 32])`: The decryption key identifier which should be used to decrypt the statement's data. +- 1: `DecryptionKey([u8; 32])`: The decryption key identifier which should be used to decrypt the statement's data. In the absense of this field `Data` should be treated as not encrypted. - 2: `Priority(u32)`: Priority specifier. Higher priority statements should be kept around at the cost of lower priority statements if multiple statements from the same sender are competing for persistence or transport. Nodes should disregard when considering unsigned statements. - 3: `Channel([u8; 32])`: The channel identifier. Only one message of a given channel should be retained at once (the one of highest priority). Nodes should disregard when considering unsigned statements. - 4: `Topic1([u8; 32]))`: First topic identifier. diff --git a/primitives/statement-store/src/lib.rs b/primitives/statement-store/src/lib.rs index 488f5d9b7ed2f..e5c642d24e2b3 100644 --- a/primitives/statement-store/src/lib.rs +++ b/primitives/statement-store/src/lib.rs @@ -444,7 +444,7 @@ impl Statement { self.channel = Some(channel) } - /// Set topic by index. + /// Set topic by index. Does noting if index is over `MAX_TOPICS`. pub fn set_topic(&mut self, index: usize, topic: Topic) { if index < MAX_TOPICS { self.topics[index] = topic; @@ -473,6 +473,9 @@ impl Statement { self.num_topics as u32; let mut output = Vec::new(); + // When encoding signature payload, the length prefix is omitted. + // This is so that the signature for encoded statement can potentially be derived without + // needing to re-encode the statement. if !for_signing { let compact_len = codec::Compact::(num_fields); compact_len.encode_to(&mut output); diff --git a/primitives/statement-store/src/store_api.rs b/primitives/statement-store/src/store_api.rs index 321c87f3ab063..89daa3e963c56 100644 --- a/primitives/statement-store/src/store_api.rs +++ b/primitives/statement-store/src/store_api.rs @@ -85,9 +85,6 @@ pub trait StatementStore: Send + Sync { /// Submit a statement. fn submit(&self, statement: Statement, source: StatementSource) -> SubmitResult; - /// Submit a SCALE-encoded statement. - fn submit_encoded(&self, statement: &[u8], source: StatementSource) -> SubmitResult; - /// Remove a statement from the store. fn remove(&self, hash: &Hash) -> Result<()>; }
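With `submit_encoded` removed from the `StatementStore` trait, a caller that holds SCALE-encoded bytes now decodes them first and hands the resulting `Statement` to `submit`, mirroring the updated RPC handler above. The following is a minimal sketch of that calling pattern; the helper name `submit_scale_encoded` and the error messages are illustrative only, while `Statement::decode`, `StatementStore::submit`, `StatementSource::Local` and the `SubmitResult` variants are the ones used in these patches.

use codec::Decode;
use sp_statement_store::{Statement, StatementSource, StatementStore, SubmitResult};

/// Decode SCALE bytes and submit the statement to the store (sketch).
fn submit_scale_encoded(store: &dyn StatementStore, encoded: &[u8]) -> Result<(), String> {
	// Decoding now happens on the caller side; a bad encoding or invalid
	// field ordering is rejected here rather than inside the store.
	let statement = Statement::decode(&mut &*encoded)
		.map_err(|e| format!("Error decoding statement: {:?}", e))?;
	match store.submit(statement, StatementSource::Local) {
		// Newly accepted or already known statements count as success.
		SubmitResult::New(_) | SubmitResult::Known => Ok(()),
		// Expired, ignored (store constraints not satisfied), bad or failed
		// submissions are surfaced to the caller.
		_ => Err("Statement was not accepted by the store".into()),
	}
}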