From 49e54b7f353dd696f73ff5a9ed4480235242f7b9 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 19 Nov 2018 15:13:56 +0100 Subject: [PATCH 1/9] Fill transaction hash on ethGetLog of light client. This is enifficient but we keep align with spec. --- rpc/src/v1/helpers/light_fetch.rs | 39 +++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index cf854c77bfa..2e9a0db421d 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -318,6 +318,7 @@ impl LightFetch { const MAX_BLOCK_RANGE: u64 = 1000; let fetcher = self.clone(); + let fetcher_block = self.clone(); self.headers_range_by_block_id(filter.from_block, filter.to_block, MAX_BLOCK_RANGE) .and_then(move |mut headers| { if headers.is_empty() { @@ -353,7 +354,7 @@ impl LightFetch { data: log.data.into(), block_hash: Some(hash.into()), block_number: Some(num.into()), - // No way to easily retrieve transaction hash, so let's just skip it. + // No way to easily retrieve transaction hash. transaction_hash: None, transaction_index: Some(transaction_index.into()), log_index: Some(block_index.into()), @@ -367,8 +368,42 @@ impl LightFetch { } future::ok::<_,OnDemandError>(matches) }) // and then collect them into a vector. - .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) .map_err(errors::on_demand_error) + // retrieve transaction hash. + .and_then(move |matches| { + let mut blocks = BTreeMap::new(); + let mut result: Vec = matches.into_iter().map(|(_, v)| { + { + let block_hash = v.block_hash.as_ref().expect("Previously initialized with value; qed"); + blocks.entry(block_hash.clone()).or_insert_with(|| { + fetcher_block.block(BlockId::Hash(block_hash.clone().into())) + }); + } + v + }).collect(); + // future get blocks (unordered it) + stream::futures_unordered(blocks.into_iter().map(|(_,v)|v)).collect().map(move |blocks| { + let mut tr_per_bl = BTreeMap::new(); + for enc_bl in blocks.iter() { + let bl_hash = enc_bl.hash(); + let tr = enc_bl.transactions(); + tr_per_bl.insert(bl_hash, tr); + } + for log in result.iter_mut() { + let log_index: U256 = log.transaction_index.expect("Previously initialized with value; qed").into(); + if log_index < usize::max_value().into() { + let block_hash = log.block_hash.clone().expect("Previously initialized with value; qed").into(); + let btr = tr_per_bl.get(&block_hash) + .and_then(|arr_tr| arr_tr.get(log_index.as_usize())) + .map(|tr| tr.hash().into()); + log.transaction_hash = btr; + } else { + trace!(target: "light_fetch", "A received Receipts indexed other usize length ignored"); + } + } + result + }) + }) }); match maybe_future { From f25fefa11c592c4abf6903995fe77d51e5ddcbfe Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 23 Nov 2018 16:50:41 +0100 Subject: [PATCH 2/9] Using better variables names. 
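For readers following the rename: below is a minimal, std-only sketch of the lookup
that `transactions_per_block` and `tx_hash` perform in the previous patch. It is
illustrative only (plain `String` hashes, no on-demand futures, and a hypothetical
helper name `fill_transaction_hashes`), not the actual LightFetch code.

    use std::collections::BTreeMap;

    // Simplified stand-in for the RPC `Log` type.
    #[derive(Debug)]
    struct Log {
        block_hash: Option<String>,
        transaction_index: Option<usize>,
        transaction_hash: Option<String>,
    }

    // Fill `transaction_hash` for each log by indexing the transactions of its
    // block with the log's `transaction_index`, mirroring the lookup above.
    fn fill_transaction_hashes(
        logs: &mut [Log],
        transactions_per_block: &BTreeMap<String, Vec<String>>,
    ) {
        for log in logs.iter_mut() {
            let block_hash = log.block_hash.as_ref().expect("set when the log was built");
            let index = log.transaction_index.expect("set when the log was built");
            log.transaction_hash = transactions_per_block
                .get(block_hash)
                .and_then(|txs| txs.get(index))
                .cloned();
        }
    }

    fn main() {
        let mut txs = BTreeMap::new();
        txs.insert("0xblock1".to_string(), vec!["0xtx0".to_string(), "0xtx1".to_string()]);

        let mut logs = vec![Log {
            block_hash: Some("0xblock1".to_string()),
            transaction_index: Some(1),
            transaction_hash: None,
        }];
        fill_transaction_hashes(&mut logs, &txs);
        assert_eq!(logs[0].transaction_hash.as_deref(), Some("0xtx1"));
    }
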
--- rpc/src/v1/helpers/light_fetch.rs | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 2e9a0db421d..c319d0a9457 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -374,29 +374,25 @@ impl LightFetch { let mut blocks = BTreeMap::new(); let mut result: Vec = matches.into_iter().map(|(_, v)| { { - let block_hash = v.block_hash.as_ref().expect("Previously initialized with value; qed"); - blocks.entry(block_hash.clone()).or_insert_with(|| { - fetcher_block.block(BlockId::Hash(block_hash.clone().into())) - }); + let block_hash = v.block_hash.as_ref().expect("Previously initialized with value; qed"); + blocks.entry(block_hash.clone()).or_insert_with(|| { + fetcher_block.block(BlockId::Hash(block_hash.clone().into())) + }); } v }).collect(); // future get blocks (unordered it) stream::futures_unordered(blocks.into_iter().map(|(_,v)|v)).collect().map(move |blocks| { - let mut tr_per_bl = BTreeMap::new(); - for enc_bl in blocks.iter() { - let bl_hash = enc_bl.hash(); - let tr = enc_bl.transactions(); - tr_per_bl.insert(bl_hash, tr); - } + let transactions_per_block: BTreeMap<_, _> = blocks.iter() + .map(|block| (block.hash(), block.transactions())).collect(); for log in result.iter_mut() { let log_index: U256 = log.transaction_index.expect("Previously initialized with value; qed").into(); if log_index < usize::max_value().into() { let block_hash = log.block_hash.clone().expect("Previously initialized with value; qed").into(); - let btr = tr_per_bl.get(&block_hash) - .and_then(|arr_tr| arr_tr.get(log_index.as_usize())) + let tx_hash = transactions_per_block.get(&block_hash) + .and_then(|txs| txs.get(log_index.as_usize())) .map(|tr| tr.hash().into()); - log.transaction_hash = btr; + log.transaction_hash = tx_hash; } else { trace!(target: "light_fetch", "A received Receipts indexed other usize length ignored"); } From 564559ddc593da8c0526b55a0144727b9768405e Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 3 Dec 2018 19:13:31 +0100 Subject: [PATCH 3/9] Expose old fast light get log as `parity_getLogsLight`. --- rpc/src/v1/helpers/light_fetch.rs | 68 +++++++++++++++++++++++++++++++ rpc/src/v1/impls/eth.rs | 50 +++++++++++++---------- rpc/src/v1/impls/light/parity.rs | 14 ++++++- rpc/src/v1/impls/parity.rs | 9 +++- rpc/src/v1/traits/parity.rs | 8 +++- 5 files changed, 123 insertions(+), 26 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index c319d0a9457..37983de96ff 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -310,6 +310,74 @@ impl LightFetch { })) } + /// Variant of get transaction logs that does not fetch log transactions hash + pub fn logs_light(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { + use std::collections::BTreeMap; + use jsonrpc_core::futures::stream::{self, Stream}; + + const MAX_BLOCK_RANGE: u64 = 1000; + + let fetcher = self.clone(); + self.headers_range_by_block_id(filter.from_block, filter.to_block, MAX_BLOCK_RANGE) + .and_then(move |mut headers| { + if headers.is_empty() { + return Either::A(future::ok(Vec::new())); + } + + let on_demand = &fetcher.on_demand; + + let maybe_future = fetcher.sync.with_context(move |ctx| { + // find all headers which match the filter, and fetch the receipts for each one. + // match them with their numbers for easy sorting later. 
+ let bit_combos = filter.bloom_possibilities(); + let receipts_futures: Vec<_> = headers.drain(..) + .filter(|ref hdr| { + let hdr_bloom = hdr.log_bloom(); + bit_combos.iter().any(|bloom| hdr_bloom.contains_bloom(bloom)) + }) + .map(|hdr| (hdr.number(), hdr.hash(), request::BlockReceipts(hdr.into()))) + .map(|(num, hash, req)| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS_PROOF).map(move |x| (num, hash, x))) + .collect(); + + // as the receipts come in, find logs within them which match the filter. + // insert them into a BTreeMap to maintain order by number and block index. + stream::futures_unordered(receipts_futures) + .fold(BTreeMap::new(), move |mut matches, (num, hash, receipts)| { + let mut block_index = 0; + for (transaction_index, receipt) in receipts.into_iter().enumerate() { + for (transaction_log_index, log) in receipt.logs.into_iter().enumerate() { + if filter.matches(&log) { + matches.insert((num, block_index), Log { + address: log.address.into(), + topics: log.topics.into_iter().map(Into::into).collect(), + data: log.data.into(), + block_hash: Some(hash.into()), + block_number: Some(num.into()), + // No way to easily retrieve transaction hash, so let's just skip it. + transaction_hash: None, + transaction_index: Some(transaction_index.into()), + log_index: Some(block_index.into()), + transaction_log_index: Some(transaction_log_index.into()), + log_type: "mined".into(), + removed: false, + }); + } + block_index += 1; + } + } + future::ok::<_,OnDemandError>(matches) + }) // and then collect them into a vector. + .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) + .map_err(errors::on_demand_error) + }); + + match maybe_future { + Some(fut) => Either::B(Either::A(fut)), + None => Either::B(Either::B(future::err(errors::network_disabled()))), + } + }) + } + /// Get transaction logs pub fn logs(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { use std::collections::BTreeMap; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 11e64227c70..a6230c18113 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -138,6 +138,33 @@ enum PendingTransactionId { Location(PendingOrBlock, usize) } +pub fn base_logs (client: &C, miner: &M, filter: Filter) -> BoxFuture> where + C: miner::BlockChainClient + BlockChainClient + StateClient + Call, + M: MinerService { + let include_pending = filter.to_block == Some(BlockNumber::Pending); + let filter: EthcoreFilter = match filter.try_into() { + Ok(value) => value, + Err(err) => return Box::new(future::err(err)), + }; + let mut logs = match client.logs(filter.clone()) { + Ok(logs) => logs + .into_iter() + .map(From::from) + .collect::>(), + Err(id) => return Box::new(future::err(errors::filter_block_not_found(id))), + }; + + if include_pending { + let best_block = client.chain_info().best_block_number; + let pending = pending_logs(&*miner, best_block, &filter); + logs.extend(pending); + } + + let logs = limit_logs(logs, filter.limit); + + Box::new(future::ok(logs)) +} + impl EthClient where C: miner::BlockChainClient + BlockChainClient + StateClient + Call + EngineInfo, SN: SnapshotService, @@ -711,28 +738,7 @@ impl Eth for EthClient< } fn logs(&self, filter: Filter) -> BoxFuture> { - let include_pending = filter.to_block == Some(BlockNumber::Pending); - let filter: EthcoreFilter = match filter.try_into() { - Ok(value) => value, - Err(err) => return Box::new(future::err(err)), - }; - let mut logs = match self.client.logs(filter.clone()) { - Ok(logs) => logs - .into_iter() - 
.map(From::from) - .collect::>(), - Err(id) => return Box::new(future::err(errors::filter_block_not_found(id))), - }; - - if include_pending { - let best_block = self.client.chain_info().best_block_number; - let pending = pending_logs(&*self.miner, best_block, &filter); - logs.extend(pending); - } - - let logs = limit_logs(logs, filter.limit); - - Box::new(future::ok(logs)) + base_logs(&*self.client, &*self.miner, filter.into()) } fn work(&self, no_new_work_timeout: Trailing) -> Result { diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs index 3d5bf78977f..66beda8583f 100644 --- a/rpc/src/v1/impls/light/parity.rs +++ b/rpc/src/v1/impls/light/parity.rs @@ -26,9 +26,10 @@ use ethstore::random_phrase; use sync::LightSyncProvider; use ethcore::account_provider::AccountProvider; use ethcore_logger::RotatingLogger; +use ethcore::filter::Filter as EthcoreFilter; use jsonrpc_core::{Result, BoxFuture}; -use jsonrpc_core::futures::Future; +use jsonrpc_core::futures::{future, Future}; use jsonrpc_macros::Trailing; use v1::helpers::{self, errors, ipfs, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::dispatch::LightDispatcher; @@ -40,7 +41,7 @@ use v1::types::{ Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, BlockNumber, LightBlockNumber, ConsensusCapability, VersionInfo, - OperationsInfo, ChainStatus, + OperationsInfo, ChainStatus, Log, Filter, AccountInfo, HwAccountInfo, Header, RichHeader, Receipt, }; use Host; @@ -425,4 +426,13 @@ impl Parity for ParityClient { Err(errors::status_error(has_peers)) } } + + fn logs_light(&self, filter: Filter) -> BoxFuture> { + let filter = match filter.try_into() { + Ok(value) => value, + Err(err) => return Box::new(future::err(err)), + }; + Box::new(self.fetcher().logs_light(filter)) as BoxFuture<_> + } + } diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 2671a0eab51..bd5650443be 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -29,6 +29,7 @@ use sync::{SyncProvider, ManageNetwork}; use ethcore::account_provider::AccountProvider; use ethcore::client::{BlockChainClient, StateClient, Call}; use ethcore::ids::BlockId; +use ethcore::filter::Filter as EthcoreFilter; use ethcore::miner::{self, MinerService}; use ethcore::snapshot::{SnapshotService, RestorationStatus}; use ethcore::state::StateInfo; @@ -47,7 +48,7 @@ use v1::types::{ Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, BlockNumber, ConsensusCapability, VersionInfo, - OperationsInfo, ChainStatus, + OperationsInfo, ChainStatus, Log, Filter, AccountInfo, HwAccountInfo, RichHeader, Receipt, block_number_to_id }; @@ -504,4 +505,10 @@ impl Parity for ParityClient where Err(errors::status_error(has_peers)) } } + + fn logs_light(&self, filter: Filter) -> BoxFuture> { + use v1::impls::eth::base_logs; + // only specific impl for lightclient + base_logs(&*self.client, &*self.miner, filter.into()) + } } diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs index a81e5008992..a7f542c9cf8 100644 --- a/rpc/src/v1/traits/parity.rs +++ b/rpc/src/v1/traits/parity.rs @@ -26,7 +26,7 @@ use v1::types::{ Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, BlockNumber, ConsensusCapability, VersionInfo, - OperationsInfo, ChainStatus, + OperationsInfo, ChainStatus, Log, Filter, AccountInfo, HwAccountInfo, RichHeader, Receipt, }; @@ -237,5 +237,11 @@ build_rpc_trait! { /// Otherwise the RPC returns error. 
#[rpc(name = "parity_nodeStatus")] fn status(&self) -> Result<()>; + + /// Returns logs matching given filter object. + /// Skip filling transaction hash for faster query. + #[rpc(name = "parity_getLogsLight")] + fn logs_light(&self, Filter) -> BoxFuture>; + } } From dac8cadf12a48ca520a30cda4f4cd51a64c83286 Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 5 Dec 2018 10:45:14 +0100 Subject: [PATCH 4/9] Update rpc/src/v1/helpers/light_fetch.rs Fix indent. Co-Authored-By: cheme --- rpc/src/v1/helpers/light_fetch.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 37983de96ff..917a55b07f8 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -310,7 +310,7 @@ impl LightFetch { })) } - /// Variant of get transaction logs that does not fetch log transactions hash + /// Variant of get transaction logs that does not fetch log transactions hash pub fn logs_light(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { use std::collections::BTreeMap; use jsonrpc_core::futures::stream::{self, Stream}; From 0cbb9a7190885a73d545174a6d2cf1b471f2a2f1 Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 11 Dec 2018 14:34:46 +0100 Subject: [PATCH 5/9] Factor common code between light_logs and logs. --- rpc/src/v1/helpers/light_fetch.rs | 141 ++++++++++-------------------- rpc/src/v1/impls/light/parity.rs | 1 - rpc/src/v1/impls/parity.rs | 1 - 3 files changed, 44 insertions(+), 99 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 917a55b07f8..897a9c3989f 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -46,6 +46,7 @@ use ethereum_types::{U256, Address}; use hash::H256; use parking_lot::Mutex; use fastmap::H256FastMap; +use std::collections::BTreeMap; use transaction::{Action, Transaction as EthTransaction, PendingTransaction, SignedTransaction, LocalizedTransaction}; use v1::helpers::{CallRequest as CallRequestHelper, errors, dispatch}; @@ -308,11 +309,11 @@ impl LightFetch { Some(OnDemandResponse::Receipts(b)) => b, _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), })) + + } - /// Variant of get transaction logs that does not fetch log transactions hash - pub fn logs_light(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { - use std::collections::BTreeMap; + fn logs_common(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { use jsonrpc_core::futures::stream::{self, Stream}; const MAX_BLOCK_RANGE: u64 = 1000; @@ -321,7 +322,7 @@ impl LightFetch { self.headers_range_by_block_id(filter.from_block, filter.to_block, MAX_BLOCK_RANGE) .and_then(move |mut headers| { if headers.is_empty() { - return Either::A(future::ok(Vec::new())); + return Either::A(future::ok(BTreeMap::new())); } let on_demand = &fetcher.on_demand; @@ -343,7 +344,7 @@ impl LightFetch { // insert them into a BTreeMap to maintain order by number and block index. stream::futures_unordered(receipts_futures) .fold(BTreeMap::new(), move |mut matches, (num, hash, receipts)| { - let mut block_index = 0; + let mut block_index: usize = 0; for (transaction_index, receipt) in receipts.into_iter().enumerate() { for (transaction_log_index, log) in receipt.logs.into_iter().enumerate() { if filter.matches(&log) { @@ -366,8 +367,7 @@ impl LightFetch { } } future::ok::<_,OnDemandError>(matches) - }) // and then collect them into a vector. 
- .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) + }) .map_err(errors::on_demand_error) }); @@ -376,105 +376,52 @@ impl LightFetch { None => Either::B(Either::B(future::err(errors::network_disabled()))), } }) + + } + + + /// Variant of get transaction logs that does not fetch log transactions hash + pub fn logs_light(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { + self.logs_common(filter) + .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) } /// Get transaction logs pub fn logs(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { - use std::collections::BTreeMap; use jsonrpc_core::futures::stream::{self, Stream}; - - const MAX_BLOCK_RANGE: u64 = 1000; - - let fetcher = self.clone(); let fetcher_block = self.clone(); - self.headers_range_by_block_id(filter.from_block, filter.to_block, MAX_BLOCK_RANGE) - .and_then(move |mut headers| { - if headers.is_empty() { - return Either::A(future::ok(Vec::new())); + self.logs_common(filter) + // retrieve transaction hash. + .and_then(move |matches| { + let mut blocks = BTreeMap::new(); + let mut result: Vec = matches.into_iter().map(|(_, v)| { + { + let block_hash = v.block_hash.as_ref().expect("Previously initialized with value; qed"); + blocks.entry(block_hash.clone()).or_insert_with(|| { + fetcher_block.block(BlockId::Hash(block_hash.clone().into())) + }); } - - let on_demand = &fetcher.on_demand; - - let maybe_future = fetcher.sync.with_context(move |ctx| { - // find all headers which match the filter, and fetch the receipts for each one. - // match them with their numbers for easy sorting later. - let bit_combos = filter.bloom_possibilities(); - let receipts_futures: Vec<_> = headers.drain(..) - .filter(|ref hdr| { - let hdr_bloom = hdr.log_bloom(); - bit_combos.iter().any(|bloom| hdr_bloom.contains_bloom(bloom)) - }) - .map(|hdr| (hdr.number(), hdr.hash(), request::BlockReceipts(hdr.into()))) - .map(|(num, hash, req)| on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS_PROOF).map(move |x| (num, hash, x))) - .collect(); - - // as the receipts come in, find logs within them which match the filter. - // insert them into a BTreeMap to maintain order by number and block index. - stream::futures_unordered(receipts_futures) - .fold(BTreeMap::new(), move |mut matches, (num, hash, receipts)| { - let mut block_index = 0; - for (transaction_index, receipt) in receipts.into_iter().enumerate() { - for (transaction_log_index, log) in receipt.logs.into_iter().enumerate() { - if filter.matches(&log) { - matches.insert((num, block_index), Log { - address: log.address.into(), - topics: log.topics.into_iter().map(Into::into).collect(), - data: log.data.into(), - block_hash: Some(hash.into()), - block_number: Some(num.into()), - // No way to easily retrieve transaction hash. - transaction_hash: None, - transaction_index: Some(transaction_index.into()), - log_index: Some(block_index.into()), - transaction_log_index: Some(transaction_log_index.into()), - log_type: "mined".into(), - removed: false, - }); - } - block_index += 1; - } - } - future::ok::<_,OnDemandError>(matches) - }) // and then collect them into a vector. - .map_err(errors::on_demand_error) - // retrieve transaction hash. 
- .and_then(move |matches| { - let mut blocks = BTreeMap::new(); - let mut result: Vec = matches.into_iter().map(|(_, v)| { - { - let block_hash = v.block_hash.as_ref().expect("Previously initialized with value; qed"); - blocks.entry(block_hash.clone()).or_insert_with(|| { - fetcher_block.block(BlockId::Hash(block_hash.clone().into())) - }); - } - v - }).collect(); - // future get blocks (unordered it) - stream::futures_unordered(blocks.into_iter().map(|(_,v)|v)).collect().map(move |blocks| { - let transactions_per_block: BTreeMap<_, _> = blocks.iter() - .map(|block| (block.hash(), block.transactions())).collect(); - for log in result.iter_mut() { - let log_index: U256 = log.transaction_index.expect("Previously initialized with value; qed").into(); - if log_index < usize::max_value().into() { - let block_hash = log.block_hash.clone().expect("Previously initialized with value; qed").into(); - let tx_hash = transactions_per_block.get(&block_hash) - .and_then(|txs| txs.get(log_index.as_usize())) - .map(|tr| tr.hash().into()); - log.transaction_hash = tx_hash; - } else { - trace!(target: "light_fetch", "A received Receipts indexed other usize length ignored"); - } - } - result - }) - }) - }); - - match maybe_future { - Some(fut) => Either::B(Either::A(fut)), - None => Either::B(Either::B(future::err(errors::network_disabled()))), + v + }).collect(); + // future get blocks (unordered it) + stream::futures_unordered(blocks.into_iter().map(|(_,v)|v)).collect().map(move |blocks| { + let transactions_per_block: BTreeMap<_, _> = blocks.iter() + .map(|block| (block.hash(), block.transactions())).collect(); + for log in result.iter_mut() { + let log_index: U256 = log.transaction_index.expect("Previously initialized with value; qed").into(); + if log_index < usize::max_value().into() { + let block_hash = log.block_hash.clone().expect("Previously initialized with value; qed").into(); + let tx_hash = transactions_per_block.get(&block_hash) + .and_then(|txs| txs.get(log_index.as_usize())) + .map(|tr| tr.hash().into()); + log.transaction_hash = tx_hash; + } else { + trace!(target: "light_fetch", "A received Receipts indexed other usize length ignored"); + } } + result }) + }) } // Get a transaction by hash. also returns the index in the block. 
diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs index 1e9aa163fee..62eec97b39e 100644 --- a/rpc/src/v1/impls/light/parity.rs +++ b/rpc/src/v1/impls/light/parity.rs @@ -26,7 +26,6 @@ use ethstore::random_phrase; use sync::LightSyncProvider; use ethcore::account_provider::AccountProvider; use ethcore_logger::RotatingLogger; -use ethcore::filter::Filter as EthcoreFilter; use jsonrpc_core::{Result, BoxFuture}; use jsonrpc_core::futures::{future, Future}; diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 5bb8f3be150..0c1a5c5fe22 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -29,7 +29,6 @@ use sync::{SyncProvider, ManageNetwork}; use ethcore::account_provider::AccountProvider; use ethcore::client::{BlockChainClient, StateClient, Call}; use ethcore::ids::BlockId; -use ethcore::filter::Filter as EthcoreFilter; use ethcore::miner::{self, MinerService}; use ethcore::snapshot::{SnapshotService, RestorationStatus}; use ethcore::state::StateInfo; From 3af638538133613b7f365055fd93a1293249fd24 Mon Sep 17 00:00:00 2001 From: cheme Date: Fri, 14 Dec 2018 15:02:02 +0100 Subject: [PATCH 6/9] Remove useless check --- rpc/src/v1/helpers/light_fetch.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 897a9c3989f..bbe0f3b13bc 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -409,15 +409,12 @@ impl LightFetch { .map(|block| (block.hash(), block.transactions())).collect(); for log in result.iter_mut() { let log_index: U256 = log.transaction_index.expect("Previously initialized with value; qed").into(); - if log_index < usize::max_value().into() { - let block_hash = log.block_hash.clone().expect("Previously initialized with value; qed").into(); - let tx_hash = transactions_per_block.get(&block_hash) - .and_then(|txs| txs.get(log_index.as_usize())) - .map(|tr| tr.hash().into()); - log.transaction_hash = tx_hash; - } else { - trace!(target: "light_fetch", "A received Receipts indexed other usize length ignored"); - } + let block_hash = log.block_hash.clone().expect("Previously initialized with value; qed").into(); + let tx_hash = transactions_per_block.get(&block_hash) + // transaction index is from an enumerate call in log common so not need to check value + .and_then(|txs| txs.get(log_index.as_usize())) + .map(|tr| tr.hash().into()); + log.transaction_hash = tx_hash; } result }) From 92c6d05c4edf98f1e477bcc7c20121ee51047b93 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 Dec 2018 10:55:51 +0100 Subject: [PATCH 7/9] Rename parity light logs to 'parity_getLogsNoTransactionHash'. Fix indent and extra white lines. 
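To spell out the contract the new name advertises, here is a rough std-only sketch
(plain functions instead of the real futures, illustrative names and data): the
parity_ method returns logs with `transaction_hash` left as `None`, while the light
client's `eth_getLogs` path produces the same result plus a hash-filling pass.

    // Simplified stand-in for the RPC `Log` type.
    #[derive(Clone, Debug)]
    struct Log {
        block_number: u64,
        transaction_hash: Option<String>,
    }

    // Cheap path, as exposed by `parity_getLogsNoTransactionHash`: logs are
    // complete except that `transaction_hash` is left as `None`.
    fn logs_no_tx_hash() -> Vec<Log> {
        vec![
            Log { block_number: 1, transaction_hash: None },
            Log { block_number: 2, transaction_hash: None },
        ]
    }

    // Full path used by the light client's `eth_getLogs`: the same logs,
    // followed by an extra pass that fills the hashes (placeholder below
    // instead of the real per-block transaction lookup).
    fn logs() -> Vec<Log> {
        let mut logs = logs_no_tx_hash();
        for log in logs.iter_mut() {
            log.transaction_hash = Some(format!("0xtx-in-block-{}", log.block_number));
        }
        logs
    }

    fn main() {
        assert!(logs_no_tx_hash().iter().all(|l| l.transaction_hash.is_none()));
        assert!(logs().iter().all(|l| l.transaction_hash.is_some()));
    }
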
--- rpc/src/v1/helpers/light_fetch.rs | 59 +++++++++++++++---------------- rpc/src/v1/impls/light/parity.rs | 4 +-- rpc/src/v1/impls/parity.rs | 2 +- rpc/src/v1/traits/parity.rs | 6 ++-- 4 files changed, 34 insertions(+), 37 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index bbe0f3b13bc..80ab78423e0 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -309,11 +309,9 @@ impl LightFetch { Some(OnDemandResponse::Receipts(b)) => b, _ => panic!(WRONG_RESPONSE_AMOUNT_TYPE_PROOF), })) - - } - fn logs_common(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { + fn logs_common(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { use jsonrpc_core::futures::stream::{self, Stream}; const MAX_BLOCK_RANGE: u64 = 1000; @@ -376,12 +374,11 @@ impl LightFetch { None => Either::B(Either::B(future::err(errors::network_disabled()))), } }) - } /// Variant of get transaction logs that does not fetch log transactions hash - pub fn logs_light(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { + pub fn logs_no_tx_hash(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { self.logs_common(filter) .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) } @@ -391,34 +388,34 @@ impl LightFetch { use jsonrpc_core::futures::stream::{self, Stream}; let fetcher_block = self.clone(); self.logs_common(filter) - // retrieve transaction hash. + // retrieve transaction hash. .and_then(move |matches| { - let mut blocks = BTreeMap::new(); - let mut result: Vec = matches.into_iter().map(|(_, v)| { - { - let block_hash = v.block_hash.as_ref().expect("Previously initialized with value; qed"); - blocks.entry(block_hash.clone()).or_insert_with(|| { - fetcher_block.block(BlockId::Hash(block_hash.clone().into())) - }); - } - v - }).collect(); - // future get blocks (unordered it) - stream::futures_unordered(blocks.into_iter().map(|(_,v)|v)).collect().map(move |blocks| { - let transactions_per_block: BTreeMap<_, _> = blocks.iter() - .map(|block| (block.hash(), block.transactions())).collect(); - for log in result.iter_mut() { - let log_index: U256 = log.transaction_index.expect("Previously initialized with value; qed").into(); - let block_hash = log.block_hash.clone().expect("Previously initialized with value; qed").into(); - let tx_hash = transactions_per_block.get(&block_hash) - // transaction index is from an enumerate call in log common so not need to check value - .and_then(|txs| txs.get(log_index.as_usize())) - .map(|tr| tr.hash().into()); - log.transaction_hash = tx_hash; - } - result + let mut blocks = BTreeMap::new(); + let mut result: Vec = matches.into_iter().map(|(_, v)| { + { + let block_hash = v.block_hash.as_ref().expect("Previously initialized with value; qed"); + blocks.entry(block_hash.clone()).or_insert_with(|| { + fetcher_block.block(BlockId::Hash(block_hash.clone().into())) + }); + } + v + }).collect(); + // future get blocks (unordered it) + stream::futures_unordered(blocks.into_iter().map(|(_, v)| v)).collect().map(move |blocks| { + let transactions_per_block: BTreeMap<_, _> = blocks.iter() + .map(|block| (block.hash(), block.transactions())).collect(); + for log in result.iter_mut() { + let log_index: U256 = log.transaction_index.expect("Previously initialized with value; qed").into(); + let block_hash = log.block_hash.clone().expect("Previously initialized with value; qed").into(); + let tx_hash = transactions_per_block.get(&block_hash) + // 
transaction index is from an enumerate call in log common so not need to check value + .and_then(|txs| txs.get(log_index.as_usize())) + .map(|tr| tr.hash().into()); + log.transaction_hash = tx_hash; + } + result + }) }) - }) } // Get a transaction by hash. also returns the index in the block. diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs index 62eec97b39e..397167ad3eb 100644 --- a/rpc/src/v1/impls/light/parity.rs +++ b/rpc/src/v1/impls/light/parity.rs @@ -427,12 +427,12 @@ impl Parity for ParityClient { } } - fn logs_light(&self, filter: Filter) -> BoxFuture> { + fn logs_no_tx_hash(&self, filter: Filter) -> BoxFuture> { let filter = match filter.try_into() { Ok(value) => value, Err(err) => return Box::new(future::err(err)), }; - Box::new(self.fetcher().logs_light(filter)) as BoxFuture<_> + Box::new(self.fetcher().logs_no_tx_hash(filter)) as BoxFuture<_> } fn verify_signature(&self, is_prefixed: bool, message: Bytes, r: H256, s: H256, v: U64) -> Result { diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 0c1a5c5fe22..c2d57a345f7 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -505,7 +505,7 @@ impl Parity for ParityClient where } } - fn logs_light(&self, filter: Filter) -> BoxFuture> { + fn logs_no_tx_hash(&self, filter: Filter) -> BoxFuture> { use v1::impls::eth::base_logs; // only specific impl for lightclient base_logs(&*self.client, &*self.miner, filter.into()) diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs index 5afdc2b9eb9..c4a697783e7 100644 --- a/rpc/src/v1/traits/parity.rs +++ b/rpc/src/v1/traits/parity.rs @@ -243,9 +243,9 @@ build_rpc_trait! { fn verify_signature(&self, bool, Bytes, H256, H256, U64) -> Result; /// Returns logs matching given filter object. - /// Skip filling transaction hash for faster query. - #[rpc(name = "parity_getLogsLight")] - fn logs_light(&self, Filter) -> BoxFuture>; + /// Is allowed to skip filling transaction hash for faster query. + #[rpc(name = "parity_getLogsNoTransactionHash")] + fn logs_no_tx_hash(&self, Filter) -> BoxFuture>; } } From c386faf6211a629007e9feffeb60174b9593a876 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 Dec 2018 13:26:10 +0100 Subject: [PATCH 8/9] Use vec instead of tree map to avoid inner function. 
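A small std-only sketch (illustrative data) of why collapsing the map inside
`logs_no_tx_hash` keeps the response ordered: the fold's accumulator is keyed by
(block number, index within block), and a BTreeMap iterates keys in ascending
order, so collecting its values already yields logs sorted by block and position.

    use std::collections::BTreeMap;

    fn main() {
        // Keyed the same way as the receipts fold: (block number, index in block).
        let mut matches: BTreeMap<(u64, usize), &str> = BTreeMap::new();
        // Insertion order is arbitrary; receipts arrive unordered from futures_unordered.
        matches.insert((7, 1), "block 7, log 1");
        matches.insert((3, 0), "block 3, log 0");
        matches.insert((7, 0), "block 7, log 0");

        // Collecting the values walks keys in ascending order, so the Vec is
        // already sorted by block number and then by position within the block.
        let ordered: Vec<&str> = matches.into_iter().map(|(_, v)| v).collect();
        assert_eq!(ordered, vec!["block 3, log 0", "block 7, log 0", "block 7, log 1"]);
    }
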
--- rpc/src/v1/helpers/light_fetch.rs | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 80ab78423e0..31aede0e5c9 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -311,7 +311,7 @@ impl LightFetch { })) } - fn logs_common(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { + pub fn logs_no_tx_hash(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { use jsonrpc_core::futures::stream::{self, Stream}; const MAX_BLOCK_RANGE: u64 = 1000; @@ -320,7 +320,7 @@ impl LightFetch { self.headers_range_by_block_id(filter.from_block, filter.to_block, MAX_BLOCK_RANGE) .and_then(move |mut headers| { if headers.is_empty() { - return Either::A(future::ok(BTreeMap::new())); + return Either::A(future::ok(Vec::new())); } let on_demand = &fetcher.on_demand; @@ -367,6 +367,7 @@ impl LightFetch { future::ok::<_,OnDemandError>(matches) }) .map_err(errors::on_demand_error) + .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) }); match maybe_future { @@ -377,21 +378,15 @@ impl LightFetch { } - /// Variant of get transaction logs that does not fetch log transactions hash - pub fn logs_no_tx_hash(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { - self.logs_common(filter) - .map(|matches| matches.into_iter().map(|(_, v)| v).collect()) - } - /// Get transaction logs pub fn logs(&self, filter: EthcoreFilter) -> impl Future, Error = Error> + Send { use jsonrpc_core::futures::stream::{self, Stream}; let fetcher_block = self.clone(); - self.logs_common(filter) + self.logs_no_tx_hash(filter) // retrieve transaction hash. .and_then(move |matches| { let mut blocks = BTreeMap::new(); - let mut result: Vec = matches.into_iter().map(|(_, v)| { + let mut result: Vec = matches.into_iter().map(|v| { { let block_hash = v.block_hash.as_ref().expect("Previously initialized with value; qed"); blocks.entry(block_hash.clone()).or_insert_with(|| { From ad268019c5cedc2fb3332ca5a49e47a4165b2d36 Mon Sep 17 00:00:00 2001 From: cheme Date: Mon, 17 Dec 2018 19:30:29 +0100 Subject: [PATCH 9/9] better loop --- rpc/src/v1/helpers/light_fetch.rs | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 31aede0e5c9..0473ff4c858 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -384,17 +384,14 @@ impl LightFetch { let fetcher_block = self.clone(); self.logs_no_tx_hash(filter) // retrieve transaction hash. - .and_then(move |matches| { + .and_then(move |mut result| { let mut blocks = BTreeMap::new(); - let mut result: Vec = matches.into_iter().map(|v| { - { - let block_hash = v.block_hash.as_ref().expect("Previously initialized with value; qed"); + for log in result.iter() { + let block_hash = log.block_hash.as_ref().expect("Previously initialized with value; qed"); blocks.entry(block_hash.clone()).or_insert_with(|| { fetcher_block.block(BlockId::Hash(block_hash.clone().into())) }); - } - v - }).collect(); + } // future get blocks (unordered it) stream::futures_unordered(blocks.into_iter().map(|(_, v)| v)).collect().map(move |blocks| { let transactions_per_block: BTreeMap<_, _> = blocks.iter()