Plumb RPC listener up to caller (#5038)
# Description

This PR allows the RPC server's socket address to be returned when
initializing the server, so that a library consumer can programmatically
determine which port the RPC server is listening on. My use case is
automated testing: I'd like to be able to simply specify that the server
bind to port 0 and then test against whatever port the OS assigns
dynamically. I will have many RPC servers running in parallel across many
tests within a single process, and I don't want to have to deal with port
conflicts.
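
A minimal sketch of that test-side flow (not part of this PR; the helper name is hypothetical, and it assumes `sc_service::config::RpcConfiguration` with an endpoint bound to port 0 and that the test drives `start_rpc_servers` directly):

```rust
use sc_service::{config::RpcConfiguration, error::Error, start_rpc_servers};

// Hypothetical helper: start the RPC server on an OS-assigned port and report
// which port the kernel actually picked. The returned `Server` must be kept
// alive by the caller, since dropping it signals the server to stop.
fn start_on_ephemeral_port(
    rpc_configuration: &RpcConfiguration,
    tokio_handle: &tokio::runtime::Handle,
    gen_rpc_module: impl Fn() -> Result<jsonrpsee::RpcModule<()>, Error>,
) -> Result<(sc_rpc_server::Server, u16), Error> {
    // `start_rpc_servers` now returns `sc_rpc_server::Server` instead of an
    // opaque `Box<dyn Any>`, so the bound addresses are visible to the caller.
    let server = start_rpc_servers(rpc_configuration, None, tokio_handle, gen_rpc_module, None)?;
    // With the endpoint bound to port 0, the OS assigns a free port.
    let port = server.listen_addrs().first().map(|addr| addr.port()).unwrap_or_default();
    Ok((server, port))
}
```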

## Integration

Integration is straightforward. My main concern is that I am making
non-backwards-compatible changes to public library functions. Let me
know if I should leave backwards-compatible wrappers in place for
any/all of the public functions that were modified.

## Review Notes
The new `listen_addresses` field on the `RpcHandlers` struct is a
`Vec<Multiaddr>` rather than a `SocketAddr` because I wanted it to be
transport-agnostic and capable of supporting multiple listening addresses,
should the RPC server ever require that in the future.
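
For illustration, a hedged sketch (not part of this PR) of how a consumer might pull the TCP port back out of those multiaddresses; the helper is hypothetical and assumes the standard multiaddr API (`iter()` and `Protocol`) re-exported by `sc_network`:

```rust
use sc_network::multiaddr::Protocol;
use sc_service::RpcHandlers;

// Hypothetical helper: each entry returned by `listen_addresses()` is a
// transport-agnostic `Multiaddr` such as `/ip4/127.0.0.1/tcp/9944`; walk its
// protocol stack and return the first TCP port found.
fn assigned_rpc_port(rpc_handlers: &RpcHandlers) -> Option<u16> {
    rpc_handlers
        .listen_addresses()
        .iter()
        .flat_map(|addr| addr.iter())
        .find_map(|protocol| match protocol {
            Protocol::Tcp(port) => Some(port),
            _ => None,
        })
}
```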

# Checklist

* [x] My PR includes a detailed description as outlined in the
"Description" and its two subsections above.
* [ ] My PR follows the [labeling requirements](CONTRIBUTING.md#Process)
of this project (at minimum one label for `T`
  required)
* External contributors: ask maintainers to put the right label on your
PR.
* [x] I have made corresponding changes to the documentation (if
applicable)
* [ ] I have added tests that prove my fix is effective or that my
feature works (if applicable)

1. I didn't understand what the 'T' label meant. Am I supposed to open a
GitHub issue for my PR?
2. I didn't see an easy way to add tests since the functions I am
modifying are not directly called by any tests.

---------

Co-authored-by: Niklas Adolfsson <[email protected]>
nickvikeras and niklasad1 authored Sep 9, 2024
1 parent a3eda0a commit c72f9ab
Showing 4 changed files with 89 additions and 29 deletions.
15 changes: 15 additions & 0 deletions prdoc/pr_5038.prdoc
@@ -0,0 +1,15 @@
# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json

title: Plumb RPC listener up to caller

doc:
  - audience: Node Dev
    description:
      This PR allows the RPC server's socket address to be returned when initializing the server.
      This allows the library consumer to easily programmatically determine which port the RPC server is listening on.
crates:
  - name: sc-rpc-server
    bump: major
  - name: sc-service
    bump: major
42 changes: 37 additions & 5 deletions substrate/client/rpc-servers/src/lib.rs
@@ -23,11 +23,13 @@
pub mod middleware;
pub mod utils;

use std::{error::Error as StdError, time::Duration};
use std::{error::Error as StdError, net::SocketAddr, time::Duration};

use jsonrpsee::{
core::BoxError,
server::{serve_with_graceful_shutdown, stop_channel, ws, PingConfig, StopHandle},
server::{
serve_with_graceful_shutdown, stop_channel, ws, PingConfig, ServerHandle, StopHandle,
},
Methods, RpcModule,
};
use middleware::NodeHealthProxyLayer;
@@ -46,8 +48,38 @@ pub use utils::{RpcEndpoint, RpcMethods};

const MEGABYTE: u32 = 1024 * 1024;

/// Type alias for the JSON-RPC server.
pub type Server = jsonrpsee::server::ServerHandle;
/// Type to encapsulate the server handle and listening address.
pub struct Server {
/// Handle to the rpc server
handle: ServerHandle,
/// Listening address of the server
listen_addrs: Vec<SocketAddr>,
}

impl Server {
/// Creates a new Server.
pub fn new(handle: ServerHandle, listen_addrs: Vec<SocketAddr>) -> Server {
Server { handle, listen_addrs }
}

/// Returns the `jsonrpsee::server::ServerHandle` for this Server. Can be used to stop the
/// server.
pub fn handle(&self) -> &ServerHandle {
&self.handle
}

/// The listen address for the running RPC service.
pub fn listen_addrs(&self) -> &[SocketAddr] {
&self.listen_addrs
}
}

impl Drop for Server {
fn drop(&mut self) {
// This doesn't wait for the server to be stopped but fires the signal.
let _ = self.handle.stop();
}
}

/// Trait for providing subscription IDs that can be cloned.
pub trait SubscriptionIdProvider:
@@ -273,5 +305,5 @@ where
// This is to make it work with old scripts/utils that parse the logs.
log::info!("Running JSON-RPC server: addr={}", format_listen_addrs(&local_addrs));

Ok(server_handle)
Ok(Server::new(server_handle, local_addrs))
}
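
As a brief usage note for the new `Server` wrapper above: the `Drop` impl only fires the stop signal; a caller that wants to stop explicitly and wait for shutdown can go through the `jsonrpsee` handle. A hypothetical sketch, assuming `ServerHandle::stop`/`stopped` behave as in the jsonrpsee server API:

```rust
// Hypothetical shutdown sequence for a `server: sc_rpc_server::Server`.
async fn shutdown(server: sc_rpc_server::Server) {
    // Fire the stop signal; this returns immediately.
    let _ = server.handle().stop();
    // `ServerHandle` is cloneable; `stopped()` consumes a handle and resolves
    // once the server has fully shut down.
    server.handle().clone().stopped().await;
    // Dropping `server` now adds nothing: its `Drop` impl just fires the
    // (already-sent) stop signal again and ignores the error.
}
```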
16 changes: 14 additions & 2 deletions substrate/client/service/src/builder.rs
@@ -19,7 +19,7 @@
use crate::{
build_network_future, build_system_rpc_future,
client::{Client, ClientConfig},
config::{Configuration, ExecutorConfiguration, KeystoreConfig, PrometheusConfig},
config::{Configuration, ExecutorConfiguration, KeystoreConfig, Multiaddr, PrometheusConfig},
error::Error,
metrics::MetricsService,
start_rpc_servers, BuildGenesisBlock, GenesisBlockBuilder, RpcHandlers, SpawnTaskHandle,
@@ -43,6 +43,7 @@ use sc_executor::{
use sc_keystore::LocalKeystore;
use sc_network::{
config::{FullNetworkConfiguration, SyncMode},
multiaddr::Protocol,
service::{
traits::{PeerStore, RequestResponseConfig},
NotificationMetrics,
@@ -527,13 +528,24 @@ where
gen_rpc_module,
rpc_id_provider,
)?;

let listen_addrs = rpc_server_handle
.listen_addrs()
.into_iter()
.map(|socket_addr| {
let mut multiaddr: Multiaddr = socket_addr.ip().into();
multiaddr.push(Protocol::Tcp(socket_addr.port()));
multiaddr
})
.collect();

let in_memory_rpc = {
let mut module = gen_rpc_module()?;
module.extensions_mut().insert(DenyUnsafe::No);
module
};

let in_memory_rpc_handle = RpcHandlers::new(Arc::new(in_memory_rpc));
let in_memory_rpc_handle = RpcHandlers::new(Arc::new(in_memory_rpc), listen_addrs);

// Spawn informant task
spawn_handle.spawn(
45 changes: 23 additions & 22 deletions substrate/client/service/src/lib.rs
@@ -34,6 +34,7 @@ mod client;
mod metrics;
mod task_manager;

use crate::config::Multiaddr;
use std::{
collections::HashMap,
net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6},
@@ -50,6 +51,7 @@ use sc_network::{
};
use sc_network_sync::SyncingService;
use sc_network_types::PeerId;
use sc_rpc_server::Server;
use sc_utils::mpsc::TracingUnboundedReceiver;
use sp_blockchain::HeaderMetadata;
use sp_consensus::SyncOracle;
@@ -101,14 +103,22 @@ use tokio::runtime::Handle;

const DEFAULT_PROTOCOL_ID: &str = "sup";

/// RPC handlers that can perform RPC queries.
/// A running RPC service that can perform in-memory RPC queries.
#[derive(Clone)]
pub struct RpcHandlers(Arc<RpcModule<()>>);
pub struct RpcHandlers {
// This is legacy and may be removed at some point, it was for WASM stuff before smoldot was a
// thing. https://github.com/paritytech/polkadot-sdk/pull/5038#discussion_r1694971805
rpc_module: Arc<RpcModule<()>>,

// This can be used to introspect the port the RPC server is listening on. SDK consumers are
// depending on this and it should be supported even if in-memory query support is removed.
listen_addresses: Vec<Multiaddr>,
}

impl RpcHandlers {
/// Create an RPC handlers instance.
pub fn new(inner: Arc<RpcModule<()>>) -> Self {
Self(inner)
pub fn new(rpc_module: Arc<RpcModule<()>>, listen_addresses: Vec<Multiaddr>) -> Self {
Self { rpc_module, listen_addresses }
}

/// Starts an RPC query.
@@ -130,12 +140,17 @@ impl RpcHandlers {
// This limit is used to prevent panics and is large enough.
const TOKIO_MPSC_MAX_SIZE: usize = tokio::sync::Semaphore::MAX_PERMITS;

self.0.raw_json_request(json_query, TOKIO_MPSC_MAX_SIZE).await
self.rpc_module.raw_json_request(json_query, TOKIO_MPSC_MAX_SIZE).await
}

/// Provides access to the underlying `RpcModule`
pub fn handle(&self) -> Arc<RpcModule<()>> {
self.0.clone()
self.rpc_module.clone()
}

/// Provides access to listen addresses
pub fn listen_addresses(&self) -> &[Multiaddr] {
&self.listen_addresses[..]
}
}

@@ -363,28 +378,14 @@ pub async fn build_system_rpc_future<
debug!("`NetworkWorker` has terminated, shutting down the system RPC future.");
}

// Wrapper for HTTP and WS servers that makes sure they are properly shut down.
mod waiting {
pub struct Server(pub Option<sc_rpc_server::Server>);

impl Drop for Server {
fn drop(&mut self) {
if let Some(server) = self.0.take() {
// This doesn't not wait for the server to be stopped but fires the signal.
let _ = server.stop();
}
}
}
}

/// Starts RPC servers.
pub fn start_rpc_servers<R>(
rpc_configuration: &RpcConfiguration,
registry: Option<&Registry>,
tokio_handle: &Handle,
gen_rpc_module: R,
rpc_id_provider: Option<Box<dyn sc_rpc_server::SubscriptionIdProvider>>,
) -> Result<Box<dyn std::any::Any + Send + Sync>, error::Error>
) -> Result<Server, error::Error>
where
R: Fn() -> Result<RpcModule<()>, Error>,
{
@@ -451,7 +452,7 @@ where
match tokio::task::block_in_place(|| {
tokio_handle.block_on(sc_rpc_server::start_server(server_config))
}) {
Ok(server) => Ok(Box::new(waiting::Server(Some(server)))),
Ok(server) => Ok(server),
Err(e) => Err(Error::Application(e)),
}
}
