Commit

Merge branch 'master' into flush-those-logs
steveluscher authored Dec 6, 2024
2 parents 543ce3b + 31606f5 commit 9f8d33f
Showing 27 changed files with 2,259 additions and 163 deletions.
15 changes: 14 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

15 changes: 8 additions & 7 deletions banking-bench/src/main.rs
@@ -450,15 +450,16 @@ fn main() {
     };
     let cluster_info = Arc::new(cluster_info);
     let tpu_disable_quic = matches.is_present("tpu_disable_quic");
-    let connection_cache = match tpu_disable_quic {
-        false => ConnectionCache::new_quic(
-            "connection_cache_banking_bench_quic",
-            DEFAULT_TPU_CONNECTION_POOL_SIZE,
-        ),
-        true => ConnectionCache::with_udp(
+    let connection_cache = if tpu_disable_quic {
+        ConnectionCache::with_udp(
             "connection_cache_banking_bench_udp",
             DEFAULT_TPU_CONNECTION_POOL_SIZE,
-        ),
+        )
+    } else {
+        ConnectionCache::new_quic(
+            "connection_cache_banking_bench_quic",
+            DEFAULT_TPU_CONNECTION_POOL_SIZE,
+        )
     };
     let banking_stage = BankingStage::new_num_threads(
         block_production_method,
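Read on its own, the new selection logic is a single if/else over the tpu_disable_quic flag: UDP when QUIC is disabled, QUIC otherwise. The following is a minimal standalone sketch of that choice as a free function; it is not part of the commit, and the import paths are assumed from the crates referenced elsewhere in this diff (solana-client, solana-tpu-client).

// Illustrative sketch only; import paths assumed from other hunks in this commit.
use solana_client::connection_cache::ConnectionCache;
use solana_tpu_client::tpu_client::DEFAULT_TPU_CONNECTION_POOL_SIZE;

// Hypothetical helper mirroring the bench's new if/else: UDP when QUIC is
// disabled, QUIC otherwise.
fn bench_connection_cache(tpu_disable_quic: bool) -> ConnectionCache {
    if tpu_disable_quic {
        ConnectionCache::with_udp(
            "connection_cache_banking_bench_udp",
            DEFAULT_TPU_CONNECTION_POOL_SIZE,
        )
    } else {
        ConnectionCache::new_quic(
            "connection_cache_banking_bench_quic",
            DEFAULT_TPU_CONNECTION_POOL_SIZE,
        )
    }
}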
1 change: 1 addition & 0 deletions core/Cargo.toml
@@ -77,6 +77,7 @@ solana-runtime = { workspace = true }
 solana-runtime-transaction = { workspace = true }
 solana-sanitize = { workspace = true }
 solana-sdk = { workspace = true }
+solana-sdk-ids = { workspace = true }
 solana-send-transaction-service = { workspace = true }
 solana-short-vec = { workspace = true }
 solana-streamer = { workspace = true }
6 changes: 5 additions & 1 deletion core/src/banking_stage/packet_filter.rs
@@ -5,6 +5,7 @@ use {
     solana_sdk::{
         ed25519_program, feature_set::FeatureSet, saturating_add_assign, secp256k1_program,
     },
+    solana_sdk_ids::secp256r1_program,
     thiserror::Error,
 };

@@ -48,7 +49,10 @@ impl ImmutableDeserializedPacket {
     pub fn check_excessive_precompiles(&self) -> Result<(), PacketFilterFailure> {
         let mut num_precompile_signatures: u64 = 0;
         for (program_id, ix) in self.transaction().get_message().program_instructions_iter() {
-            if secp256k1_program::check_id(program_id) || ed25519_program::check_id(program_id) {
+            if secp256k1_program::check_id(program_id)
+                || ed25519_program::check_id(program_id)
+                || secp256r1_program::check_id(program_id)
+            {
                 let num_signatures = ix.data.first().map_or(0, |byte| u64::from(*byte));
                 saturating_add_assign!(num_precompile_signatures, num_signatures);
             }
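The filter treats the first byte of each precompile instruction's data as its declared signature count and sums those counts across the message, now including secp256r1 instructions. The sketch below restates that counting logic in isolation; the function name and the (Pubkey, Vec<u8>) input shape are illustrative assumptions, while the check_id helpers and the saturating_add_assign macro are the ones imported in the hunk above.

use solana_sdk::{ed25519_program, pubkey::Pubkey, saturating_add_assign, secp256k1_program};
use solana_sdk_ids::secp256r1_program;

// Illustrative only: sums the signature counts claimed by precompile
// instructions, mirroring check_excessive_precompiles above. Each precompile
// instruction stores its signature count in the first byte of its data.
fn count_precompile_signatures(instructions: &[(Pubkey, Vec<u8>)]) -> u64 {
    let mut num_precompile_signatures: u64 = 0;
    for (program_id, data) in instructions {
        if secp256k1_program::check_id(program_id)
            || ed25519_program::check_id(program_id)
            || secp256r1_program::check_id(program_id)
        {
            let num_signatures = data.first().map_or(0, |byte| u64::from(*byte));
            saturating_add_assign!(num_precompile_signatures, num_signatures);
        }
    }
    num_precompile_signatures
}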
3 changes: 2 additions & 1 deletion core/src/next_leader.rs
@@ -16,6 +16,7 @@ pub(crate) fn upcoming_leader_tpu_vote_sockets(
     cluster_info: &ClusterInfo,
     poh_recorder: &RwLock<PohRecorder>,
     fanout_slots: u64,
+    protocol: Protocol,
 ) -> Vec<SocketAddr> {
     let upcoming_leaders = {
         let poh_recorder = poh_recorder.read().unwrap();
@@ -29,7 +30,7 @@ pub(crate) fn upcoming_leader_tpu_vote_sockets(
         .dedup()
         .filter_map(|leader_pubkey| {
             cluster_info
-                .lookup_contact_info(&leader_pubkey, |node| node.tpu_vote(Protocol::UDP))?
+                .lookup_contact_info(&leader_pubkey, |node| node.tpu_vote(protocol))?
                 .ok()
         })
         // dedup again since leaders could potentially share the same tpu vote socket
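With this change, upcoming_leader_tpu_vote_sockets no longer hard-codes UDP; the caller decides which vote-socket protocol to look up. A hypothetical sketch of that caller-side choice, assuming Protocol is the solana_gossip contact-info enum already used in the removed line and that it exposes UDP and QUIC variants; the flag name is illustrative.

// Hypothetical caller-side selection; the flag name is illustrative and the
// Protocol path is assumed to be solana_gossip's contact_info enum.
use solana_gossip::contact_info::Protocol;

fn vote_socket_protocol(tpu_vote_use_quic: bool) -> Protocol {
    if tpu_vote_use_quic {
        Protocol::QUIC
    } else {
        Protocol::UDP
    }
}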
57 changes: 57 additions & 0 deletions core/src/replay_stage.rs
@@ -4233,6 +4233,7 @@ pub(crate) mod tests {
         },
         crossbeam_channel::unbounded,
         itertools::Itertools,
+        solana_client::connection_cache::ConnectionCache,
         solana_entry::entry::{self, Entry},
         solana_gossip::{cluster_info::Node, crds::Cursor},
         solana_ledger::{
@@ -4263,6 +4264,7 @@ pub(crate) mod tests {
             transaction::TransactionError,
         },
         solana_streamer::socket::SocketAddrSpace,
+        solana_tpu_client::tpu_client::{DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_VOTE_USE_QUIC},
         solana_transaction_status::VersionedTransactionWithStatusMeta,
         solana_vote_program::{
             vote_state::{self, TowerSync, VoteStateVersions},
@@ -7547,11 +7549,25 @@ pub(crate) mod tests {
         let vote_info = voting_receiver
             .recv_timeout(Duration::from_secs(1))
             .unwrap();
+
+        let connection_cache = if DEFAULT_VOTE_USE_QUIC {
+            ConnectionCache::new_quic(
+                "connection_cache_vote_quic",
+                DEFAULT_TPU_CONNECTION_POOL_SIZE,
+            )
+        } else {
+            ConnectionCache::with_udp(
+                "connection_cache_vote_udp",
+                DEFAULT_TPU_CONNECTION_POOL_SIZE,
+            )
+        };
+
         crate::voting_service::VotingService::handle_vote(
             &cluster_info,
             &poh_recorder,
             &tower_storage,
             vote_info,
+            Arc::new(connection_cache),
         );

         let mut cursor = Cursor::default();
@@ -7622,12 +7638,27 @@ pub(crate) mod tests {
         let vote_info = voting_receiver
             .recv_timeout(Duration::from_secs(1))
             .unwrap();
+
+        let connection_cache = if DEFAULT_VOTE_USE_QUIC {
+            ConnectionCache::new_quic(
+                "connection_cache_vote_quic",
+                DEFAULT_TPU_CONNECTION_POOL_SIZE,
+            )
+        } else {
+            ConnectionCache::with_udp(
+                "connection_cache_vote_udp",
+                DEFAULT_TPU_CONNECTION_POOL_SIZE,
+            )
+        };
+
         crate::voting_service::VotingService::handle_vote(
             &cluster_info,
             &poh_recorder,
             &tower_storage,
             vote_info,
+            Arc::new(connection_cache),
         );
+
         let votes = cluster_info.get_votes(&mut cursor);
         assert_eq!(votes.len(), 1);
         let vote_tx = &votes[0];
@@ -7705,11 +7736,24 @@ pub(crate) mod tests {
         let vote_info = voting_receiver
             .recv_timeout(Duration::from_secs(1))
             .unwrap();
+        let connection_cache = if DEFAULT_VOTE_USE_QUIC {
+            ConnectionCache::new_quic(
+                "connection_cache_vote_quic",
+                DEFAULT_TPU_CONNECTION_POOL_SIZE,
+            )
+        } else {
+            ConnectionCache::with_udp(
+                "connection_cache_vote_udp",
+                DEFAULT_TPU_CONNECTION_POOL_SIZE,
+            )
+        };
+
         crate::voting_service::VotingService::handle_vote(
             &cluster_info,
             &poh_recorder,
             &tower_storage,
             vote_info,
+            Arc::new(connection_cache),
         );

         assert!(last_vote_refresh_time.last_refresh_time > clone_refresh_time);
@@ -7820,11 +7864,24 @@ pub(crate) mod tests {
         let vote_info = voting_receiver
             .recv_timeout(Duration::from_secs(1))
             .unwrap();
+        let connection_cache = if DEFAULT_VOTE_USE_QUIC {
+            ConnectionCache::new_quic(
+                "connection_cache_vote_quic",
+                DEFAULT_TPU_CONNECTION_POOL_SIZE,
+            )
+        } else {
+            ConnectionCache::with_udp(
+                "connection_cache_vote_udp",
+                DEFAULT_TPU_CONNECTION_POOL_SIZE,
+            )
+        };
+
         crate::voting_service::VotingService::handle_vote(
             cluster_info,
             poh_recorder,
             tower_storage,
             vote_info,
+            Arc::new(connection_cache),
         );

         let votes = cluster_info.get_votes(cursor);
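All four test hunks above repeat the same cache construction. As a hypothetical refactor (not part of this commit), that block could be hoisted into a small helper; every identifier below appears verbatim in the diff.

use solana_client::connection_cache::ConnectionCache;
use solana_tpu_client::tpu_client::{DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_VOTE_USE_QUIC};

// Hypothetical helper: builds the vote connection cache the way the tests above
// do, choosing QUIC or UDP based on the default vote transport.
fn test_vote_connection_cache() -> ConnectionCache {
    if DEFAULT_VOTE_USE_QUIC {
        ConnectionCache::new_quic(
            "connection_cache_vote_quic",
            DEFAULT_TPU_CONNECTION_POOL_SIZE,
        )
    } else {
        ConnectionCache::with_udp(
            "connection_cache_vote_udp",
            DEFAULT_TPU_CONNECTION_POOL_SIZE,
        )
    }
}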
16 changes: 16 additions & 0 deletions core/src/tvu.rs
@@ -163,6 +163,7 @@ impl Tvu {
         cluster_slots: Arc<ClusterSlots>,
         wen_restart_repair_slots: Option<Arc<RwLock<Vec<Slot>>>>,
         slot_status_notifier: Option<SlotStatusNotifier>,
+        vote_connection_cache: Arc<ConnectionCache>,
     ) -> Result<Self, String> {
         let in_wen_restart = wen_restart_repair_slots.is_some();

@@ -331,6 +332,7 @@ impl Tvu {
             cluster_info.clone(),
             poh_recorder.clone(),
             tower_storage,
+            vote_connection_cache,
         );

         let warm_quic_cache_service = connection_cache.and_then(|connection_cache| {
@@ -436,6 +438,7 @@ pub mod tests {
        solana_runtime::bank::Bank,
        solana_sdk::signature::{Keypair, Signer},
        solana_streamer::socket::SocketAddrSpace,
+       solana_tpu_client::tpu_client::{DEFAULT_TPU_CONNECTION_POOL_SIZE, DEFAULT_VOTE_USE_QUIC},
        std::sync::atomic::{AtomicU64, Ordering},
    };

@@ -494,6 +497,18 @@ pub mod tests {
        } else {
            None
        };
+       let connection_cache = if DEFAULT_VOTE_USE_QUIC {
+           ConnectionCache::new_quic(
+               "connection_cache_vote_quic",
+               DEFAULT_TPU_CONNECTION_POOL_SIZE,
+           )
+       } else {
+           ConnectionCache::with_udp(
+               "connection_cache_vote_udp",
+               DEFAULT_TPU_CONNECTION_POOL_SIZE,
+           )
+       };
+
        let tvu = Tvu::new(
            &vote_keypair.pubkey(),
            Arc::new(RwLock::new(vec![Arc::new(vote_keypair)])),
@@ -555,6 +570,7 @@ pub mod tests {
            cluster_slots,
            wen_restart_repair_slots,
            None,
+           Arc::new(connection_cache),
        )
        .expect("assume success");
        if enable_wen_restart {
