Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix "cache state must exist" panic in validator-network #2561

Merged
merged 2 commits into from
Jun 11, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 34 additions & 12 deletions validator-network/src/network_impl.rs
Original file line number Diff line number Diff line change
Expand Up @@ -231,9 +231,13 @@ where
RequestError::InboundRequest(InboundRequestError::NoReceiver) => {}
// In all other cases, clear the Peer ID cache for this validator
_ => {
if let Some(validator_key) =
self.validator_keys.read().get(usize::from(validator_id))
{
// Make sure to drop the `self.validator_keys` lock after this line.
let validator_key = self
.validator_keys
.read()
.get(usize::from(validator_id))
.cloned();
if let Some(validator_key) = validator_key {
// Clear the peer ID cache only if the error happened for the same Peer ID that we have cached
let mut validator_peer_id_cache = self.validator_peer_id_cache.write();
if let Some(cache_entry) =
Expand Down Expand Up @@ -293,18 +297,36 @@ where
/// ordered, such that the k-th entry is the validator with ID k.
async fn set_validators(&self, validator_keys: Vec<LazyPublicKey>) {
    log::trace!(?validator_keys, "Setting validators for ValidatorNetwork");

    // Put the `validator_keys` into the same order as the
    // `self.validator_peer_id_cache` (a map keyed by compressed public
    // key) so that we can walk both in lockstep below. Note that
    // `LazyPublicKey::cmp` forwards to `CompressedPublicKey::cmp`, so the
    // sort order matches the cache's key order.
    let mut sorted_validator_keys: Vec<_> = validator_keys.iter().collect();
    sorted_validator_keys.sort_unstable();
    let mut sorted_validator_keys = sorted_validator_keys.into_iter();
    let mut cur_key = sorted_validator_keys.next();

    // Drop peer ID cache entries, but keep validators that are still
    // active and validators who are currently being resolved.
    //
    // The write-lock guard is a temporary here, so it is released as soon
    // as `retain` returns.
    self.validator_peer_id_cache
        .write()
        .retain(|key, cache_state| {
            // If a lookup is in progress, the lookup task expects to be
            // able to put the result into this cache map later — removing
            // the entry now would break that expectation (the
            // "cache state must exist" panic this change fixes).
            //
            // Such an entry gets cleaned up on the next validator change.
            if let CacheState::InProgress(..) = cache_state {
                return true;
            }
            // Advance `cur_key` until it is greater than or equal to
            // `key`. Both sequences are visited in ascending key order,
            // so the whole `retain` is a single O(n + m) merge walk.
            while cur_key.map(|k| k.compressed() < key).unwrap_or(false) {
                cur_key = sorted_validator_keys.next();
            }
            // Keep the entry exactly when its key belongs to one of the
            // new validators.
            Some(key) == cur_key.map(LazyPublicKey::compressed)
        });

    *self.validator_keys.write() = validator_keys;
}

async fn send_to<M: Message>(&self, validator_id: u16, msg: M) -> Result<(), Self::Error> {
Expand Down
Loading