Delete broken, #[ignore]d tests
Fixes #2162.
hrxi authored and jsdanielh committed Jan 15, 2025
1 parent 4b2a0d9 commit d54cef7
Showing 3 changed files with 0 additions and 242 deletions.
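
For context: #[ignore] marks a Rust test that cargo test skips by default; it only runs when requested explicitly with cargo test -- --ignored (or --include-ignored). A minimal illustration (the test name here is invented for the example):

#[test]
#[ignore] // Skipped by a plain `cargo test`; run via `cargo test -- --ignored`.
fn slow_integration_test() {
    // Ignored tests still have to compile, but they are never executed by
    // default, so a broken ignored test can rot unnoticed for a long time.
    assert_eq!(2 + 2, 4);
}
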
92 changes: 0 additions & 92 deletions consensus/tests/request_component.rs

This file was deleted.

23 changes: 0 additions & 23 deletions validator/tests/integration.rs
@@ -79,26 +79,3 @@ async fn validator_update() {
let producer2 = BlockProducer::new(new_signing_key, new_voting_key);
produce_macro_blocks_with_txns(&producer2, &blockchain, 1, 1, 2);
}

#[test(tokio::test(flavor = "multi_thread"))]
#[ignore]
async fn four_validators_can_create_an_epoch() {
let env =
MdbxDatabase::new_volatile(Default::default()).expect("Could not open a volatile database");

let validators =
build_validators::<Network>(env, &(1u64..=4u64).collect::<Vec<_>>(), &mut None, false)
.await;

let blockchain = Arc::clone(&validators.first().unwrap().blockchain);

for validator in validators {
spawn(validator);
}

let events = blockchain.read().notifier_as_stream();

events.take(130).for_each(|_| future::ready(())).await;

assert!(blockchain.read().block_number() >= 130 + Policy::genesis_block_number());
}
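
The wait-for-N-blocks idiom in the deleted test comes from the futures crate: StreamExt::take bounds the event stream at 130 items and for_each drains it. A self-contained sketch of the same pattern, with a plain iterator stream standing in for the blockchain notifier:

use futures::{future, stream, StreamExt};

#[tokio::main]
async fn main() {
    // Stand-in for `blockchain.read().notifier_as_stream()`.
    let events = stream::iter(0..1000);
    // Resolve once 130 events have been observed, discarding each one.
    events.take(130).for_each(|_| future::ready(())).await;
    println!("saw 130 events");
}
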
127 changes: 0 additions & 127 deletions validator/tests/mock.rs
@@ -209,130 +209,3 @@ fn create_skip_block_update(
validator_id as usize,
)
}

#[test(tokio::test)]
#[ignore]
async fn validator_can_catch_up() {
// Remove the first block producer in order to trigger a skip block. Never connect it again.
// Remove the second block producer to trigger another skip block after the first one (which we
// want someone to catch up to). Never connect it again.
// The third block producer needs to be disconnected as well and then reconnected, so that it
// catches up to the second skip block without having seen the first one, resulting in it
// producing the first block.
let hub = MockHub::default();
let env =
MdbxDatabase::new_volatile(Default::default()).expect("Could not open a volatile database");

// In total, 8 validators are registered. After 3 validators are taken offline, the remaining 5 should not be able to make progress on their own.
let mut validators = build_validators::<Network>(
env,
&(9u64..=16u64).collect::<Vec<_>>(),
&mut Some(hub),
false,
)
.await;

// Maintain a collection of the corresponding networks.
let networks: Vec<Arc<Network>> = validators
.iter()
.map(|v| v.consensus.network.clone())
.collect();

// Disconnect the block producers for the next 3 skip blocks. Remember the one which is supposed to actually create the block (3rd skip block).
let (validator, _) = {
let validator = validator_for_slot(&mut validators, 1, 1);
validator
.consensus
.network
.disconnect(CloseReason::GoingOffline)
.await;
let id1 = validator.validator_slot_band();
let validator = validator_for_slot(&mut validators, 2, 2);
validator
.consensus
.network
.disconnect(CloseReason::GoingOffline)
.await;
let id2 = validator.validator_slot_band();
assert_ne!(id2, id1);

// Ideally we would remove these validators from the vec so that they do not execute at all.
// However, the implementation still progresses their chains, and since they have registered
// listeners, they would panic. That is confusing, so they are allowed to keep executing
// (with no validator network connection).
// validators.retain(|v| {
// v.validator_address() != id1 && v.validator_address() != id2
// });

let validator = validator_for_slot(&validators, 3, 3);
validator
.consensus
.network
.disconnect(CloseReason::GoingOffline)
.await;
assert_ne!(id1, validator.validator_slot_band());
assert_ne!(id2, validator.validator_slot_band());
(validator, validator.consensus.network.clone())
};
// assert_eq!(validators.len(), 7);

let blockchain = validator.blockchain.clone();
// Listen for blockchain events from the block producer (after two skip blocks).
let mut events = blockchain.read().notifier_as_stream();

let slots: Vec<_> = blockchain.read().current_validators().unwrap().validators
[validator.validator_slot_band() as usize]
.slots
.clone()
.collect();

let skip_block_info = SkipBlockInfo {
network_id: blockchain.read().network_id,
block_number: 1,
vrf_entropy: blockchain.read().head().seed().entropy(),
};

// Manually construct a skip block for the validator
let vc = create_skip_block_update(
skip_block_info,
validator.current_voting_key(),
validator.validator_slot_band(),
&slots,
);

// Let the validators run.
for validator in validators {
spawn(validator);
}

// Wait for them to run into the block producer timeout (10s).
sleep(Duration::from_secs(11)).await;
// At this point the prepared skip block message is broadcast
// (only a subset of the validators will accept it, as it is sent as a level 1 message).
for network in &networks {
for peer_id in network.get_peers() {
network
.message::<SkipBlockMessage>(SkipBlockMessage(vc.clone().into()), peer_id)
.await
.unwrap();
}
}

// Wait long enough for the skip block aggregation to complete (how long exactly does not
// matter, as long as the vc completes).
sleep(Duration::from_secs(8)).await;

// Reconnect a validator (which has not seen the proof for the skip block).
log::warn!("connecting networks");
Network::connect_networks(&networks, 9u64).await;

// Wait for the new block producer to emit a BlockchainEvent (which is always an Extended
// event for block 1) and keep the hash.
if let Some(BlockchainEvent::Extended(hash)) = events.next().await {
// Retrieve the block at height 1.
if let Ok(block) = blockchain.read().get_block_at(1, false, None) {
// The hash needs to be the one the Extended event returned
// (the chain itself, i.e. blockchain.header_hash(), might have already progressed further).
assert_eq!(block.hash(), hash);
// In that case, the validator producing this block has processed the 2nd skip block without having seen the first one.
return;
}
}

// The expected Extended event for block 1 was not observed.
assert!(false);
}
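
One fragility visible at the end of the deleted test: if no blockchain event ever arrives, events.next().await hangs instead of failing. A common way to bound such waits, sketched here assuming a tokio runtime and the futures StreamExt trait (wait_for_first_event is a hypothetical helper, not part of the codebase):

use std::time::Duration;

use futures::{Stream, StreamExt};
use tokio::time::timeout;

// Hypothetical helper: yield the next stream item, or panic after 30 seconds
// so a stalled test fails promptly with a clear message.
async fn wait_for_first_event<S: Stream + Unpin>(events: &mut S) -> Option<S::Item> {
    timeout(Duration::from_secs(30), events.next())
        .await
        .expect("timed out waiting for a blockchain event")
}

With such a helper, the final assert!(false) fallthrough could match on the returned event directly instead of relying on the stream ending.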
