diff --git a/consensus/tests/request_component.rs b/consensus/tests/request_component.rs
deleted file mode 100644
index ec18cd58ef..0000000000
--- a/consensus/tests/request_component.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-use std::{sync::Arc, time::Duration};
-
-use futures::StreamExt;
-use nimiq_blockchain::BlockProducer;
-use nimiq_blockchain_interface::AbstractBlockchain;
-use nimiq_bls::KeyPair as BLSKeyPair;
-use nimiq_database::mdbx::MdbxDatabase;
-use nimiq_genesis_builder::GenesisBuilder;
-use nimiq_keys::{Address, KeyPair, SecureGenerate};
-use nimiq_network_mock::{MockHub, MockNetwork};
-use nimiq_primitives::{networks::NetworkId, policy::Policy};
-use nimiq_test_log::test;
-use nimiq_test_utils::{
-    blockchain::{produce_macro_blocks, signing_key, voting_key},
-    node::Node,
-    validator::seeded_rng,
-};
-use nimiq_time::{interval, sleep};
-use nimiq_utils::spawn;
-
-#[test(tokio::test(flavor = "multi_thread", worker_threads = 4))]
-#[ignore]
-async fn test_request_component() {
-    let mut hub = Some(MockHub::default());
-    let env =
-        MdbxDatabase::new_volatile(Default::default()).expect("Could not open a volatile database");
-
-    // Generate genesis block.
-    let key = KeyPair::generate(&mut seeded_rng(0));
-    let sgn_key = KeyPair::generate(&mut seeded_rng(0));
-    let vtn_key = BLSKeyPair::generate(&mut seeded_rng(0));
-
-    let genesis = GenesisBuilder::default()
-        .with_network(NetworkId::UnitAlbatross)
-        .with_genesis_validator(
-            Address::from(&key),
-            sgn_key.public,
-            vtn_key.public_key,
-            Address::default(),
-            None,
-            None,
-            false,
-        )
-        .generate(env)
-        .unwrap();
-
-    let mut node1 =
-        Node::<MockNetwork>::history_with_genesis_info(1, genesis.clone(), &mut hub, false).await;
-    let mut node2 =
-        Node::<MockNetwork>::history_with_genesis_info(2, genesis.clone(), &mut hub, false).await;
-
-    let producer1 = BlockProducer::new(signing_key(), voting_key());
-
-    node1.consume();
-    node2.consume();
-
-    // let node1 produce blocks again
-    {
-        let prod_blockchain = Arc::clone(&node1.blockchain);
-        spawn(async move {
-            loop {
-                produce_macro_blocks(&producer1, &prod_blockchain, 1);
-                sleep(Duration::from_secs(5)).await;
-            }
-        });
-    }
-
-    let mut connected = false;
-    let mut interval = interval(Duration::from_secs(1));
-    loop {
-        if node1.blockchain.read().block_number() > 200 + Policy::genesis_block_number()
-            && !connected
-        {
-            log::info!("Connecting node2 to node 1");
-            node2.network.dial_mock(&node1.network);
-            connected = true;
-        }
-
-        log::info!(
-            "Node1: at #{} - {}",
-            node1.blockchain.read().block_number(),
-            node1.blockchain.read().head_hash()
-        );
-        log::info!(
-            "Node2: at #{} - {}",
-            node2.blockchain.read().block_number(),
-            node2.blockchain.read().head_hash()
-        );
-
-        interval.next().await;
-    }
-}
diff --git a/validator/tests/integration.rs b/validator/tests/integration.rs
index 371127bbdd..84c696ebd4 100644
--- a/validator/tests/integration.rs
+++ b/validator/tests/integration.rs
@@ -79,26 +79,3 @@ async fn validator_update() {
     let producer2 = BlockProducer::new(new_signing_key, new_voting_key);
     produce_macro_blocks_with_txns(&producer2, &blockchain, 1, 1, 2);
 }
-
-#[test(tokio::test(flavor = "multi_thread"))]
-#[ignore]
-async fn four_validators_can_create_an_epoch() {
-    let env =
-        MdbxDatabase::new_volatile(Default::default()).expect("Could not open a volatile database");
-
-    let validators =
-        build_validators::<Network>(env, &(1u64..=4u64).collect::<Vec<_>>(), &mut None, false)
-            .await;
-
-    let blockchain = Arc::clone(&validators.first().unwrap().blockchain);
-
-    for validator in validators {
-        spawn(validator);
-    }
-
-    let events = blockchain.read().notifier_as_stream();
-
-    events.take(130).for_each(|_| future::ready(())).await;
-
-    assert!(blockchain.read().block_number() >= 130 + Policy::genesis_block_number());
-}
diff --git a/validator/tests/mock.rs b/validator/tests/mock.rs
index a16dfd1141..23a5b8bd81 100644
--- a/validator/tests/mock.rs
+++ b/validator/tests/mock.rs
@@ -209,130 +209,3 @@ fn create_skip_block_update(
         validator_id as usize,
     )
 }
-
-#[test(tokio::test)]
-#[ignore]
-async fn validator_can_catch_up() {
-    // remove first block producer in order to trigger a skip block. Never connect him again
-    // remove the second block producer to trigger another skip block after the first one (which we want someone to catch up to). Never connect him again
-    // third block producer needs to be disconnected as well and then reconnected to catch up to the second's skip blocks while not having seen the first one,
-    // resulting in him producing the first block.
-    let hub = MockHub::default();
-    let env =
-        MdbxDatabase::new_volatile(Default::default()).expect("Could not open a volatile database");
-
-    // In total 8 validator are registered. after 3 validators are taken offline the remaining 5 should not be able to progress on their own
-    let mut validators = build_validators::<MockNetwork>(
-        env,
-        &(9u64..=16u64).collect::<Vec<_>>(),
-        &mut Some(hub),
-        false,
-    )
-    .await;
-    // Maintain a collection of the corresponding networks.
-
-    let networks: Vec<Arc<MockNetwork>> = validators
-        .iter()
-        .map(|v| v.consensus.network.clone())
-        .collect();
-
-    // Disconnect the block producers for the next 3 skip blocks. remember the one which is supposed to actually create the block (3rd skip block)
-    let (validator, _) = {
-        let validator = validator_for_slot(&mut validators, 1, 1);
-        validator
-            .consensus
-            .network
-            .disconnect(CloseReason::GoingOffline)
-            .await;
-        let id1 = validator.validator_slot_band();
-        let validator = validator_for_slot(&mut validators, 2, 2);
-        validator
-            .consensus
-            .network
-            .disconnect(CloseReason::GoingOffline)
-            .await;
-        let id2 = validator.validator_slot_band();
-        assert_ne!(id2, id1);
-
-        // ideally we would remove the validators from the vec for them to not even execute.
-        // However the implementation does still progress their chains and since they have registered listeners, they would panic.
-        // that is confusing, thus they are allowed to execute (with no validator network connection)
-        // validators.retain(|v| {
-        //     v.validator_address() != id1 && v.validator_address() != id2
-        // });
-
-        let validator = validator_for_slot(&validators, 3, 3);
-        validator
-            .consensus
-            .network
-            .disconnect(CloseReason::GoingOffline)
-            .await;
-        assert_ne!(id1, validator.validator_slot_band());
-        assert_ne!(id2, validator.validator_slot_band());
-        (validator, validator.consensus.network.clone())
-    };
-    // assert_eq!(validators.len(), 7);
-
-    let blockchain = validator.blockchain.clone();
-    // Listen for blockchain events from the block producer (after two skip blocks).
-    let mut events = blockchain.read().notifier_as_stream();
-
-    let slots: Vec<_> = blockchain.read().current_validators().unwrap().validators
-        [validator.validator_slot_band() as usize]
-        .slots
-        .clone()
-        .collect();
-
-    let skip_block_info = SkipBlockInfo {
-        network_id: blockchain.read().network_id,
-        block_number: 1,
-        vrf_entropy: blockchain.read().head().seed().entropy(),
-    };
-
-    // Manually construct a skip block for the validator
-    let vc = create_skip_block_update(
-        skip_block_info,
-        validator.current_voting_key(),
-        validator.validator_slot_band(),
-        &slots,
-    );
-
-    // let the validators run.
-    for validator in validators {
-        spawn(validator);
-    }
-
-    // while waiting for them to run into the block producer timeout (10s)
-    sleep(Duration::from_secs(11)).await;
-    // At which point the prepared skip block message is broadcast
-    // (only a subset of the validators will accept it as it send as level 1 message)
-    for network in &networks {
-        for peer_id in network.get_peers() {
-            network
-                .message::<SkipBlockMessage>(SkipBlockMessage(vc.clone().into()), peer_id)
-                .await
-                .unwrap();
-        }
-    }
-
-    // wait enough time to complete the skip block aggregation (it really does not matter how long, as long as the vc completes)
-    sleep(Duration::from_secs(8)).await;
-
-    // reconnect a validator (who has not seen the proof for the skip block)
-    log::warn!("connecting networks");
-    Network::connect_networks(&networks, 9u64).await;
-
-    // Wait for the new block producer to create a blockchainEvent (which is always an extended event for block 1) and keep the hash
-    if let Some(BlockchainEvent::Extended(hash)) = events.next().await {
-        // retrieve the block for height 1
-        if let Ok(block) = blockchain.read().get_block_at(1, false, None) {
-            // the hash needs to be the one the extended event returned.
-            // (the chain itself i.e blockchain.header_hash() might have already progressed further)
-            assert_eq!(block.hash(), hash);
-            // now in that case the validator producing this block has progressed the 2nd skip block without having seen the first skip block.
-            return;
-        }
-    }
-
-    assert!(false);
-}