diff --git a/accounts-db/src/ancient_append_vecs.rs b/accounts-db/src/ancient_append_vecs.rs
index 51ecb0dec7040b..8370fda32a1236 100644
--- a/accounts-db/src/ancient_append_vecs.rs
+++ b/accounts-db/src/ancient_append_vecs.rs
@@ -1207,7 +1207,7 @@ fn div_ceil(x: u64, y: NonZeroU64) -> u64 {
     // SAFETY: Since `y` is NonZero:
     // - we know the denominator is > 0, and thus safe (cannot have divide-by-zero)
     // - we know `x + y` is non-zero, and thus the numerator is safe (cannot underflow)
-    (x + y - 1) / y
+    x.div_ceil(y)
 }
 
 #[cfg(test)]
diff --git a/bench-tps/src/main.rs b/bench-tps/src/main.rs
index 7222df96fa4ab9..a6e1b5ad2b9352 100644
--- a/bench-tps/src/main.rs
+++ b/bench-tps/src/main.rs
@@ -210,9 +210,8 @@ fn main() {
         let max_fee = FeeRateGovernor::new(*target_lamports_per_signature, 0)
             .max_lamports_per_signature
             .saturating_add(max_lamports_for_prioritization(compute_unit_price));
-        let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
-            / num_accounts
-            + num_lamports_per_account;
+        let num_lamports_per_account =
+            (NUM_SIGNATURES_FOR_TXS * max_fee).div_ceil(num_accounts) + num_lamports_per_account;
         let mut accounts = HashMap::new();
         keypairs.iter().for_each(|keypair| {
             accounts.insert(
diff --git a/core/src/sigverify_stage.rs b/core/src/sigverify_stage.rs
index a59df5ae36fea3..3db756c895ad0b 100644
--- a/core/src/sigverify_stage.rs
+++ b/core/src/sigverify_stage.rs
@@ -255,7 +255,7 @@ impl SigVerifyStage {
         while max_packets > 0 && !addrs.is_empty() {
             let num_addrs = addrs.len();
             addrs.retain(|_, packets| {
-                let cap = (max_packets + num_addrs - 1) / num_addrs;
+                let cap = max_packets.div_ceil(num_addrs);
                 max_packets -= packets.len().min(cap);
                 packets.truncate(packets.len().saturating_sub(cap));
                 !packets.is_empty()
diff --git a/core/src/validator.rs b/core/src/validator.rs
index 10f23b4dde15c3..802dcc5fefdc2b 100644
--- a/core/src/validator.rs
+++ b/core/src/validator.rs
@@ -1892,7 +1892,7 @@ fn load_genesis(
     // grows too large
     let leader_schedule_slot_offset = genesis_config.epoch_schedule.leader_schedule_slot_offset;
     let slots_per_epoch = genesis_config.epoch_schedule.slots_per_epoch;
-    let leader_epoch_offset = (leader_schedule_slot_offset + slots_per_epoch - 1) / slots_per_epoch;
+    let leader_epoch_offset = leader_schedule_slot_offset.div_ceil(slots_per_epoch);
     assert!(leader_epoch_offset <= MAX_LEADER_SCHEDULE_EPOCH_OFFSET);
 
     let genesis_hash = genesis_config.hash();
diff --git a/entry/src/entry.rs b/entry/src/entry.rs
index ee1e7eb2c706b4..c40caf584c5050 100644
--- a/entry/src/entry.rs
+++ b/entry/src/entry.rs
@@ -690,7 +690,7 @@ impl EntrySlice for [Entry] {
             transactions: vec![],
         }];
 
-        let aligned_len = ((self.len() + simd_len - 1) / simd_len) * simd_len;
+        let aligned_len = self.len().div_ceil(simd_len) * simd_len;
         let mut hashes_bytes = vec![0u8; HASH_BYTES * aligned_len];
         genesis
             .iter()
diff --git a/gossip/src/crds_gossip_pull.rs b/gossip/src/crds_gossip_pull.rs
index 42e2afda4b4acc..b15d307264cf55 100644
--- a/gossip/src/crds_gossip_pull.rs
+++ b/gossip/src/crds_gossip_pull.rs
@@ -155,7 +155,7 @@ impl CrdsFilterSet {
         let mask_bits = CrdsFilter::mask_bits(num_items as f64, max_items);
         let mut filters: Vec<_> = repeat_with(|| None).take(1usize << mask_bits).collect();
         let mut indices: Vec<_> = (0..filters.len()).collect();
-        let size = (filters.len() + SAMPLE_RATE - 1) / SAMPLE_RATE;
+        let size = filters.len().div_ceil(SAMPLE_RATE);
         for _ in 0..MAX_NUM_FILTERS.min(size) {
             let k = rng.gen_range(0..indices.len());
             let k = indices.swap_remove(k);
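A quick note on the pattern these hunks replace: for the unsigned integer primitives, `x.div_ceil(y)` (stable since Rust 1.73) computes the same ceiling as the handwritten `(x + y - 1) / y`, but without the `x + y - 1` intermediate that can overflow near the top of the type's range. A minimal standalone sketch of the equivalence, using illustrative values rather than anything from the crates above:

```rust
fn main() {
    // Handwritten ceiling division, as removed in the hunks above.
    // The intermediate x + y - 1 can overflow for large x.
    let manual = |x: u64, y: u64| (x + y - 1) / y;

    // Both forms agree on ordinary inputs.
    assert_eq!(manual(10, 4), 3);
    assert_eq!(10u64.div_ceil(4), 3);
    assert_eq!(manual(12, 4), 12u64.div_ceil(4));

    // Near u64::MAX the handwritten form would wrap (u64::MAX + 4 - 1),
    // while div_ceil stays well defined.
    assert_eq!(u64::MAX.div_ceil(4), u64::MAX / 4 + 1);
    println!("ok");
}
```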
diff --git a/gossip/src/protocol.rs b/gossip/src/protocol.rs
index 51f0ef09eb59f5..2fbf601fee05e0 100644
--- a/gossip/src/protocol.rs
+++ b/gossip/src/protocol.rs
@@ -553,7 +553,7 @@ pub(crate) mod tests {
         let num_values_per_payload = (PUSH_MESSAGE_MAX_PAYLOAD_SIZE / value_size).max(1);
 
         // Expected len is the ceiling of the division
-        let expected_len = (NUM_VALUES + num_values_per_payload - 1) / num_values_per_payload;
+        let expected_len = NUM_VALUES.div_ceil(num_values_per_payload);
         let msgs = vec![value; NUM_VALUES];
 
         assert!(split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, msgs).count() <= expected_len);
diff --git a/gossip/src/weighted_shuffle.rs b/gossip/src/weighted_shuffle.rs
index 4064dc63481bf3..9c66bfb413410a 100644
--- a/gossip/src/weighted_shuffle.rs
+++ b/gossip/src/weighted_shuffle.rs
@@ -220,7 +220,7 @@ fn get_num_nodes_and_tree_size(count: usize) -> (/*num_nodes:*/ usize, /*tree_si
         size += nodes;
         nodes *= FANOUT;
     }
-    (size + nodes, size + (count + FANOUT - 1) / FANOUT)
+    (size + nodes, size + count.div_ceil(FANOUT))
 }
 
 #[cfg(test)]
diff --git a/ledger/src/shred/merkle.rs b/ledger/src/shred/merkle.rs
index 9da9f7a0a2032f..4293861f839dd1 100644
--- a/ledger/src/shred/merkle.rs
+++ b/ledger/src/shred/merkle.rs
@@ -1103,7 +1103,7 @@ pub(super) fn make_shreds_from_data(
     let (proof_size, data_buffer_size, num_data_shreds) = (1u8..32)
         .find_map(|proof_size| {
             let data_buffer_size = ShredData::capacity(proof_size, chained, resigned).ok()?;
-            let num_data_shreds = (data.len() + data_buffer_size - 1) / data_buffer_size;
+            let num_data_shreds = data.len().div_ceil(data_buffer_size);
             let num_data_shreds = num_data_shreds.max(min_num_data_shreds);
             let erasure_batch_size =
                 shredder::get_erasure_batch_size(num_data_shreds, is_last_in_slot);
diff --git a/ledger/src/shredder.rs b/ledger/src/shredder.rs
index e1d3924e49968c..85a2c7e5a43836 100644
--- a/ledger/src/shredder.rs
+++ b/ledger/src/shredder.rs
@@ -136,7 +136,7 @@ impl Shredder {
         process_stats.data_buffer_residual +=
             (data_buffer_size - serialized_shreds.len() % data_buffer_size) % data_buffer_size;
         // Integer division to ensure we have enough shreds to fit all the data
-        let num_shreds = (serialized_shreds.len() + data_buffer_size - 1) / data_buffer_size;
+        let num_shreds = serialized_shreds.len().div_ceil(data_buffer_size);
         let last_shred_index = next_shred_index + num_shreds as u32 - 1;
         // 1) Generate data shreds
         let make_data_shred = |data, shred_index: u32, fec_set_index: u32| {
@@ -471,7 +471,7 @@ fn get_fec_set_offsets(
             return None;
         }
         let num_chunks = (num_shreds / min_chunk_size).max(1);
-        let chunk_size = (num_shreds + num_chunks - 1) / num_chunks;
+        let chunk_size = num_shreds.div_ceil(num_chunks);
         let offsets = std::iter::repeat(offset).take(chunk_size);
         num_shreds -= chunk_size;
         offset += chunk_size;
@@ -541,7 +541,7 @@ mod tests {
         let size = serialized_size(&entries).unwrap() as usize;
        // Integer division to ensure we have enough shreds to fit all the data
         let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
-        let num_expected_data_shreds = (size + data_buffer_size - 1) / data_buffer_size;
+        let num_expected_data_shreds = size.div_ceil(data_buffer_size);
         let num_expected_data_shreds = num_expected_data_shreds.max(if is_last_in_slot {
             DATA_SHREDS_PER_FEC_BLOCK
         } else {
diff --git a/ledger/src/sigverify_shreds.rs b/ledger/src/sigverify_shreds.rs
index 7c21f58835af86..d0b42726a26835 100644
--- a/ledger/src/sigverify_shreds.rs
+++ b/ledger/src/sigverify_shreds.rs
@@ -203,7 +203,7 @@ fn resize_buffer(buffer: &mut PinnedVec<u8>, size: usize) {
    //HACK: Pubkeys vector is passed along as a `PacketBatch` buffer to the GPU
    //TODO: GPU needs a more opaque interface, which can handle variable sized structures for data
    //Pad the Pubkeys buffer such that it is bigger than a buffer of Packet sized elems
-    let num_packets = (size + std::mem::size_of::<Packet>() - 1) / std::mem::size_of::<Packet>();
+    let num_packets = size.div_ceil(std::mem::size_of::<Packet>());
    let size = num_packets * std::mem::size_of::<Packet>();
    buffer.resize(size, 0u8);
 }
diff --git a/local-cluster/src/integration_tests.rs b/local-cluster/src/integration_tests.rs
index 719005cd27df49..316d8084117628 100644
--- a/local-cluster/src/integration_tests.rs
+++ b/local-cluster/src/integration_tests.rs
@@ -190,8 +190,7 @@ pub fn copy_blocks(end_slot: Slot, source: &Blockstore, dest: &Blockstore, is_tr
 /// Computes the numbr of milliseconds `num_blocks` blocks will take given
 /// each slot contains `ticks_per_slot`
 pub fn ms_for_n_slots(num_blocks: u64, ticks_per_slot: u64) -> u64 {
-    ((ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks) + DEFAULT_TICKS_PER_SLOT - 1)
-        / DEFAULT_TICKS_PER_SLOT
+    (ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks).div_ceil(DEFAULT_TICKS_PER_SLOT)
 }
 
 pub fn run_kill_partition_switch_threshold(
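For a concrete check of the last hunk's arithmetic, here is a small standalone sketch of `ms_for_n_slots`. The clock constants are stand-ins assumed to match the `solana_sdk::clock` defaults (64 ticks per slot, 400 ms per slot); they are declared locally rather than imported from the crate:

```rust
// Assumed stand-in values for the solana_sdk::clock constants used above.
const DEFAULT_TICKS_PER_SLOT: u64 = 64;
const DEFAULT_MS_PER_SLOT: u64 = 400;

/// Mirrors the updated `ms_for_n_slots` from the diff: the ceiling of
/// (ticks_per_slot * ms_per_slot * num_blocks) / default ticks per slot.
fn ms_for_n_slots(num_blocks: u64, ticks_per_slot: u64) -> u64 {
    (ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks).div_ceil(DEFAULT_TICKS_PER_SLOT)
}

fn main() {
    // At the default tick rate, one block maps to exactly one slot's worth of ms.
    assert_eq!(ms_for_n_slots(1, DEFAULT_TICKS_PER_SLOT), DEFAULT_MS_PER_SLOT);

    // With a non-default tick rate the result still matches the old
    // handwritten form ((t * ms * n) + ticks - 1) / ticks.
    let (num_blocks, ticks_per_slot) = (3, 8);
    let old = (ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks + DEFAULT_TICKS_PER_SLOT - 1)
        / DEFAULT_TICKS_PER_SLOT;
    assert_eq!(ms_for_n_slots(num_blocks, ticks_per_slot), old);
    println!("ok");
}
```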