clippy: manual_div_ceil (#4376)
brooksprumo authored Jan 10, 2025
1 parent 927c6d3 commit 57ea0f6
Showing 12 changed files with 15 additions and 17 deletions.
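
All twelve files receive the same mechanical rewrite: the hand-rolled ceiling-division idiom `(x + y - 1) / y` becomes the standard library's `x.div_ceil(y)`, which clippy's `manual_div_ceil` lint flags. The two forms agree wherever the manual one is defined, but the manual numerator `x + y - 1` can overflow for large `x`, while `div_ceil` computes the rounded-up quotient without that intermediate sum. A minimal sketch of the equivalence (standalone values, not taken from any of the files below):

    fn manual_div_ceil(x: u64, y: u64) -> u64 {
        // The idiom removed throughout this commit; the sum can overflow
        // when `x` is close to u64::MAX.
        (x + y - 1) / y
    }

    fn main() {
        // Both forms round the quotient up.
        assert_eq!(manual_div_ceil(10, 4), 3);
        assert_eq!(10u64.div_ceil(4), 3);
        // Only `div_ceil` stays correct at the top of the range:
        // `manual_div_ceil(u64::MAX, 2)` overflows, whereas
        assert_eq!(u64::MAX.div_ceil(2), (u64::MAX / 2) + 1);
    }
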
2 changes: 1 addition & 1 deletion accounts-db/src/ancient_append_vecs.rs
@@ -1207,7 +1207,7 @@ fn div_ceil(x: u64, y: NonZeroU64) -> u64 {
// SAFETY: Since `y` is NonZero:
// - we know the denominator is > 0, and thus safe (cannot have divide-by-zero)
// - we know `x + y` is non-zero, and thus the numerator is safe (cannot underflow)
-(x + y - 1) / y
+x.div_ceil(y)
}

#[cfg(test)]
5 changes: 2 additions & 3 deletions bench-tps/src/main.rs
@@ -210,9 +210,8 @@ fn main() {
let max_fee = FeeRateGovernor::new(*target_lamports_per_signature, 0)
.max_lamports_per_signature
.saturating_add(max_lamports_for_prioritization(compute_unit_price));
-let num_lamports_per_account = (num_accounts - 1 + NUM_SIGNATURES_FOR_TXS * max_fee)
-    / num_accounts
-    + num_lamports_per_account;
+let num_lamports_per_account =
+    (NUM_SIGNATURES_FOR_TXS * max_fee).div_ceil(num_accounts) + num_lamports_per_account;
let mut accounts = HashMap::new();
keypairs.iter().for_each(|keypair| {
accounts.insert(
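The bench-tps hunk above also folds a reordered numerator back into the standard form: `(num_accounts - 1 + X) / num_accounts` is the same ceiling division as `X.div_ceil(num_accounts)`, just with the `num_accounts - 1` term written first. A quick check of that identity with placeholder numbers (the values are illustrative, not the crate's constants):

    fn main() {
        // Stand-ins for NUM_SIGNATURES_FOR_TXS * max_fee and num_accounts.
        let total_fee_lamports: u64 = 100_000;
        let num_accounts: u64 = 7;
        // Old form: denominator minus one added up front, then truncating division.
        let old = (num_accounts - 1 + total_fee_lamports) / num_accounts;
        // New form: the intent stated directly.
        let new = total_fee_lamports.div_ceil(num_accounts);
        assert_eq!(old, new); // both are 14_286
    }
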
2 changes: 1 addition & 1 deletion core/src/sigverify_stage.rs
@@ -255,7 +255,7 @@ impl SigVerifyStage {
while max_packets > 0 && !addrs.is_empty() {
let num_addrs = addrs.len();
addrs.retain(|_, packets| {
-let cap = (max_packets + num_addrs - 1) / num_addrs;
+let cap = max_packets.div_ceil(num_addrs);
max_packets -= packets.len().min(cap);
packets.truncate(packets.len().saturating_sub(cap));
!packets.is_empty()
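One property of the sigverify_stage loop worth noting: `max_packets.div_ceil(num_addrs)` is at least 1 whenever `max_packets > 0`, so each retained address is charged against the budget, whereas a floor division would produce a cap of 0 once the remaining budget drops below the number of addresses. A small illustration of that edge case (numbers chosen for the example only):

    fn main() {
        // Fewer packets left in the budget than there are addresses.
        let max_packets: usize = 3;
        let num_addrs: usize = 10;
        assert_eq!(max_packets / num_addrs, 0); // floor would yield a useless cap of 0
        assert_eq!(max_packets.div_ceil(num_addrs), 1); // ceiling keeps the cap at least 1
    }
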
2 changes: 1 addition & 1 deletion core/src/validator.rs
@@ -1892,7 +1892,7 @@ fn load_genesis(
// grows too large
let leader_schedule_slot_offset = genesis_config.epoch_schedule.leader_schedule_slot_offset;
let slots_per_epoch = genesis_config.epoch_schedule.slots_per_epoch;
-let leader_epoch_offset = (leader_schedule_slot_offset + slots_per_epoch - 1) / slots_per_epoch;
+let leader_epoch_offset = leader_schedule_slot_offset.div_ceil(slots_per_epoch);
assert!(leader_epoch_offset <= MAX_LEADER_SCHEDULE_EPOCH_OFFSET);

let genesis_hash = genesis_config.hash();
2 changes: 1 addition & 1 deletion entry/src/entry.rs
@@ -690,7 +690,7 @@ impl EntrySlice for [Entry] {
transactions: vec![],
}];

-let aligned_len = ((self.len() + simd_len - 1) / simd_len) * simd_len;
+let aligned_len = self.len().div_ceil(simd_len) * simd_len;
let mut hashes_bytes = vec![0u8; HASH_BYTES * aligned_len];
genesis
.iter()
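The entry.rs change is the round-up-to-a-multiple idiom: `self.len().div_ceil(simd_len) * simd_len` pads the entry count up to the next multiple of the SIMD batch width, exactly as the old `((len + simd_len - 1) / simd_len) * simd_len` did. A small sketch with an assumed width of 8 (illustrative only; not necessarily the value used by the crate):

    fn align_up(len: usize, simd_len: usize) -> usize {
        // Round `len` up to the next multiple of `simd_len`.
        len.div_ceil(simd_len) * simd_len
    }

    fn main() {
        assert_eq!(align_up(0, 8), 0);
        assert_eq!(align_up(1, 8), 8);
        assert_eq!(align_up(8, 8), 8);
        assert_eq!(align_up(13, 8), 16);
    }
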
2 changes: 1 addition & 1 deletion gossip/src/crds_gossip_pull.rs
@@ -155,7 +155,7 @@ impl CrdsFilterSet {
let mask_bits = CrdsFilter::mask_bits(num_items as f64, max_items);
let mut filters: Vec<_> = repeat_with(|| None).take(1usize << mask_bits).collect();
let mut indices: Vec<_> = (0..filters.len()).collect();
-let size = (filters.len() + SAMPLE_RATE - 1) / SAMPLE_RATE;
+let size = filters.len().div_ceil(SAMPLE_RATE);
for _ in 0..MAX_NUM_FILTERS.min(size) {
let k = rng.gen_range(0..indices.len());
let k = indices.swap_remove(k);
2 changes: 1 addition & 1 deletion gossip/src/protocol.rs
@@ -553,7 +553,7 @@ pub(crate) mod tests {
let num_values_per_payload = (PUSH_MESSAGE_MAX_PAYLOAD_SIZE / value_size).max(1);

// Expected len is the ceiling of the division
-let expected_len = (NUM_VALUES + num_values_per_payload - 1) / num_values_per_payload;
+let expected_len = NUM_VALUES.div_ceil(num_values_per_payload);
let msgs = vec![value; NUM_VALUES];

assert!(split_gossip_messages(PUSH_MESSAGE_MAX_PAYLOAD_SIZE, msgs).count() <= expected_len);
2 changes: 1 addition & 1 deletion gossip/src/weighted_shuffle.rs
@@ -220,7 +220,7 @@ fn get_num_nodes_and_tree_size(count: usize) -> (/*num_nodes:*/ usize, /*tree_si
size += nodes;
nodes *= FANOUT;
}
-(size + nodes, size + (count + FANOUT - 1) / FANOUT)
+(size + nodes, size + count.div_ceil(FANOUT))
}

#[cfg(test)]
2 changes: 1 addition & 1 deletion ledger/src/shred/merkle.rs
@@ -1103,7 +1103,7 @@ pub(super) fn make_shreds_from_data(
let (proof_size, data_buffer_size, num_data_shreds) = (1u8..32)
.find_map(|proof_size| {
let data_buffer_size = ShredData::capacity(proof_size, chained, resigned).ok()?;
-let num_data_shreds = (data.len() + data_buffer_size - 1) / data_buffer_size;
+let num_data_shreds = data.len().div_ceil(data_buffer_size);
let num_data_shreds = num_data_shreds.max(min_num_data_shreds);
let erasure_batch_size =
shredder::get_erasure_batch_size(num_data_shreds, is_last_in_slot);
6 changes: 3 additions & 3 deletions ledger/src/shredder.rs
@@ -136,7 +136,7 @@ impl Shredder {
process_stats.data_buffer_residual +=
(data_buffer_size - serialized_shreds.len() % data_buffer_size) % data_buffer_size;
// Integer division to ensure we have enough shreds to fit all the data
-let num_shreds = (serialized_shreds.len() + data_buffer_size - 1) / data_buffer_size;
+let num_shreds = serialized_shreds.len().div_ceil(data_buffer_size);
let last_shred_index = next_shred_index + num_shreds as u32 - 1;
// 1) Generate data shreds
let make_data_shred = |data, shred_index: u32, fec_set_index: u32| {
@@ -471,7 +471,7 @@ fn get_fec_set_offsets(
return None;
}
let num_chunks = (num_shreds / min_chunk_size).max(1);
-let chunk_size = (num_shreds + num_chunks - 1) / num_chunks;
+let chunk_size = num_shreds.div_ceil(num_chunks);
let offsets = std::iter::repeat(offset).take(chunk_size);
num_shreds -= chunk_size;
offset += chunk_size;
@@ -541,7 +541,7 @@ mod tests {
let size = serialized_size(&entries).unwrap() as usize;
// Integer division to ensure we have enough shreds to fit all the data
let data_buffer_size = ShredData::capacity(/*merkle_proof_size:*/ None).unwrap();
-let num_expected_data_shreds = (size + data_buffer_size - 1) / data_buffer_size;
+let num_expected_data_shreds = size.div_ceil(data_buffer_size);
let num_expected_data_shreds = num_expected_data_shreds.max(if is_last_in_slot {
DATA_SHREDS_PER_FEC_BLOCK
} else {
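In `get_fec_set_offsets` above, the shape of the chunking is unchanged: `num_chunks` is the floor of `num_shreds / min_chunk_size` (at least 1), and `chunk_size = num_shreds.div_ceil(num_chunks)` makes each chunk large enough that `num_chunks` chunks cover every shred. A hedged sketch of that covering property with arbitrary small numbers (not the crate's real shred counts):

    fn main() {
        let num_shreds: usize = 23;
        let min_chunk_size: usize = 8;
        let num_chunks = (num_shreds / min_chunk_size).max(1); // 2
        let chunk_size = num_shreds.div_ceil(num_chunks); // 12
        // Rounding the chunk size up guarantees full coverage.
        assert!(chunk_size * num_chunks >= num_shreds);
    }
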
2 changes: 1 addition & 1 deletion ledger/src/sigverify_shreds.rs
@@ -203,7 +203,7 @@ fn resize_buffer(buffer: &mut PinnedVec<u8>, size: usize) {
//HACK: Pubkeys vector is passed along as a `PacketBatch` buffer to the GPU
//TODO: GPU needs a more opaque interface, which can handle variable sized structures for data
//Pad the Pubkeys buffer such that it is bigger than a buffer of Packet sized elems
-let num_packets = (size + std::mem::size_of::<Packet>() - 1) / std::mem::size_of::<Packet>();
+let num_packets = size.div_ceil(std::mem::size_of::<Packet>());
let size = num_packets * std::mem::size_of::<Packet>();
buffer.resize(size, 0u8);
}
3 changes: 1 addition & 2 deletions local-cluster/src/integration_tests.rs
@@ -190,8 +190,7 @@ pub fn copy_blocks(end_slot: Slot, source: &Blockstore, dest: &Blockstore, is_tr
/// Computes the number of milliseconds `num_blocks` blocks will take given
/// each slot contains `ticks_per_slot`
pub fn ms_for_n_slots(num_blocks: u64, ticks_per_slot: u64) -> u64 {
-((ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks) + DEFAULT_TICKS_PER_SLOT - 1)
-    / DEFAULT_TICKS_PER_SLOT
+(ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks).div_ceil(DEFAULT_TICKS_PER_SLOT)
}

pub fn run_kill_partition_switch_threshold<C>(
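`ms_for_n_slots` converts a tick count into milliseconds, rounding up so a caller never waits slightly too little for the requested number of slots. A rough worked example, assuming the usual defaults of 64 ticks and 400 ms per slot (assumed here for illustration; the real constants come from the clock module):

    fn main() {
        // Assumed default values, not imported from the crate.
        const DEFAULT_TICKS_PER_SLOT: u64 = 64;
        const DEFAULT_MS_PER_SLOT: u64 = 400;

        let ms_for_n_slots = |num_blocks: u64, ticks_per_slot: u64| {
            (ticks_per_slot * DEFAULT_MS_PER_SLOT * num_blocks).div_ceil(DEFAULT_TICKS_PER_SLOT)
        };

        // Default-length slots divide evenly: 3 slots take 1200 ms.
        assert_eq!(ms_for_n_slots(3, 64), 1200);
        // A short 3-tick slot rounds 18.75 ms up to 19 ms.
        assert_eq!(ms_for_n_slots(1, 3), 19);
    }
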
