Introduce clippy changes
This commit introduces a set of fixes for modern clippy lints. The majority
are simple mechanical adjustments, though a few involve more careful bounds
checking in our quickcheck tests.
blt committed Aug 1, 2020
1 parent baacbcf commit d1a25d6
Showing 5 changed files with 86 additions and 92 deletions.
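Most of the "simple mechanical adjustments" the commit message refers to fall under a handful of lints. Below is a minimal, hypothetical sketch of the two that dominate this diff, field-init shorthand (`clippy::redundant_field_names`) and `is_empty()` over length comparisons (`clippy::len_zero`), using a stripped-down `Xorshift` for illustration; it is not code from the crate itself.

```rust
// Reduced, illustrative example of the dominant fixes in this commit.
struct Xorshift {
    seed: u64,
}

impl Xorshift {
    // clippy::redundant_field_names: `Xorshift { seed: seed }` becomes `Xorshift { seed }`.
    fn new(seed: u64) -> Xorshift {
        Xorshift { seed }
    }
}

fn main() {
    let rng = Xorshift::new(42);
    let data = vec![rng.seed];
    // clippy::len_zero: `data.len() < 1` becomes `data.is_empty()`.
    if !data.is_empty() {
        println!("first element: {}", data[0]);
    }
}
```

The `needless_return` fixes (dropping `return` from tail expressions, as in `percentile` and the quickcheck closures below) follow the same pattern and account for most of the remaining one-line changes.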
2 changes: 1 addition & 1 deletion benches/ckms.rs
@@ -11,7 +11,7 @@ mod ckms {

impl Xorshift {
pub fn new(seed: u64) -> Xorshift {
Xorshift { seed: seed }
Xorshift { seed }
}

pub fn next_val(&mut self) -> u32 {
56 changes: 29 additions & 27 deletions src/ckms/mod.rs
@@ -135,7 +135,7 @@ impl<
CKMS {
n: 0,

insert_threshold: insert_threshold,
insert_threshold,
inserts: 0,

samples: Store::new(2048, error),
@@ -295,19 +295,18 @@ mod test {
use super::*;
use ckms::store::invariant;
use quickcheck::{QuickCheck, TestResult};
use std::cmp::Ordering;
use std::f64::consts::E;

fn percentile(data: &Vec<f64>, prcnt: f64) -> f64 {
fn percentile(data: &[f64], prcnt: f64) -> f64 {
let idx = (prcnt * (data.len() as f64)) as usize;
return data[idx];
data[idx]
}

#[test]
fn test_cma() {
fn inner(data: Vec<f64>, err: f64) -> TestResult {
if data.is_empty() {
return TestResult::discard();
} else if !(err >= 0.0) || !(err <= 1.0) {
if !between_inclusive(err, 0.0, 1.0) || data.is_empty() {
return TestResult::discard();
}

@@ -322,15 +321,15 @@ mod test {
assert!(mean.is_some());

assert!((expected_mean - mean.unwrap()).abs() < err);
return TestResult::passed();
TestResult::passed()
}
QuickCheck::new().quickcheck(inner as fn(Vec<f64>, f64) -> TestResult);
}

#[test]
fn test_cma_add_assign() {
fn inner(l_data: Vec<f64>, r_data: Vec<f64>, err: f64) -> TestResult {
if !(err >= 0.0) || !(err <= 1.0) {
if !between_inclusive(err, 0.0, 1.0) {
return TestResult::discard();
}

@@ -350,7 +349,7 @@ mod test {
if mean.is_some() {
assert!((expected_mean - mean.unwrap()).abs() < err);
}
return TestResult::passed();
TestResult::passed()
}
QuickCheck::new().quickcheck(inner as fn(Vec<f64>, Vec<f64>, f64) -> TestResult);
}
@@ -359,9 +358,7 @@
fn error_nominal_test() {
fn inner(mut data: Vec<f64>, prcnt: f64) -> TestResult {
data.sort_by(|a, b| a.partial_cmp(b).unwrap());
if !(prcnt >= 0.0) || !(prcnt <= 1.0) {
return TestResult::discard();
} else if data.len() < 1 {
if !between_inclusive(prcnt, 0.0, 1.0) || data.is_empty() {
return TestResult::discard();
}
let err = 0.001;
@@ -388,17 +385,22 @@
QuickCheck::new().quickcheck(inner as fn(Vec<f64>, f64) -> TestResult);
}

pub fn between_inclusive(val: f64, lower: f64, upper: f64) -> bool {
match val.partial_cmp(&lower) {
None | Some(Ordering::Less) => false,
_ => !matches!(val.partial_cmp(&upper), None | Some(Ordering::Greater)),
}
}

#[test]
fn error_nominal_with_merge_test() {
fn inner(lhs: Vec<f64>, rhs: Vec<f64>, prcnt: f64, err: f64) -> TestResult {
if !(prcnt >= 0.0) || !(prcnt <= 1.0) {
return TestResult::discard();
} else if !(err >= 0.0) || !(err <= 1.0) {
return TestResult::discard();
} else if (lhs.len() + rhs.len()) < 1 {
return TestResult::discard();
}
if lhs.is_empty() || rhs.is_empty() {
if !between_inclusive(prcnt, 0.0, 1.0)
|| !between_inclusive(err, 0.0, 1.0)
|| (lhs.len() + rhs.len()) < 1
|| lhs.is_empty()
|| rhs.is_empty()
{
return TestResult::discard();
}
let mut data = lhs.clone();
@@ -476,7 +478,7 @@ mod test {
fn query_invariant_test() {
fn query_invariant(f: f64, fs: Vec<i32>) -> TestResult {
let error = 0.001;
if fs.len() < 1 {
if fs.is_empty() {
return TestResult::discard();
}

@@ -509,8 +511,8 @@ mod test {
ckms.insert(i as f64);
}

assert_eq!(0.0, ckms.samples[0].v);
assert_eq!(1.0, ckms.samples[1].v);
assert!((0.0 - ckms.samples[0].v).abs() < f64::EPSILON);
assert!((1.0 - ckms.samples[1].v).abs() < f64::EPSILON);
}

// prop: v_i-1 < v_i =< v_i+1
@@ -523,7 +525,7 @@ mod test {
ckms.insert(f);
}

if ckms.samples.len() == 0 && fsc.len() == 0 {
if ckms.samples.is_empty() && fsc.is_empty() {
return TestResult::passed();
}
let mut cur = ckms.samples[0].v;
@@ -552,8 +554,8 @@ mod test {
let s = ckms.samples.len();
let mut r = 0;
for i in 1..s {
let ref prev = ckms.samples[i - 1];
let ref cur = ckms.samples[i];
let prev = &ckms.samples[i - 1];
let cur = &ckms.samples[i];

r += prev.g;

@@ -608,7 +610,7 @@ mod test {
// We have to choose an arbitrary, lowish constant for bound
// invalidation buffer. This is because I don't have a precise
// boundary. 1024 samples worth of slop isn't bad, I guess.
if !(s <= bound) && !((s - bound).abs() < 1_024) {
if s > bound && (s - bound).abs() >= 1_024 {
println!(
"error: {:?} n: {:?} log10: {:?}",
ckms.error_bound(),
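The quickcheck guards above used to spell range checks as `!(x >= 0.0) || !(x <= 1.0)`; the new `between_inclusive` helper centralizes that logic. Here is a self-contained sketch, with the helper body copied from the diff so it compiles on its own; the assertions are illustrative, not taken from the test suite.

```rust
use std::cmp::Ordering;

// Copy of the helper added to the ckms test module: true only when
// lower <= val <= upper and every comparison is well-defined (i.e. no NaN).
fn between_inclusive(val: f64, lower: f64, upper: f64) -> bool {
    match val.partial_cmp(&lower) {
        None | Some(Ordering::Less) => false,
        _ => !matches!(val.partial_cmp(&upper), None | Some(Ordering::Greater)),
    }
}

fn main() {
    assert!(between_inclusive(0.5, 0.0, 1.0));
    assert!(!between_inclusive(1.5, 0.0, 1.0));
    // NaN compares as None under partial_cmp, so it is rejected here, just as
    // `!(x >= 0.0) || !(x <= 1.0)` rejected it before.
    assert!(!between_inclusive(f64::NAN, 0.0, 1.0));
}
```

The negated form used in the tests, `!between_inclusive(err, 0.0, 1.0)`, therefore discards exactly the inputs the old guards discarded, while stating the intended range directly.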
17 changes: 7 additions & 10 deletions src/ckms/store.rs
@@ -110,16 +110,16 @@ where
g_sum: 0,
};
Store {
error: error,
error,
data: vec![data],
inner_cap: inner_cap,
inner_cap,
len: 0,
n: 0,
}
}

/// Insert a point into the Store
pub fn insert(&mut self, element: T) -> ()
pub fn insert(&mut self, element: T)
where
T: fmt::Debug,
{
@@ -319,7 +319,7 @@ where
// through the inner caches and combine those that are contiguous and
// fit within inner_cap.
cur_outer_idx = 0;
while (self.data.len() >= 1) && (cur_outer_idx < (self.data.len() - 1)) {
while !self.data.is_empty() && (cur_outer_idx < (self.data.len() - 1)) {
if self.data[cur_outer_idx].data.len() + self.data[cur_outer_idx + 1].data.len()
<= self.inner_cap
{
@@ -459,6 +459,7 @@ where
#[cfg(test)]
mod test {
use super::*;
use ckms::test::between_inclusive;
use quickcheck::{QuickCheck, TestResult};

#[test]
@@ -486,11 +487,7 @@ mod test {
#[test]
fn obey_inner_cap() {
fn inner(data: Vec<f64>, inner_cap: usize, err: f64) -> TestResult {
if data.is_empty() {
return TestResult::discard();
} else if inner_cap == 0 {
return TestResult::discard();
} else if !(err >= 0.0) || !(err <= 1.0) {
if data.is_empty() || inner_cap == 0 || !between_inclusive(err, 0.0, 1.0) {
return TestResult::discard();
}

@@ -503,7 +500,7 @@ mod test {
assert!(inner.len() <= store.inner_cap);
}

return TestResult::passed();
TestResult::passed()
}
QuickCheck::new().quickcheck(inner as fn(Vec<f64>, usize, f64) -> TestResult);
}
76 changes: 34 additions & 42 deletions src/greenwald_khanna.rs
@@ -141,11 +141,7 @@ where
{
/// Creates a new instance of a Tuple
pub fn new(v: T, g: usize, delta: usize) -> Tuple<T> {
Tuple {
v: v,
g: g,
delta: delta,
}
Tuple { v, g, delta }
}
}

@@ -191,7 +187,7 @@ where
pub fn new(epsilon: f64) -> Stream<T> {
Stream {
summary: vec![],
epsilon: epsilon,
epsilon,
n: 0,
}
}
@@ -221,7 +217,7 @@ where
/// Compute the epsilon-approximate phi-quantile
/// from the summary data structure.
pub fn quantile(&self, phi: f64) -> &T {
assert!(self.summary.len() >= 1);
assert!(!self.summary.is_empty());
assert!(phi >= 0f64 && phi <= 1f64);

let r = (phi * self.n as f64).floor() as usize;
@@ -358,42 +354,38 @@ impl<T: Ord> AddAssign for Stream<T> {
let mut rhs_samples = rhs.summary.into_iter().peekable();
let mut started_self = false;
let mut started_rhs = false;
loop {
match (self_samples.peek(), rhs_samples.peek()) {
(Some(self_sample), Some(rhs_sample)) => {
// Detect next sample
let (next_sample, additional_delta) = if self_sample.v < rhs_sample.v {
started_self = true;
(
self_samples.next().unwrap(),
if started_rhs {
additional_self_delta
} else {
0
},
)
while let (Some(self_sample), Some(rhs_sample)) = (self_samples.peek(), rhs_samples.peek())
{
// Detect next sample
let (next_sample, additional_delta) = if self_sample.v < rhs_sample.v {
started_self = true;
(
self_samples.next().unwrap(),
if started_rhs {
additional_self_delta
} else {
started_rhs = true;
(
rhs_samples.next().unwrap(),
if started_self {
additional_rhs_delta
} else {
0
},
)
};

// Insert it
let next_sample = Tuple {
v: next_sample.v,
g: next_sample.g,
delta: next_sample.delta + additional_delta,
};
merged_summary.push(next_sample);
}
_ => break,
}
0
},
)
} else {
started_rhs = true;
(
rhs_samples.next().unwrap(),
if started_self {
additional_rhs_delta
} else {
0
},
)
};

// Insert it
let next_sample = Tuple {
v: next_sample.v,
g: next_sample.g,
delta: next_sample.delta + additional_delta,
};
merged_summary.push(next_sample);
}

// Copy the remaining samples from the rhs list
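The one structural change in this file is in the `AddAssign` merge loop, where `loop { match (self_samples.peek(), rhs_samples.peek()) { ... _ => break } }` becomes a `while let` over the pair of peeked samples. Below is a reduced, hypothetical sketch of the same control-flow pattern, using a plain ascending merge of integers rather than the actual Greenwald-Khanna summary merge.

```rust
// Illustrative only: the real code merges Tuple summaries and adjusts deltas.
fn merge_ascending(lhs: Vec<u64>, rhs: Vec<u64>) -> Vec<u64> {
    let mut merged = Vec::with_capacity(lhs.len() + rhs.len());
    let mut lhs_iter = lhs.into_iter().peekable();
    let mut rhs_iter = rhs.into_iter().peekable();

    // Replaces `loop { match (peek, peek) { (Some(a), Some(b)) => ..., _ => break } }`.
    while let (Some(l), Some(r)) = (lhs_iter.peek(), rhs_iter.peek()) {
        if l < r {
            merged.push(lhs_iter.next().unwrap());
        } else {
            merged.push(rhs_iter.next().unwrap());
        }
    }
    // Drain whichever side still has elements; the real code likewise copies
    // the leftover samples after the loop.
    merged.extend(lhs_iter);
    merged.extend(rhs_iter);
    merged
}

fn main() {
    let merged = merge_ascending(vec![1, 3, 5], vec![2, 4, 6]);
    assert_eq!(merged, vec![1, 2, 3, 4, 5, 6]);
}
```

The remaining changes in this file (field-init shorthand in `Tuple::new` and `Stream::new`, and `is_empty()` in the `quantile` assertion) are the same mechanical fixes shown earlier.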
