From f9171a3981f3422005eb2082ffcb6e40fd28af9d Mon Sep 17 00:00:00 2001
From: PSeitz
Date: Tue, 20 Dec 2022 07:30:06 +0100
Subject: [PATCH] fix clippy (#1725)

* fix clippy

* fix clippy fastfield codecs

* fix clippy bitpacker

* fix clippy common

* fix clippy stacker

* fix clippy sstable

* fmt
---
 bitpacker/src/bitpacker.rs                    |  9 +++----
 bitpacker/src/blocked_bitpacker.rs            |  6 ++---
 common/src/bitset.rs                          |  6 ++---
 fastfield_codecs/src/blockwise_linear.rs      |  2 +-
 .../src/compact_space/build_compact_space.rs  |  2 +-
 fastfield_codecs/src/compact_space/mod.rs     | 27 ++++++-----------
 src/aggregation/bucket/histogram/histogram.rs |  4 +--
 src/aggregation/intermediate_agg_result.rs    |  4 +--
 src/aggregation/mod.rs                        |  4 +--
 src/aggregation/segment_agg_result.rs         |  2 +-
 src/collector/facet_collector.rs              |  2 +-
 src/collector/mod.rs                          |  2 +-
 src/core/index.rs                             |  2 +-
 src/directory/composite_file.rs               |  2 +-
 src/directory/footer.rs                       |  9 ++++---
 src/directory/managed_directory.rs            |  2 +-
 src/directory/mmap_directory.rs               |  6 ++---
 src/fastfield/facet_reader.rs                 |  4 +--
 src/fastfield/mod.rs                          |  2 +-
 src/fastfield/multivalued/writer.rs           |  2 +-
 src/fastfield/writer.rs                       | 14 ++--------
 src/indexer/demuxer.rs                        |  4 +--
 src/indexer/index_writer.rs                   |  8 +++---
 src/indexer/merger.rs                         |  4 +--
 src/indexer/segment_updater.rs                |  2 +-
 src/indexer/stamper.rs                        |  6 ++---
 src/lib.rs                                    |  2 +-
 src/positions/reader.rs                       |  2 +-
 src/postings/per_field_postings_writer.rs     |  2 +-
 src/postings/serializer.rs                    |  2 +-
 src/query/bitset/mod.rs                       |  2 +-
 src/query/range_query_ip_fastfield.rs         |  8 +++---
 src/query/union.rs                            |  2 +-
 src/schema/date_time_options.rs               |  2 +-
 src/store/index/mod.rs                        |  4 +--
 src/store/reader.rs                           |  6 +----
 src/store/store_compressor.rs                 |  2 +-
 src/termdict/fst_termdict/term_info_store.rs  |  2 +-
 src/termdict/fst_termdict/termdict.rs         |  2 +-
 sstable/src/sstable_index.rs                  |  2 +-
 sstable/src/value.rs                          |  2 +-
 stacker/src/expull.rs                         |  4 +--
 42 files changed, 81 insertions(+), 102 deletions(-)

diff --git a/bitpacker/src/bitpacker.rs b/bitpacker/src/bitpacker.rs
index 86dd1a7ca9..b5058cfacd 100644
--- a/bitpacker/src/bitpacker.rs
+++ b/bitpacker/src/bitpacker.rs
@@ -25,15 +25,14 @@ impl BitPacker {
         num_bits: u8,
         output: &mut TWrite,
     ) -> io::Result<()> {
-        let val_u64 = val as u64;
         let num_bits = num_bits as usize;
         if self.mini_buffer_written + num_bits > 64 {
-            self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
+            self.mini_buffer |= val.wrapping_shl(self.mini_buffer_written as u32);
             output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
-            self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
+            self.mini_buffer = val.wrapping_shr((64 - self.mini_buffer_written) as u32);
             self.mini_buffer_written = self.mini_buffer_written + num_bits - 64;
         } else {
-            self.mini_buffer |= val_u64 << self.mini_buffer_written;
+            self.mini_buffer |= val << self.mini_buffer_written;
             self.mini_buffer_written += num_bits;
             if self.mini_buffer_written == 64 {
                 output.write_all(self.mini_buffer.to_le_bytes().as_ref())?;
@@ -102,7 +101,7 @@ impl BitUnpacker {
             .try_into()
             .unwrap();
         let val_unshifted_unmasked: u64 = u64::from_le_bytes(bytes);
-        let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
+        let val_shifted = val_unshifted_unmasked >> bit_shift;
         val_shifted & self.mask
     }
 }
diff --git a/bitpacker/src/blocked_bitpacker.rs b/bitpacker/src/blocked_bitpacker.rs
index fa4ee59679..3d901f7e92 100644
--- a/bitpacker/src/blocked_bitpacker.rs
+++ b/bitpacker/src/blocked_bitpacker.rs
@@ -84,7 +84,7 @@ impl BlockedBitpacker {
     #[inline]
     pub fn add(&mut self, val: u64) {
         self.buffer.push(val);
-        if self.buffer.len() == BLOCK_SIZE as usize {
+        if self.buffer.len() == BLOCK_SIZE {
             self.flush();
         }
     }
@@ -126,8 +126,8 @@ impl BlockedBitpacker {
     }
     #[inline]
     pub fn get(&self, idx: usize) -> u64 {
-        let metadata_pos = idx / BLOCK_SIZE as usize;
-        let pos_in_block = idx % BLOCK_SIZE as usize;
+        let metadata_pos = idx / BLOCK_SIZE;
+        let pos_in_block = idx % BLOCK_SIZE;
         if let Some(metadata) = self.offset_and_bits.get(metadata_pos) {
             let unpacked = BitUnpacker::new(metadata.num_bits()).get(
                 pos_in_block as u32,
diff --git a/common/src/bitset.rs b/common/src/bitset.rs
index bbc74c9954..74d687f46e 100644
--- a/common/src/bitset.rs
+++ b/common/src/bitset.rs
@@ -151,7 +151,7 @@ impl TinySet {
         if self.is_empty() {
             None
         } else {
-            let lowest = self.0.trailing_zeros() as u32;
+            let lowest = self.0.trailing_zeros();
             self.0 ^= TinySet::singleton(lowest).0;
             Some(lowest)
         }
@@ -421,7 +421,7 @@ mod tests {
             bitset.serialize(&mut out).unwrap();
 
             let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
-            assert_eq!(bitset.len() as usize, i as usize);
+            assert_eq!(bitset.len(), i as usize);
         }
     }
 
@@ -432,7 +432,7 @@ mod tests {
         bitset.serialize(&mut out).unwrap();
 
         let bitset = ReadOnlyBitSet::open(OwnedBytes::new(out));
-        assert_eq!(bitset.len() as usize, 64);
+        assert_eq!(bitset.len(), 64);
     }
 
     #[test]
diff --git a/fastfield_codecs/src/blockwise_linear.rs b/fastfield_codecs/src/blockwise_linear.rs
index 553463cc7b..f4ba6c7283 100644
--- a/fastfield_codecs/src/blockwise_linear.rs
+++ b/fastfield_codecs/src/blockwise_linear.rs
@@ -75,7 +75,7 @@ impl FastFieldCodec for BlockwiseLinearCodec {
         if column.num_vals() < 10 * CHUNK_SIZE as u32 {
             return None;
         }
-        let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE as usize).collect();
+        let mut first_chunk: Vec<u64> = column.iter().take(CHUNK_SIZE).collect();
         let line = Line::train(&VecColumn::from(&first_chunk));
         for (i, buffer_val) in first_chunk.iter_mut().enumerate() {
             let interpolated_val = line.eval(i as u32);
diff --git a/fastfield_codecs/src/compact_space/build_compact_space.rs b/fastfield_codecs/src/compact_space/build_compact_space.rs
index 90e14d3ef7..51d1912ca9 100644
--- a/fastfield_codecs/src/compact_space/build_compact_space.rs
+++ b/fastfield_codecs/src/compact_space/build_compact_space.rs
@@ -208,7 +208,7 @@ impl CompactSpaceBuilder {
             };
             let covered_range_len = range_mapping.range_length();
             ranges_mapping.push(range_mapping);
-            compact_start += covered_range_len as u64;
+            compact_start += covered_range_len;
         }
         // println!("num ranges {}", ranges_mapping.len());
         CompactSpace { ranges_mapping }
diff --git a/fastfield_codecs/src/compact_space/mod.rs b/fastfield_codecs/src/compact_space/mod.rs
index 9120d2413c..9129452cc7 100644
--- a/fastfield_codecs/src/compact_space/mod.rs
+++ b/fastfield_codecs/src/compact_space/mod.rs
@@ -97,7 +97,7 @@ impl BinarySerializable for CompactSpace {
             };
             let range_length = range_mapping.range_length();
             ranges_mapping.push(range_mapping);
-            compact_start += range_length as u64;
+            compact_start += range_length;
         }
 
         Ok(Self { ranges_mapping })
@@ -407,10 +407,10 @@ impl CompactSpaceDecompressor {
             let idx2 = idx + 1;
             let idx3 = idx + 2;
             let idx4 = idx + 3;
-            let val1 = get_val(idx1 as u32);
-            let val2 = get_val(idx2 as u32);
-            let val3 = get_val(idx3 as u32);
-            let val4 = get_val(idx4 as u32);
+            let val1 = get_val(idx1);
+            let val2 = get_val(idx2);
+            let val3 = get_val(idx3);
+            let val4 = get_val(idx4);
             push_if_in_range(idx1, val1);
             push_if_in_range(idx2, val2);
             push_if_in_range(idx3, val3);
@@ -419,14 +419,13 @@ impl CompactSpaceDecompressor {
 
         // handle rest
         for idx in cutoff..position_range.end {
-            push_if_in_range(idx, get_val(idx as u32));
+            push_if_in_range(idx, get_val(idx));
         }
     }
 
     #[inline]
     fn iter_compact(&self) -> impl Iterator<Item = u64> + '_ {
-        (0..self.params.num_vals)
-            .map(move |idx| self.params.bit_unpacker.get(idx, &self.data) as u64)
+        (0..self.params.num_vals).map(move |idx| self.params.bit_unpacker.get(idx, &self.data))
     }
 
     #[inline]
@@ -569,7 +568,7 @@ mod tests {
         let decomp = CompactSpaceDecompressor::open(data).unwrap();
         let complete_range = 0..vals.len() as u32;
         for (pos, val) in vals.iter().enumerate() {
-            let val = *val as u128;
+            let val = *val;
             let pos = pos as u32;
             let mut positions = Vec::new();
             decomp.get_positions_for_value_range(val..=val, pos..pos + 1, &mut positions);
@@ -666,7 +665,7 @@ mod tests {
             get_positions_for_value_range_helper(
                 &decomp,
                 4_000_211_221u128..=5_000_000_000u128,
-                complete_range.clone()
+                complete_range
             ),
             vec![6, 7]
         );
@@ -703,7 +702,7 @@ mod tests {
             vec![0]
         );
         assert_eq!(
-            get_positions_for_value_range_helper(&decomp, 0..=105, complete_range.clone()),
+            get_positions_for_value_range_helper(&decomp, 0..=105, complete_range),
             vec![0]
         );
     }
@@ -756,11 +755,7 @@ mod tests {
         );
 
         assert_eq!(
-            get_positions_for_value_range_helper(
-                &*decomp,
-                1_000_000..=1_000_000,
-                complete_range.clone()
-            ),
+            get_positions_for_value_range_helper(&*decomp, 1_000_000..=1_000_000, complete_range),
             vec![11]
         );
     }
diff --git a/src/aggregation/bucket/histogram/histogram.rs b/src/aggregation/bucket/histogram/histogram.rs
index 8472628069..2f709e27cb 100644
--- a/src/aggregation/bucket/histogram/histogram.rs
+++ b/src/aggregation/bucket/histogram/histogram.rs
@@ -401,7 +401,7 @@ impl SegmentHistogramCollector {
 
             debug_assert_eq!(
                 self.buckets[bucket_pos].key,
-                get_bucket_val(val, self.interval, self.offset) as f64
+                get_bucket_val(val, self.interval, self.offset)
             );
             self.increment_bucket(bucket_pos, doc, &bucket_with_accessor.sub_aggregation)?;
         }
@@ -428,7 +428,7 @@ impl SegmentHistogramCollector {
             if bounds.contains(val) {
                 debug_assert_eq!(
                     self.buckets[bucket_pos].key,
-                    get_bucket_val(val, self.interval, self.offset) as f64
+                    get_bucket_val(val, self.interval, self.offset)
                 );
 
                 self.increment_bucket(bucket_pos, doc, bucket_with_accessor)?;
diff --git a/src/aggregation/intermediate_agg_result.rs b/src/aggregation/intermediate_agg_result.rs
index cdeaa92e3d..508dc79986 100644
--- a/src/aggregation/intermediate_agg_result.rs
+++ b/src/aggregation/intermediate_agg_result.rs
@@ -282,8 +282,8 @@ impl IntermediateBucketResult {
             IntermediateBucketResult::Range(range_res) => {
                 let mut buckets: Vec<RangeBucketEntry> = range_res
                     .buckets
-                    .into_iter()
-                    .map(|(_, bucket)| {
+                    .into_values()
+                    .map(|bucket| {
                         bucket.into_final_bucket_entry(
                             &req.sub_aggregation,
                             schema,
diff --git a/src/aggregation/mod.rs b/src/aggregation/mod.rs
index a6bd4478c4..cca611ba63 100644
--- a/src/aggregation/mod.rs
+++ b/src/aggregation/mod.rs
@@ -451,9 +451,9 @@ mod tests {
                 text_field_id => term.to_string(),
                 string_field_id => term.to_string(),
                 score_field => i as u64,
-                score_field_f64 => i as f64,
+                score_field_f64 => i,
                 score_field_i64 => i as i64,
-                fraction_field => i as f64/100.0,
+                fraction_field => i/100.0,
             ))?;
         }
         index_writer.commit()?;
diff --git a/src/aggregation/segment_agg_result.rs b/src/aggregation/segment_agg_result.rs
index 406791fc87..28944c39b9 100644
--- a/src/aggregation/segment_agg_result.rs
+++ b/src/aggregation/segment_agg_result.rs
@@ -305,7 +305,7 @@ impl BucketCount {
     }
     pub(crate) fn add_count(&self, count: u32) {
         self.bucket_count
-            .fetch_add(count as u32, std::sync::atomic::Ordering::Relaxed);
+            .fetch_add(count, std::sync::atomic::Ordering::Relaxed);
     }
     pub(crate) fn get_count(&self) -> u32 {
         self.bucket_count.load(std::sync::atomic::Ordering::Relaxed)
diff --git a/src/collector/facet_collector.rs b/src/collector/facet_collector.rs
index 18b44ff77a..80192bc8be 100644
--- a/src/collector/facet_collector.rs
+++ b/src/collector/facet_collector.rs
@@ -357,7 +357,7 @@ impl SegmentCollector for FacetSegmentCollector {
             let mut facet = vec![];
             let facet_ord = self.collapse_facet_ords[collapsed_facet_ord];
             // TODO handle errors.
-            if facet_dict.ord_to_term(facet_ord as u64, &mut facet).is_ok() {
+            if facet_dict.ord_to_term(facet_ord, &mut facet).is_ok() {
                 if let Ok(facet) = Facet::from_encoded(facet) {
                     facet_counts.insert(facet, count);
                 }
diff --git a/src/collector/mod.rs b/src/collector/mod.rs
index b0a08d48da..6708d125c3 100644
--- a/src/collector/mod.rs
+++ b/src/collector/mod.rs
@@ -170,7 +170,7 @@ pub trait Collector: Sync + Send {
         segment_ord: u32,
         reader: &SegmentReader,
     ) -> crate::Result<<Self::Child as SegmentCollector>::Fruit> {
-        let mut segment_collector = self.for_segment(segment_ord as u32, reader)?;
+        let mut segment_collector = self.for_segment(segment_ord, reader)?;
 
         match (reader.alive_bitset(), self.requires_scoring()) {
             (Some(alive_bitset), true) => {
diff --git a/src/core/index.rs b/src/core/index.rs
index 7d942313d4..b3368b6df1 100644
--- a/src/core/index.rs
+++ b/src/core/index.rs
@@ -813,7 +813,7 @@ mod tests {
         let field = schema.get_field("num_likes").unwrap();
         let tempdir = TempDir::new().unwrap();
         let tempdir_path = PathBuf::from(tempdir.path());
-        let index = Index::create_in_dir(&tempdir_path, schema).unwrap();
+        let index = Index::create_in_dir(tempdir_path, schema).unwrap();
         let reader = index
             .reader_builder()
             .reload_policy(ReloadPolicy::OnCommit)
diff --git a/src/directory/composite_file.rs b/src/directory/composite_file.rs
index 047c6c2dfe..9a2c782a15 100644
--- a/src/directory/composite_file.rs
+++ b/src/directory/composite_file.rs
@@ -75,7 +75,7 @@ impl<W: TerminatingWrite + Write> CompositeWrite<W> {
 
         let mut prev_offset = 0;
         for (file_addr, offset) in self.offsets {
-            VInt((offset - prev_offset) as u64).serialize(&mut self.write)?;
+            VInt(offset - prev_offset).serialize(&mut self.write)?;
             file_addr.serialize(&mut self.write)?;
             prev_offset = offset;
         }
diff --git a/src/directory/footer.rs b/src/directory/footer.rs
index 81f1fb99a8..80a38392bb 100644
--- a/src/directory/footer.rs
+++ b/src/directory/footer.rs
@@ -38,7 +38,7 @@ impl Footer {
         counting_write.write_all(serde_json::to_string(&self)?.as_ref())?;
         let footer_payload_len = counting_write.written_bytes();
         BinarySerializable::serialize(&(footer_payload_len as u32), write)?;
-        BinarySerializable::serialize(&(FOOTER_MAGIC_NUMBER as u32), write)?;
+        BinarySerializable::serialize(&FOOTER_MAGIC_NUMBER, write)?;
         Ok(())
     }
 
@@ -90,9 +90,10 @@ impl Footer {
             ));
         }
 
-        let footer: Footer = serde_json::from_slice(&file.read_bytes_slice(
-            file.len() - total_footer_size..file.len() - footer_metadata_len as usize,
-        )?)?;
+        let footer: Footer =
+            serde_json::from_slice(&file.read_bytes_slice(
+                file.len() - total_footer_size..file.len() - footer_metadata_len,
+            )?)?;
 
         let body = file.slice_to(file.len() - total_footer_size);
         Ok((footer, body))
diff --git a/src/directory/managed_directory.rs b/src/directory/managed_directory.rs
index d212261953..c9ae9f4ec4 100644
--- a/src/directory/managed_directory.rs
+++ b/src/directory/managed_directory.rs
@@ -388,7 +388,7 @@ mod tests_mmap_specific {
         let tempdir_path = PathBuf::from(tempdir.path());
 
         let living_files = HashSet::new();
-        let mmap_directory = MmapDirectory::open(&tempdir_path).unwrap();
+        let mmap_directory = MmapDirectory::open(tempdir_path).unwrap();
         let mut managed_directory = ManagedDirectory::wrap(Box::new(mmap_directory)).unwrap();
         let mut write = managed_directory.open_write(test_path1).unwrap();
         write.write_all(&[0u8, 1u8]).unwrap();
diff --git a/src/directory/mmap_directory.rs b/src/directory/mmap_directory.rs
index ffd908bc3d..f87a19a394 100644
--- a/src/directory/mmap_directory.rs
+++ b/src/directory/mmap_directory.rs
@@ -341,7 +341,7 @@ impl Directory for MmapDirectory {
     /// removed before the file is deleted.
     fn delete(&self, path: &Path) -> result::Result<(), DeleteError> {
         let full_path = self.resolve_path(path);
-        fs::remove_file(&full_path).map_err(|e| {
+        fs::remove_file(full_path).map_err(|e| {
             if e.kind() == io::ErrorKind::NotFound {
                 DeleteError::FileDoesNotExist(path.to_owned())
             } else {
@@ -395,7 +395,7 @@ impl Directory for MmapDirectory {
     fn atomic_read(&self, path: &Path) -> Result<Vec<u8>, OpenReadError> {
         let full_path = self.resolve_path(path);
         let mut buffer = Vec::new();
-        match File::open(&full_path) {
+        match File::open(full_path) {
             Ok(mut file) => {
                 file.read_to_end(&mut buffer).map_err(|io_error| {
                     OpenReadError::wrap_io_error(io_error, path.to_path_buf())
@@ -425,7 +425,7 @@ impl Directory for MmapDirectory {
         let file: File = OpenOptions::new()
             .write(true)
             .create(true) //< if the file does not exist yet, create it.
-            .open(&full_path)
+            .open(full_path)
             .map_err(LockError::wrap_io_error)?;
         if lock.is_blocking {
             file.lock_exclusive().map_err(LockError::wrap_io_error)?;
diff --git a/src/fastfield/facet_reader.rs b/src/fastfield/facet_reader.rs
index 76fba33c5f..47eabbe081 100644
--- a/src/fastfield/facet_reader.rs
+++ b/src/fastfield/facet_reader.rs
@@ -64,9 +64,7 @@ impl FacetReader {
         facet_ord: TermOrdinal,
         output: &mut Facet,
     ) -> crate::Result<()> {
-        let found_term = self
-            .term_dict
-            .ord_to_term(facet_ord as u64, &mut self.buffer)?;
+        let found_term = self.term_dict.ord_to_term(facet_ord, &mut self.buffer)?;
         assert!(found_term, "Term ordinal {} no found.", facet_ord);
         let facet_str = str::from_utf8(&self.buffer[..])
             .map_err(|utf8_err| DataCorruption::comment_only(utf8_err.to_string()))?;
diff --git a/src/fastfield/mod.rs b/src/fastfield/mod.rs
index 4139a07b9a..6c7dff8159 100644
--- a/src/fastfield/mod.rs
+++ b/src/fastfield/mod.rs
@@ -473,7 +473,7 @@ mod tests {
             let fast_field_reader = open::<u64>(data)?;
 
             for a in 0..n {
-                assert_eq!(fast_field_reader.get_val(a as u32), permutation[a as usize]);
+                assert_eq!(fast_field_reader.get_val(a as u32), permutation[a]);
             }
         }
         Ok(())
diff --git a/src/fastfield/multivalued/writer.rs b/src/fastfield/multivalued/writer.rs
index 213b7d8bf8..2d951ed6f5 100644
--- a/src/fastfield/multivalued/writer.rs
+++ b/src/fastfield/multivalued/writer.rs
@@ -264,7 +264,7 @@ fn iter_remapped_multivalue_index<'a, C: Column>(
     std::iter::once(0).chain(doc_id_map.iter_old_doc_ids().map(move |old_doc| {
         let num_vals_for_doc = column.get_val(old_doc + 1) - column.get_val(old_doc);
         offset += num_vals_for_doc;
-        offset as u64
+        offset
     }))
 }
diff --git a/src/fastfield/writer.rs b/src/fastfield/writer.rs
index 45f56c8dbb..e581b36d8f 100644
--- a/src/fastfield/writer.rs
+++ b/src/fastfield/writer.rs
@@ -360,20 +360,10 @@ impl U128FastFieldWriter {
                     .map(|idx| self.vals[idx as usize])
             };
 
-            serializer.create_u128_fast_field_with_idx(
-                self.field,
-                iter_gen,
-                self.val_count as u32,
-                0,
-            )?;
+            serializer.create_u128_fast_field_with_idx(self.field, iter_gen, self.val_count, 0)?;
         } else {
             let iter_gen = || self.vals.iter().cloned();
-            serializer.create_u128_fast_field_with_idx(
-                self.field,
-                iter_gen,
-                self.val_count as u32,
-                0,
-            )?;
+            serializer.create_u128_fast_field_with_idx(self.field, iter_gen, self.val_count, 0)?;
         }
 
         Ok(())
diff --git a/src/indexer/demuxer.rs b/src/indexer/demuxer.rs
index 8eeb983a2f..9bdb531dac 100644
--- a/src/indexer/demuxer.rs
+++ b/src/indexer/demuxer.rs
@@ -252,8 +252,8 @@ mod tests {
             &demux_mapping,
             target_settings,
             vec![
-                Box::new(RamDirectory::default()),
-                Box::new(RamDirectory::default()),
+                Box::<RamDirectory>::default(),
+                Box::<RamDirectory>::default(),
             ],
         )?;
diff --git a/src/indexer/index_writer.rs b/src/indexer/index_writer.rs
index 8786ab6cd8..2295959484 100644
--- a/src/indexer/index_writer.rs
+++ b/src/indexer/index_writer.rs
@@ -152,7 +152,7 @@ pub(crate) fn advance_deletes(
     let num_deleted_docs = max_doc - num_alive_docs;
     if num_deleted_docs > num_deleted_docs_before {
         // There are new deletes. We need to write a new delete file.
-        segment = segment.with_delete_meta(num_deleted_docs as u32, target_opstamp);
+        segment = segment.with_delete_meta(num_deleted_docs, target_opstamp);
         let mut alive_doc_file = segment.open_write(SegmentComponent::Delete)?;
         write_alive_bitset(&alive_bitset, &mut alive_doc_file)?;
         alive_doc_file.terminate()?;
@@ -984,7 +984,7 @@ mod tests {
         "LogMergePolicy { min_num_segments: 8, max_docs_before_merge: 10000000, \
          min_layer_size: 10000, level_log_size: 0.75, del_docs_ratio_before_merge: 1.0 }"
         );
-        let merge_policy = Box::new(NoMergePolicy::default());
+        let merge_policy = Box::<NoMergePolicy>::default();
         index_writer.set_merge_policy(merge_policy);
         assert_eq!(
             format!("{:?}", index_writer.get_merge_policy()),
@@ -1813,8 +1813,8 @@ mod tests {
         }
 
         let num_docs_expected = expected_ids_and_num_occurrences
-            .iter()
-            .map(|(_, id_occurrences)| *id_occurrences as usize)
+            .values()
+            .map(|id_occurrences| *id_occurrences as usize)
             .sum::<usize>();
         assert_eq!(searcher.num_docs() as usize, num_docs_expected);
         assert_eq!(old_searcher.num_docs() as usize, num_docs_expected);
diff --git a/src/indexer/merger.rs b/src/indexer/merger.rs
index aa9d4df210..0f37852db8 100644
--- a/src/indexer/merger.rs
+++ b/src/indexer/merger.rs
@@ -366,7 +366,7 @@ impl IndexMerger {
                         .map(|doc| reader.num_vals(doc))
                         .sum()
                 } else {
-                    reader.total_num_vals() as u32
+                    reader.total_num_vals()
                 }
             })
             .sum();
@@ -968,7 +968,7 @@ impl IndexMerger {
                 let doc_bytes = doc_bytes_res?;
                 store_writer.store_bytes(&doc_bytes)?;
             } else {
-                return Err(DataCorruption::comment_only(&format!(
+                return Err(DataCorruption::comment_only(format!(
                     "unexpected missing document in docstore on merge, doc address \
                      {old_doc_addr:?}",
                 ))
diff --git a/src/indexer/segment_updater.rs b/src/indexer/segment_updater.rs
index c0269496c1..adfdb4250b 100644
--- a/src/indexer/segment_updater.rs
+++ b/src/indexer/segment_updater.rs
@@ -866,7 +866,7 @@ mod tests {
         }
         assert_eq!(indices.len(), 3);
 
-        let output_directory: Box<dyn Directory> = Box::new(RamDirectory::default());
+        let output_directory: Box<dyn Directory> = Box::<RamDirectory>::default();
         let index = merge_indices(&indices, output_directory)?;
         assert_eq!(index.schema(), schema);
diff --git a/src/indexer/stamper.rs b/src/indexer/stamper.rs
index 5ffd12b996..a0094edb16 100644
--- a/src/indexer/stamper.rs
+++ b/src/indexer/stamper.rs
@@ -16,11 +16,11 @@ mod atomic_impl {
 
     impl AtomicU64Wrapper {
         pub fn new(first_opstamp: Opstamp) -> AtomicU64Wrapper {
-            AtomicU64Wrapper(AtomicU64::new(first_opstamp as u64))
+            AtomicU64Wrapper(AtomicU64::new(first_opstamp))
         }
 
         pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 {
-            self.0.fetch_add(val as u64, order) as u64
+            self.0.fetch_add(val, order)
         }
 
         pub fn revert(&self, val: u64, order: Ordering) -> u64 {
@@ -77,7 +77,7 @@ impl Stamper {
     }
 
     pub fn stamp(&self) -> Opstamp {
-        self.0.fetch_add(1u64, Ordering::SeqCst) as u64
+        self.0.fetch_add(1u64, Ordering::SeqCst)
     }
 
     /// Given a desired count `n`, `stamps` returns an iterator that
diff --git a/src/lib.rs b/src/lib.rs
index f57b99d1e5..53ddde5644 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -177,7 +177,7 @@ impl DateTime {
     /// The given date/time is converted to UTC and the actual
     /// time zone is discarded.
     pub const fn from_utc(dt: OffsetDateTime) -> Self {
-        let timestamp_micros = dt.unix_timestamp() as i64 * 1_000_000 + dt.microsecond() as i64;
+        let timestamp_micros = dt.unix_timestamp() * 1_000_000 + dt.microsecond() as i64;
         Self { timestamp_micros }
     }
 
diff --git a/src/positions/reader.rs b/src/positions/reader.rs
index 5d179afdd2..bb2d35c1b0 100644
--- a/src/positions/reader.rs
+++ b/src/positions/reader.rs
@@ -71,7 +71,7 @@ impl PositionReader {
             .map(|num_bits| num_bits as usize)
             .sum();
         let num_bytes_to_skip = num_bits * COMPRESSION_BLOCK_SIZE / 8;
-        self.bit_widths.advance(num_blocks as usize);
+        self.bit_widths.advance(num_blocks);
         self.positions.advance(num_bytes_to_skip);
         self.anchor_offset += (num_blocks * COMPRESSION_BLOCK_SIZE) as u64;
     }
diff --git a/src/postings/per_field_postings_writer.rs b/src/postings/per_field_postings_writer.rs
index b12999d99f..f3d6d6534c 100644
--- a/src/postings/per_field_postings_writer.rs
+++ b/src/postings/per_field_postings_writer.rs
@@ -51,7 +51,7 @@ fn posting_writer_from_field_entry(field_entry: &FieldEntry) -> Box<dyn Postings
         | FieldType::Date(_)
         | FieldType::Bytes(_)
         | FieldType::IpAddr(_)
-        | FieldType::Facet(_) => Box::new(SpecializedPostingsWriter::<NothingRecorder>::default()),
+        | FieldType::Facet(_) => Box::<SpecializedPostingsWriter<NothingRecorder>>::default(),
         FieldType::JsonObject(ref json_object_options) => {
             if let Some(text_indexing_option) = json_object_options.get_text_indexing_options() {
                 match text_indexing_option.index_option() {
diff --git a/src/postings/serializer.rs b/src/postings/serializer.rs
index 76a5c95be7..6afa6edce1 100644
--- a/src/postings/serializer.rs
+++ b/src/postings/serializer.rs
@@ -465,7 +465,7 @@ impl<W: Write> PostingsSerializer<W> {
     /// When called after writing the postings of a term, this value is used as a
     /// end offset.
     fn written_bytes(&self) -> u64 {
-        self.output_write.written_bytes() as u64
+        self.output_write.written_bytes()
     }
 
     fn clear(&mut self) {
diff --git a/src/query/bitset/mod.rs b/src/query/bitset/mod.rs
index 6448e3e621..c1d4ed28c8 100644
--- a/src/query/bitset/mod.rs
+++ b/src/query/bitset/mod.rs
@@ -47,7 +47,7 @@ impl From<BitSet> for BitSetDocSet {
 impl DocSet for BitSetDocSet {
     fn advance(&mut self) -> DocId {
         if let Some(lower) = self.cursor_tinybitset.pop_lowest() {
-            self.doc = (self.cursor_bucket as u32 * 64u32) | lower;
+            self.doc = (self.cursor_bucket * 64u32) | lower;
             return self.doc;
         }
         if let Some(cursor_bucket) = self.docs.first_non_empty_bucket(self.cursor_bucket + 1) {
diff --git a/src/query/range_query_ip_fastfield.rs b/src/query/range_query_ip_fastfield.rs
index f73e0cf831..175d0f4a98 100644
--- a/src/query/range_query_ip_fastfield.rs
+++ b/src/query/range_query_ip_fastfield.rs
@@ -126,7 +126,7 @@ impl VecCursor {
     }
     #[inline]
     fn current(&self) -> Option<u32> {
-        self.docs.get(self.current_pos).map(|el| *el as u32)
+        self.docs.get(self.current_pos).copied()
     }
     fn get_cleared_data(&mut self) -> &mut Vec<u32> {
         self.docs.clear();
@@ -268,9 +268,9 @@ impl DocSet for IpRangeDocSet {
     #[inline]
     fn advance(&mut self) -> DocId {
         if let Some(docid) = self.loaded_docs.next() {
-            docid as u32
+            docid
         } else {
-            if self.next_fetch_start >= self.ip_addr_fast_field.num_docs() as u32 {
+            if self.next_fetch_start >= self.ip_addr_fast_field.num_docs() {
                 return TERMINATED;
             }
             self.fetch_block();
@@ -282,7 +282,4 @@ impl DocSet for IpRangeDocSet {
     fn doc(&self) -> DocId {
-        self.loaded_docs
-            .current()
-            .map(|el| el as u32)
-            .unwrap_or(TERMINATED)
+        self.loaded_docs.current().unwrap_or(TERMINATED)
     }
diff --git a/src/query/union.rs b/src/query/union.rs
index ce9f9b82d9..b1f23156a2 100644
--- a/src/query/union.rs
+++ b/src/query/union.rs
@@ -43,7 +43,7 @@ fn refill(
     min_doc: DocId,
 ) {
     unordered_drain_filter(scorers, |scorer| {
-        let horizon = min_doc + HORIZON as u32;
+        let horizon = min_doc + HORIZON;
         loop {
             let doc = scorer.doc();
             if doc >= horizon {
diff --git a/src/schema/date_time_options.rs b/src/schema/date_time_options.rs
index 3b8d5445b6..f6d5b62ffb 100644
--- a/src/schema/date_time_options.rs
+++ b/src/schema/date_time_options.rs
@@ -236,7 +236,7 @@ mod tests {
         )
         .unwrap();
 
-        let date_options_json = serde_json::to_value(&date_options).unwrap();
+        let date_options_json = serde_json::to_value(date_options).unwrap();
         assert_eq!(
             date_options_json,
             serde_json::json!({
diff --git a/src/store/index/mod.rs b/src/store/index/mod.rs
index af572e758b..5e39d8b2dd 100644
--- a/src/store/index/mod.rs
+++ b/src/store/index/mod.rs
@@ -193,8 +193,8 @@ mod tests {
         (0..max_len)
             .prop_flat_map(move |len: usize| {
                 (
-                    proptest::collection::vec(1usize..20, len as usize).prop_map(integrate_delta),
-                    proptest::collection::vec(1usize..26, len as usize).prop_map(integrate_delta),
+                    proptest::collection::vec(1usize..20, len).prop_map(integrate_delta),
+                    proptest::collection::vec(1usize..26, len).prop_map(integrate_delta),
                 )
                     .prop_map(|(docs, offsets)| {
                         (0..docs.len() - 1)
diff --git a/src/store/reader.rs b/src/store/reader.rs
index ff0b4bee9a..c103c37d2a 100644
--- a/src/store/reader.rs
+++ b/src/store/reader.rs
@@ -66,11 +66,7 @@ impl BlockCache {
 
     #[cfg(test)]
     fn peek_lru(&self) -> Option<usize> {
-        self.cache
-            .lock()
-            .unwrap()
-            .peek_lru()
-            .map(|(&k, _)| k as usize)
+        self.cache.lock().unwrap().peek_lru().map(|(&k, _)| k)
     }
 }
 
diff --git a/src/store/store_compressor.rs b/src/store/store_compressor.rs
index bfa1744396..c528790480 100644
--- a/src/store/store_compressor.rs
+++ b/src/store/store_compressor.rs
@@ -142,7 +142,7 @@ impl BlockCompressorImpl {
     }
 
     fn close(mut self) -> io::Result<()> {
-        let header_offset: u64 = self.writer.written_bytes() as u64;
+        let header_offset: u64 = self.writer.written_bytes();
         let docstore_footer =
             DocStoreFooter::new(header_offset, Decompressor::from(self.compressor));
         self.offset_index_writer.serialize_into(&mut self.writer)?;
diff --git a/src/termdict/fst_termdict/term_info_store.rs b/src/termdict/fst_termdict/term_info_store.rs
index 2bcea6bf5a..fabe47ed1b 100644
--- a/src/termdict/fst_termdict/term_info_store.rs
+++ b/src/termdict/fst_termdict/term_info_store.rs
@@ -69,7 +69,7 @@ impl TermInfoBlockMeta {
         let posting_end_addr = posting_start_addr + num_bits;
         let positions_start_addr = posting_start_addr + self.postings_offset_nbits as usize;
         // the position_end is the positions_start of the next term info.
-        let positions_end_addr = positions_start_addr + num_bits as usize;
+        let positions_end_addr = positions_start_addr + num_bits;
 
         let doc_freq_addr = positions_start_addr + self.positions_offset_nbits as usize;
diff --git a/src/termdict/fst_termdict/termdict.rs b/src/termdict/fst_termdict/termdict.rs
index a2cde8165f..563c9a3bc4 100644
--- a/src/termdict/fst_termdict/termdict.rs
+++ b/src/termdict/fst_termdict/termdict.rs
@@ -80,7 +80,7 @@ where W: Write
             self.term_info_store_writer
                 .serialize(&mut counting_writer)?;
             let footer_size = counting_writer.written_bytes();
-            (footer_size as u64).serialize(&mut counting_writer)?;
+            footer_size.serialize(&mut counting_writer)?;
         }
         Ok(file)
     }
diff --git a/sstable/src/sstable_index.rs b/sstable/src/sstable_index.rs
index c7e132e89d..b283b961c8 100644
--- a/sstable/src/sstable_index.rs
+++ b/sstable/src/sstable_index.rs
@@ -133,7 +133,7 @@ mod tests {
         super::find_shorter_str_in_between(&mut left_buf, right);
         assert!(left_buf.len() <= left.len());
         assert!(left <= &left_buf);
-        assert!(&left_buf[..] < &right);
+        assert!(&left_buf[..] < right);
     }
 
     #[test]
diff --git a/sstable/src/value.rs b/sstable/src/value.rs
index 969dae2f26..05d0d12dec 100644
--- a/sstable/src/value.rs
+++ b/sstable/src/value.rs
@@ -85,7 +85,7 @@ impl ValueReader for U64MonotonicReader {
         self.vals.clear();
         let mut prev_val = 0u64;
         for _ in 0..len {
-            let delta = reader.deserialize_u64() as u64;
+            let delta = reader.deserialize_u64();
             let val = prev_val + delta;
             self.vals.push(val);
             prev_val = val;
diff --git a/stacker/src/expull.rs b/stacker/src/expull.rs
index 7ebb59f50e..e9d34501eb 100644
--- a/stacker/src/expull.rs
+++ b/stacker/src/expull.rs
@@ -62,7 +62,7 @@ fn len_to_capacity(len: u32) -> CapacityResult {
 pub struct ExpUnrolledLinkedList {
     len: u32,
     tail: Addr,
-    inlined_data: [u8; INLINED_BLOCK_LEN as usize],
+    inlined_data: [u8; INLINED_BLOCK_LEN],
 }
 
 pub struct ExpUnrolledLinkedListWriter<'a> {
@@ -125,7 +125,7 @@ impl Default for ExpUnrolledLinkedList {
         ExpUnrolledLinkedList {
             len: 0u32,
             tail: Addr::null_pointer(),
-            inlined_data: [0u8; INLINED_BLOCK_LEN as usize],
+            inlined_data: [0u8; INLINED_BLOCK_LEN],
        }
    }
 }
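
Note: most hunks above fix the same two warn-by-default lints, clippy::unnecessary_cast
(casting a value to the type it already has, e.g. `count as u32` on a `u32`) and
clippy::box_default (`Box::new(T::default())` instead of `Box::<T>::default()`). A minimal,
self-contained sketch of both patterns follows; the names `BLOCK_SIZE` and `Config` are
illustrative placeholders, not identifiers from tantivy.

    const BLOCK_SIZE: usize = 128;

    #[derive(Default)]
    struct Config {
        threads: usize,
    }

    fn main() {
        let len: usize = 640;
        // Before: `len / BLOCK_SIZE as usize` -- `BLOCK_SIZE` is already `usize`,
        // so clippy::unnecessary_cast asks for the cast to be dropped.
        let num_blocks = len / BLOCK_SIZE;

        // Before: `Box::new(Config::default())` -- clippy::box_default suggests
        // using Box's own `Default` impl (`impl Default for Box<T> where T: Default`).
        let config = Box::<Config>::default();

        println!("{} blocks, {} threads", num_blocks, config.threads);
    }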