From b1bd09c52afb1fa51f0dac1f1704c0a6ab32e867 Mon Sep 17 00:00:00 2001
From: Tatsuya Kawano
Date: Sat, 4 Mar 2023 17:22:29 +0800
Subject: [PATCH 1/3] Do not cache anything when max capacity is zero

---
 src/future/cache.rs         | 26 +++++++++++++++++
 src/sync/cache.rs           | 22 ++++++++++++++
 src/sync/segment.rs         | 25 ++++++++++++++--
 src/sync_base/base_cache.rs | 58 +++++++++++++++++++++++++------------
 4 files changed, 110 insertions(+), 21 deletions(-)

diff --git a/src/future/cache.rs b/src/future/cache.rs
index e2204624..b5f19d21 100644
--- a/src/future/cache.rs
+++ b/src/future/cache.rs
@@ -1214,6 +1214,10 @@ where
     }
 
     fn do_blocking_insert(&self, key: K, value: V) {
+        if self.base.is_map_disabled() {
+            return;
+        }
+
         let hash = self.base.hash(&key);
         let key = Arc::new(key);
         let (op, now) = self.base.do_insert_with_hash(key, hash, value);
@@ -1722,6 +1726,10 @@ where
     }
 
     async fn insert_with_hash(&self, key: Arc<K>, hash: u64, value: V) {
+        if self.base.is_map_disabled() {
+            return;
+        }
+
         let (op, now) = self.base.do_insert_with_hash(key, hash, value);
         let hk = self.base.housekeeper.as_ref();
         Self::schedule_write_op(
@@ -1855,6 +1863,24 @@ mod tests {
     use parking_lot::Mutex;
     use std::{convert::Infallible, sync::Arc, time::Duration};
 
+    #[tokio::test]
+    async fn max_capacity_zero() {
+        let mut cache = Cache::new(0);
+        cache.reconfigure_for_testing();
+
+        // Make the cache exterior immutable.
+        let cache = cache;
+
+        cache.insert(0, ()).await;
+
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        cache.sync();
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        assert_eq!(cache.entry_count(), 0)
+    }
+
     #[tokio::test]
     async fn basic_single_async_task() {
         // The following `Vec`s will hold actual and expected notifications.
diff --git a/src/sync/cache.rs b/src/sync/cache.rs
index 9dffacdc..3a423b25 100644
--- a/src/sync/cache.rs
+++ b/src/sync/cache.rs
@@ -1602,6 +1602,10 @@ where
     }
 
     pub(crate) fn insert_with_hash(&self, key: Arc<K>, hash: u64, value: V) {
+        if self.base.is_map_disabled() {
+            return;
+        }
+
         let (op, now) = self.base.do_insert_with_hash(key, hash, value);
         let hk = self.base.housekeeper.as_ref();
         Self::schedule_write_op(
@@ -1912,6 +1916,24 @@ mod tests {
     use parking_lot::Mutex;
     use std::{convert::Infallible, sync::Arc, time::Duration};
 
+    #[test]
+    fn max_capacity_zero() {
+        let mut cache = Cache::new(0);
+        cache.reconfigure_for_testing();
+
+        // Make the cache exterior immutable.
+        let cache = cache;
+
+        cache.insert(0, ());
+
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        cache.sync();
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        assert_eq!(cache.entry_count(), 0)
+    }
+
     #[test]
     fn basic_single_thread() {
         run_test(DeliveryMode::Immediate);
diff --git a/src/sync/segment.rs b/src/sync/segment.rs
index 04d6db4b..c1eabe04 100644
--- a/src/sync/segment.rs
+++ b/src/sync/segment.rs
@@ -720,9 +720,10 @@ where
 
         let actual_num_segments = num_segments.next_power_of_two();
         let segment_shift = 64 - actual_num_segments.trailing_zeros();
-        // TODO: Round up.
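The `src/sync/segment.rs` hunk in the patch above also replaces the truncating division that split the max capacity across segments with a ceiling division, so the per-segment capacities can no longer add up to less than the requested maximum. A minimal sketch of that arithmetic, using a hypothetical `per_segment_capacity` helper rather than the crate's internal code:

    // Hypothetical helper mirroring the rounding now used in `sync/segment.rs`:
    // divide the requested capacity across the segments, rounding up so the
    // per-segment capacities never sum to less than the requested maximum.
    fn per_segment_capacity(max_capacity: u64, num_segments: u64) -> u64 {
        (max_capacity as f64 / num_segments as f64).ceil() as u64
    }

    fn main() {
        // Truncating division: 10 / 4 = 2 per segment, only 8 in total.
        assert_eq!(10u64 / 4, 2);
        // Ceiling division: 3 per segment, 12 in total, never below 10.
        assert_eq!(per_segment_capacity(10, 4), 3);
    }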
-        let seg_max_capacity = max_capacity.map(|n| n / actual_num_segments as u64);
-        let seg_init_capacity = initial_capacity.map(|cap| cap / actual_num_segments);
+        let seg_max_capacity =
+            max_capacity.map(|n| (n as f64 / actual_num_segments as f64).ceil() as u64);
+        let seg_init_capacity =
+            initial_capacity.map(|cap| (cap as f64 / actual_num_segments as f64).ceil() as usize);
         // NOTE: We cannot initialize the segments as `vec![cache; actual_num_segments]`
         // because Cache::clone() does not clone its inner but shares the same inner.
         let segments = (0..actual_num_segments)
@@ -789,6 +790,24 @@ mod tests {
     use parking_lot::Mutex;
     use std::{sync::Arc, time::Duration};
 
+    #[test]
+    fn max_capacity_zero() {
+        let mut cache = SegmentedCache::new(0, 1);
+        cache.reconfigure_for_testing();
+
+        // Make the cache exterior immutable.
+        let cache = cache;
+
+        cache.insert(0, ());
+
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        cache.sync();
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        assert_eq!(cache.entry_count(), 0)
+    }
+
     #[test]
     fn basic_single_thread() {
         run_test(DeliveryMode::Immediate);
diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs
index 8fcdf571..fb26f07a 100644
--- a/src/sync_base/base_cache.rs
+++ b/src/sync_base/base_cache.rs
@@ -102,6 +102,10 @@ impl<K, V, S> BaseCache<K, V, S> {
         self.inner.weighted_size()
     }
 
+    pub(crate) fn is_map_disabled(&self) -> bool {
+        self.inner.max_capacity == Some(0)
+    }
+
     #[inline]
     pub(crate) fn is_removal_notifier_enabled(&self) -> bool {
         self.inner.is_removal_notifier_enabled()
@@ -163,8 +167,15 @@ where
         invalidator_enabled: bool,
         housekeeper_conf: housekeeper::Configuration,
     ) -> Self {
-        let (r_snd, r_rcv) = crossbeam_channel::bounded(READ_LOG_SIZE);
-        let (w_snd, w_rcv) = crossbeam_channel::bounded(WRITE_LOG_SIZE);
+        let (r_size, w_size) = if max_capacity == Some(0) {
+            (0, 0)
+        } else {
+            (READ_LOG_SIZE, WRITE_LOG_SIZE)
+        };
+
+        let (r_snd, r_rcv) = crossbeam_channel::bounded(r_size);
+        let (w_snd, w_rcv) = crossbeam_channel::bounded(w_size);
+
         let inner = Arc::new(Inner::new(
             name,
             max_capacity,
@@ -905,12 +916,16 @@ where
         time_to_idle: Option<Duration>,
         invalidator_enabled: bool,
     ) -> Self {
-        let initial_capacity = initial_capacity
-            .map(|cap| cap + WRITE_LOG_SIZE)
-            .unwrap_or_default();
-        const NUM_SEGMENTS: usize = 64;
+        let (num_segments, initial_capacity) = if max_capacity == Some(0) {
+            (1, 0)
+        } else {
+            let ic = initial_capacity
+                .map(|cap| cap + WRITE_LOG_SIZE)
+                .unwrap_or_default();
+            (64, ic)
+        };
         let cache = crate::cht::SegmentedHashMap::with_num_segments_capacity_and_hasher(
-            NUM_SEGMENTS,
+            num_segments,
             initial_capacity,
             build_hasher.clone(),
         );
@@ -1100,16 +1115,20 @@ where
     S: BuildHasher + Clone + Send + Sync + 'static,
 {
     fn sync(&self, max_repeats: usize) -> Option<SyncPace> {
+        if self.max_capacity == Some(0) {
+            return None;
+        }
+
         let mut deqs = self.deques.lock();
         let mut calls = 0;
-        let mut should_sync = true;
-
         let current_ec = self.entry_count.load();
         let current_ws = self.weighted_size.load();
         let mut eviction_state =
             EvictionState::new(current_ec, current_ws, self.removal_notifier.as_ref());
 
-        while should_sync && calls <= max_repeats {
+        let mut should_process_logs = true;
+
+        while should_process_logs && calls <= max_repeats {
             let r_len = self.read_op_ch.len();
             if r_len > 0 {
                 self.apply_reads(&mut deqs, r_len);
@@ -1125,7 +1144,7 @@ where
             }
 
             calls += 1;
-            should_sync = self.read_op_ch.len() >= READ_LOG_FLUSH_POINT
+            should_process_logs = self.read_op_ch.len() >= READ_LOG_FLUSH_POINT
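The CHANGELOG entry above describes the user-visible effect of the change: with a max capacity of zero, inserts are rejected up front and reads always miss, instead of entries lingering until the next housekeeping run. A minimal sketch of that behavior against the public `moka::sync::Cache` API (it mirrors the `max_capacity_zero` tests added in the first patch and assumes the crate's default `sync` feature is enabled):

    use moka::sync::Cache;

    fn main() {
        // A cache whose max capacity is zero should never store anything.
        let cache: Cache<u32, String> = Cache::new(0);

        cache.insert(1, String::from("one"));

        // The entry is rejected immediately, so reads miss right away rather
        // than only after the housekeeping tasks have run.
        assert!(cache.get(&1).is_none());
        assert!(!cache.contains_key(&1));
    }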
                || self.write_op_ch.len() >= WRITE_LOG_FLUSH_POINT;
         }
 
@@ -1171,7 +1190,7 @@ where
 
         crossbeam_epoch::pin().flush();
 
-        if should_sync {
+        if should_process_logs {
             Some(SyncPace::Fast)
         } else if self.write_op_ch.len() <= WRITE_LOG_LOW_WATER_MARK {
             Some(SyncPace::Normal)
@@ -1210,12 +1229,15 @@ where
 
     #[inline]
     fn should_enable_frequency_sketch(&self, counters: &EvictionCounters) -> bool {
-        if self.frequency_sketch_enabled.load(Ordering::Acquire) {
-            false
-        } else if let Some(max_cap) = self.max_capacity {
-            counters.weighted_size >= max_cap / 2
-        } else {
-            false
+        match self.max_capacity {
+            None | Some(0) => false,
+            Some(max_cap) => {
+                if self.frequency_sketch_enabled.load(Ordering::Acquire) {
+                    false // The frequency sketch is already enabled.
+                } else {
+                    counters.weighted_size >= max_cap / 2
+                }
+            }
         }
     }

From 2953d5d980b3d8816f02d8855fe2abe35945d008 Mon Sep 17 00:00:00 2001
From: Tatsuya Kawano
Date: Sat, 4 Mar 2023 18:01:34 +0800
Subject: [PATCH 2/3] Do not cache anything when max capacity is zero

---
 CHANGELOG.md                | 11 +++++++++++
 src/sync_base/base_cache.rs |  4 ++++
 2 files changed, 15 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 193f986d..9f9137a8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,15 @@
 # Moka Cache — Change Log
 
+## Version 0.10.1
+
+### Changed
+
+- Now `sync` and `future` caches will not cache anything when the max capacity is set
+  to zero ([#230][gh-issue-0230]):
+    - Previously, they would cache some entries for a short time (< 0.5 secs) even
+      though the max capacity was zero.
+
+
 ## Version 0.10.0
 
 ### Breaking Changes
@@ -575,6 +585,7 @@ The minimum supported Rust version (MSRV) is now 1.51.0 (2021-03-25).
 
 [gh-Swatinem]: https://github.com/Swatinem
 [gh-tinou98]: https://github.com/tinou98
+[gh-issue-0230]: https://github.com/moka-rs/moka/issues/230/
 [gh-issue-0212]: https://github.com/moka-rs/moka/issues/212/
 [gh-issue-0207]: https://github.com/moka-rs/moka/issues/207/
 [gh-issue-0162]: https://github.com/moka-rs/moka/issues/162/
diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs
index fb26f07a..3eedbdc8 100644
--- a/src/sync_base/base_cache.rs
+++ b/src/sync_base/base_cache.rs
@@ -294,6 +294,10 @@ where
         R: Fn(ReadOp<K, V>, Instant),
         I: FnMut(&V) -> bool,
     {
+        if self.is_map_disabled() {
+            return None;
+        }
+
         let now = self.current_time_from_expiration_clock();
 
         let maybe_entry = self

From 07113f71c7a8c93ee8e7a5f9f71dbb654e060844 Mon Sep 17 00:00:00 2001
From: Tatsuya Kawano
Date: Sat, 4 Mar 2023 18:12:50 +0800
Subject: [PATCH 3/3] Bump the version to v0.10.1

---
 Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Cargo.toml b/Cargo.toml
index d05097fe..20512a88 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "moka"
-version = "0.10.0"
+version = "0.10.1"
 edition = "2018"
 rust-version = "1.51"