Merge pull request #238 from moka-rs/max-capacity-zero

Do not cache anything when max capacity is zero

tatsuya6502 authored Mar 4, 2023
2 parents 28e4b4f + 07113f7 commit 0dac6fc
Showing 6 changed files with 126 additions and 22 deletions.
11 changes: 11 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,15 @@
 # Moka Cache — Change Log
 
+## Version 0.10.1
+
+### Changed
+
+- Now `sync` and `future` caches will not cache anything when the max capacity is set
+  to zero ([#230][gh-issue-0230]):
+    - Previously, they would cache some entries for a short time (< 0.5 secs) even
+      though the max capacity was zero.
+
+
 ## Version 0.10.0
 
 ### Breaking Changes
@@ -575,6 +585,7 @@ The minimum supported Rust version (MSRV) is now 1.51.0 (2021-03-25).
 [gh-Swatinem]: https://github.com/Swatinem
 [gh-tinou98]: https://github.com/tinou98
 
+[gh-issue-0230]: https://github.com/moka-rs/moka/issues/230/
 [gh-issue-0212]: https://github.com/moka-rs/moka/issues/212/
 [gh-issue-0207]: https://github.com/moka-rs/moka/issues/207/
 [gh-issue-0162]: https://github.com/moka-rs/moka/issues/162/
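For illustration, here is a minimal sketch of the behavior this changelog entry describes, assuming moka 0.10.1's `sync::Cache` API as exercised by the tests later in this commit (`new`, `insert`, `get`, `entry_count`):

use moka::sync::Cache;

fn main() {
    // A cache whose max capacity is zero.
    let cache: Cache<u32, &str> = Cache::new(0);

    cache.insert(1, "one");

    // As of 0.10.1, the entry is never stored, not even briefly.
    assert!(cache.get(&1).is_none());
    assert_eq!(cache.entry_count(), 0);
}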
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "moka"
-version = "0.10.0"
+version = "0.10.1"
 edition = "2018"
 rust-version = "1.51"

26 changes: 26 additions & 0 deletions src/future/cache.rs
@@ -1214,6 +1214,10 @@ where
     }
 
     fn do_blocking_insert(&self, key: K, value: V) {
+        if self.base.is_map_disabled() {
+            return;
+        }
+
         let hash = self.base.hash(&key);
         let key = Arc::new(key);
         let (op, now) = self.base.do_insert_with_hash(key, hash, value);
@@ -1722,6 +1726,10 @@ where
     }
 
     async fn insert_with_hash(&self, key: Arc<K>, hash: u64, value: V) {
+        if self.base.is_map_disabled() {
+            return;
+        }
+
         let (op, now) = self.base.do_insert_with_hash(key, hash, value);
         let hk = self.base.housekeeper.as_ref();
         Self::schedule_write_op(
@@ -1855,6 +1863,24 @@ mod tests {
     use parking_lot::Mutex;
     use std::{convert::Infallible, sync::Arc, time::Duration};
 
+    #[tokio::test]
+    async fn max_capacity_zero() {
+        let mut cache = Cache::new(0);
+        cache.reconfigure_for_testing();
+
+        // Make the cache exterior immutable.
+        let cache = cache;
+
+        cache.insert(0, ()).await;
+
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        cache.sync();
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        assert_eq!(cache.entry_count(), 0)
+    }
+
     #[tokio::test]
     async fn basic_single_async_task() {
         // The following `Vec`s will hold actual and expected notifications.
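Seen from outside the crate, the same guarantee can be sketched as follows. This assumes moka 0.10.1 with the `future` feature and a tokio runtime, and relies on the fact (visible in the test above) that in 0.10.x `insert` is async while `get` and `entry_count` are synchronous:

use moka::future::Cache;

#[tokio::main]
async fn main() {
    let cache: Cache<u32, String> = Cache::new(0);

    // With a max capacity of zero, this insert is effectively a no-op.
    cache.insert(1, String::from("one")).await;

    assert!(cache.get(&1).is_none());
    assert_eq!(cache.entry_count(), 0);
}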
22 changes: 22 additions & 0 deletions src/sync/cache.rs
@@ -1602,6 +1602,10 @@ where
     }
 
     pub(crate) fn insert_with_hash(&self, key: Arc<K>, hash: u64, value: V) {
+        if self.base.is_map_disabled() {
+            return;
+        }
+
         let (op, now) = self.base.do_insert_with_hash(key, hash, value);
         let hk = self.base.housekeeper.as_ref();
         Self::schedule_write_op(
@@ -1912,6 +1916,24 @@ mod tests {
     use parking_lot::Mutex;
     use std::{convert::Infallible, sync::Arc, time::Duration};
 
+    #[test]
+    fn max_capacity_zero() {
+        let mut cache = Cache::new(0);
+        cache.reconfigure_for_testing();
+
+        // Make the cache exterior immutable.
+        let cache = cache;
+
+        cache.insert(0, ());
+
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        cache.sync();
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        assert_eq!(cache.entry_count(), 0)
+    }
+
     #[test]
     fn basic_single_thread() {
         run_test(DeliveryMode::Immediate);
25 changes: 22 additions & 3 deletions src/sync/segment.rs
@@ -720,9 +720,10 @@ where

         let actual_num_segments = num_segments.next_power_of_two();
         let segment_shift = 64 - actual_num_segments.trailing_zeros();
-        // TODO: Round up.
-        let seg_max_capacity = max_capacity.map(|n| n / actual_num_segments as u64);
-        let seg_init_capacity = initial_capacity.map(|cap| cap / actual_num_segments);
+        let seg_max_capacity =
+            max_capacity.map(|n| (n as f64 / actual_num_segments as f64).ceil() as u64);
+        let seg_init_capacity =
+            initial_capacity.map(|cap| (cap as f64 / actual_num_segments as f64).ceil() as usize);
         // NOTE: We cannot initialize the segments as `vec![cache; actual_num_segments]`
         // because Cache::clone() does not clone its inner but shares the same inner.
         let segments = (0..actual_num_segments)
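The removed lines rounded the per-segment capacity down, so a segmented cache could end up with less total capacity than requested: a max capacity of 100 over 8 segments gave 8 × 12 = 96, while rounding up gives 8 × 13 = 104 ≥ 100. As a side note, the same rounding can be done in pure integer arithmetic; the sketch below is a hypothetical alternative (not part of this commit) that also stays exact for values above 2^53, where the f64 round trip can lose precision:

fn div_ceil_u64(n: u64, d: u64) -> u64 {
    // Round the quotient up; unlike `(n + d - 1) / d`, this cannot overflow.
    assert!(d > 0);
    n / d + u64::from(n % d != 0)
}

fn main() {
    assert_eq!(div_ceil_u64(100, 8), 13); // rounded up, so 8 segments hold >= 100
    assert_eq!(div_ceil_u64(96, 8), 12); // exact divisions are unchanged
}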
@@ -789,6 +790,24 @@ mod tests {
     use parking_lot::Mutex;
     use std::{sync::Arc, time::Duration};
 
+    #[test]
+    fn max_capacity_zero() {
+        let mut cache = SegmentedCache::new(0, 1);
+        cache.reconfigure_for_testing();
+
+        // Make the cache exterior immutable.
+        let cache = cache;
+
+        cache.insert(0, ());
+
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        cache.sync();
+        assert!(!cache.contains_key(&0));
+        assert!(cache.get(&0).is_none());
+        assert_eq!(cache.entry_count(), 0)
+    }
+
     #[test]
     fn basic_single_thread() {
         run_test(DeliveryMode::Immediate);
62 changes: 44 additions & 18 deletions src/sync_base/base_cache.rs
@@ -102,6 +102,10 @@ impl<K, V, S> BaseCache<K, V, S> {
         self.inner.weighted_size()
     }
 
+    pub(crate) fn is_map_disabled(&self) -> bool {
+        self.inner.max_capacity == Some(0)
+    }
+
     #[inline]
     pub(crate) fn is_removal_notifier_enabled(&self) -> bool {
         self.inner.is_removal_notifier_enabled()
@@ -163,8 +167,15 @@ where
         invalidator_enabled: bool,
         housekeeper_conf: housekeeper::Configuration,
     ) -> Self {
-        let (r_snd, r_rcv) = crossbeam_channel::bounded(READ_LOG_SIZE);
-        let (w_snd, w_rcv) = crossbeam_channel::bounded(WRITE_LOG_SIZE);
+        let (r_size, w_size) = if max_capacity == Some(0) {
+            (0, 0)
+        } else {
+            (READ_LOG_SIZE, WRITE_LOG_SIZE)
+        };
+
+        let (r_snd, r_rcv) = crossbeam_channel::bounded(r_size);
+        let (w_snd, w_rcv) = crossbeam_channel::bounded(w_size);
+
         let inner = Arc::new(Inner::new(
             name,
             max_capacity,
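This sizing works because `crossbeam_channel::bounded(0)` creates a zero-capacity (rendezvous) channel: it has no buffer, so `try_send` fails unless a receiver is blocked in `recv` at that exact moment. Assuming log entries are recorded with `try_send` and dropped when the channel is full, a zero-sized read log records nothing, and write ops never reach their channel at all thanks to the `is_map_disabled` early returns added above. A standalone illustration (not moka code):

use crossbeam_channel::{bounded, TrySendError};

fn main() {
    // A zero-capacity channel has no buffer at all.
    let (snd, _rcv) = bounded::<u32>(0);

    // With no receiver waiting in `recv`, `try_send` fails immediately.
    assert!(matches!(snd.try_send(42), Err(TrySendError::Full(42))));
}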
@@ -283,6 +294,10 @@ where
         R: Fn(ReadOp<K, V>, Instant),
         I: FnMut(&V) -> bool,
     {
+        if self.is_map_disabled() {
+            return None;
+        }
+
         let now = self.current_time_from_expiration_clock();
 
         let maybe_entry = self
@@ -905,12 +920,16 @@ where
         time_to_idle: Option<Duration>,
         invalidator_enabled: bool,
     ) -> Self {
-        let initial_capacity = initial_capacity
-            .map(|cap| cap + WRITE_LOG_SIZE)
-            .unwrap_or_default();
-        const NUM_SEGMENTS: usize = 64;
+        let (num_segments, initial_capacity) = if max_capacity == Some(0) {
+            (1, 0)
+        } else {
+            let ic = initial_capacity
+                .map(|cap| cap + WRITE_LOG_SIZE)
+                .unwrap_or_default();
+            (64, ic)
+        };
         let cache = crate::cht::SegmentedHashMap::with_num_segments_capacity_and_hasher(
-            NUM_SEGMENTS,
+            num_segments,
             initial_capacity,
             build_hasher.clone(),
         );
@@ -1100,16 +1119,20 @@ where
     S: BuildHasher + Clone + Send + Sync + 'static,
 {
     fn sync(&self, max_repeats: usize) -> Option<SyncPace> {
+        if self.max_capacity == Some(0) {
+            return None;
+        }
+
         let mut deqs = self.deques.lock();
         let mut calls = 0;
-        let mut should_sync = true;
-
         let current_ec = self.entry_count.load();
         let current_ws = self.weighted_size.load();
         let mut eviction_state =
             EvictionState::new(current_ec, current_ws, self.removal_notifier.as_ref());
 
-        while should_sync && calls <= max_repeats {
+        let mut should_process_logs = true;
+
+        while should_process_logs && calls <= max_repeats {
             let r_len = self.read_op_ch.len();
             if r_len > 0 {
                 self.apply_reads(&mut deqs, r_len);
@@ -1125,7 +1148,7 @@ where
             }
 
             calls += 1;
-            should_sync = self.read_op_ch.len() >= READ_LOG_FLUSH_POINT
+            should_process_logs = self.read_op_ch.len() >= READ_LOG_FLUSH_POINT
                 || self.write_op_ch.len() >= WRITE_LOG_FLUSH_POINT;
         }
 
@@ -1171,7 +1194,7 @@ where

         crossbeam_epoch::pin().flush();
 
-        if should_sync {
+        if should_process_logs {
             Some(SyncPace::Fast)
         } else if self.write_op_ch.len() <= WRITE_LOG_LOW_WATER_MARK {
             Some(SyncPace::Normal)
@@ -1210,12 +1233,15 @@ where

     #[inline]
     fn should_enable_frequency_sketch(&self, counters: &EvictionCounters) -> bool {
-        if self.frequency_sketch_enabled.load(Ordering::Acquire) {
-            false
-        } else if let Some(max_cap) = self.max_capacity {
-            counters.weighted_size >= max_cap / 2
-        } else {
-            false
+        match self.max_capacity {
+            None | Some(0) => false,
+            Some(max_cap) => {
+                if self.frequency_sketch_enabled.load(Ordering::Acquire) {
+                    false // The frequency sketch is already enabled.
+                } else {
+                    counters.weighted_size >= max_cap / 2
+                }
+            }
         }
     }

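Restated, the new predicate enables the frequency sketch (moka's TinyLFU admission filter) only for a bounded, non-zero-capacity cache, and only once the weighted size reaches half of the max capacity. A hypothetical standalone restatement of the same logic (names are illustrative, not moka API):

fn should_enable_sketch(max_capacity: Option<u64>, already_enabled: bool, weighted_size: u64) -> bool {
    match max_capacity {
        // Unbounded caches and zero-capacity (disabled) caches never need the sketch.
        None | Some(0) => false,
        // Nothing to do if it is already on.
        Some(_) if already_enabled => false,
        // Enable once the cache is at least half full by weight.
        Some(max_cap) => weighted_size >= max_cap / 2,
    }
}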
