diff --git a/.vscode/settings.json b/.vscode/settings.json index 0a88a2ee..9d1719f1 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -33,6 +33,7 @@ "Hasher", "Kawano", "mapref", + "Miri", "Moka", "mpsc", "MSRV", diff --git a/CHANGELOG.md b/CHANGELOG.md index c3c548e7..161e8541 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,31 @@ # Moka Cache — Change Log +## Version 0.12.0 (Currently Beta) + +**IMPORTANT**: This release has major breaking changes. + +- `future::Cache` + - The thread pool was removed from `future::Cache`. It no longer spawns + background threads. + - The `notification::DeliveryMode` for eviction listener was changed from + `Queued` to `Immediate`. + - To support these changes, some of the APIs were changed. Please see the + [MIGRATION-GUIDE.md](./MIGRATION-GUIDE.md#migrating-to-v0120-from-a-prior-version) + for more details. +- `sync::Cache` and `sync::SegmentedCache` + - As of 0.12.0-beta.1, no breaking changes have been made to these caches. + - However, the future beta releases will have the following changes: + - (Not in 0.12.0-beta.1) `sync` caches will be no longer enabled by default. + Use a crate feature `sync` to enable it. + - (Not in 0.12.0-beta.1) The thread pool will be disabled by default. + +### Changed + +- Remove the thread pool from `future::Cache`. ([#294][gh-pull-0294]) +- Add support for `Immediate` notification delivery mode to future cache. + ([#228][gh-issue-0228]) + + ## Version 0.11.3 ### Fixed @@ -671,6 +697,7 @@ The minimum supported Rust version (MSRV) is now 1.51.0 (Mar 25, 2021). [gh-issue-0243]: https://github.com/moka-rs/moka/issues/243/ [gh-issue-0242]: https://github.com/moka-rs/moka/issues/242/ [gh-issue-0230]: https://github.com/moka-rs/moka/issues/230/ +[gh-issue-0228]: https://github.com/moka-rs/moka/issues/228/ [gh-issue-0212]: https://github.com/moka-rs/moka/issues/212/ [gh-issue-0207]: https://github.com/moka-rs/moka/issues/207/ [gh-issue-0162]: https://github.com/moka-rs/moka/issues/162/ @@ -686,6 +713,7 @@ The minimum supported Rust version (MSRV) is now 1.51.0 (Mar 25, 2021). [gh-issue-0031]: https://github.com/moka-rs/moka/issues/31/ [gh-pull-0295]: https://github.com/moka-rs/moka/pull/295/ +[gh-pull-0294]: https://github.com/moka-rs/moka/pull/294/ [gh-pull-0277]: https://github.com/moka-rs/moka/pull/277/ [gh-pull-0275]: https://github.com/moka-rs/moka/pull/275/ [gh-pull-0272]: https://github.com/moka-rs/moka/pull/272/ diff --git a/Cargo.toml b/Cargo.toml index cc4617ec..4cbec117 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "moka" -version = "0.11.3" +version = "0.12.0-beta.1" edition = "2018" # Rust 1.65 was released on Nov 3, 2022. rust-version = "1.65" @@ -20,10 +20,10 @@ default = ["sync", "atomic64", "quanta"] # This feature is enabled by default. Disable it when you do not need # `moka::sync::{Cache, SegmentedCache}` -sync = ["_core"] +sync = ["scheduled-thread-pool"] # Enable this feature to use `moka::future::Cache`. -future = ["_core", "async-io", "async-lock", "futures-util"] +future = ["async-lock", "async-trait", "futures-util"] # Enable this feature to activate optional logging from caches. # Currently cache will emit log only when it encounters a panic in user provided @@ -46,52 +46,37 @@ js = ["uuid/js"] # performance impacts and is intended for debugging purpose. unstable-debug-counters = ["future"] -# A feature used internally. 
-_core = [ - "crossbeam-channel", - "crossbeam-epoch", - "crossbeam-utils", - "once_cell", - "parking_lot", - "scheduled-thread-pool", - "smallvec", - "tagptr", - "thiserror", - "triomphe", - "uuid", -] - [dependencies] - -# The "_core" dependencies used by "sync" and "future" features. -crossbeam-channel = { version = "0.5.5", optional = true } -crossbeam-utils = { version = "0.8", optional = true } -once_cell = { version = "1.7", optional = true } -parking_lot = { version = "0.12", optional = true } -scheduled-thread-pool = { version = "0.2.7", optional = true } -smallvec = { version = "1.8", optional = true } -tagptr = { version = "0.2", optional = true } +crossbeam-channel = { version = "0.5.5" } +crossbeam-epoch = { version = "0.9.9" } +crossbeam-utils = { version = "0.8" } +once_cell = { version = "1.7" } +parking_lot = { version = "0.12" } +smallvec = { version = "1.8" } +tagptr = { version = "0.2" } +thiserror = { version = "1.0" } +uuid = { version = "1.1", features = ["v4"] } # Opt-out serde and stable_deref_trait features # https://github.com/Manishearth/triomphe/pull/5 -triomphe = { version = "0.1.3", default-features = false, optional = true } +triomphe = { version = "0.1.3", default-features = false } # Optional dependencies (enabled by default) -crossbeam-epoch = { version = "0.9.9", optional = true } quanta = { version = "0.11.0", optional = true } -thiserror = { version = "1.0", optional = true } -uuid = { version = "1.1", features = ["v4"], optional = true } + +# Optional dependencies (sync) +scheduled-thread-pool = { version = "0.2.7", optional = true } # Optional dependencies (future) -async-io = { version = "1.4", optional = true } async-lock = { version = "2.4", optional = true } -futures-util = { version = "0.3", optional = true } +async-trait = { version = "0.1.58", optional = true } +futures-util = { version = "0.3.17", optional = true } # Optional dependencies (logging) log = { version = "0.4", optional = true } [dev-dependencies] -actix-rt = { version = "2.7", default-features = false } +actix-rt = "2.8" ahash = "0.8.3" anyhow = "1.0.19" async-std = { version = "1.11", features = ["attributes"] } diff --git a/MIGRATION-GUIDE.md b/MIGRATION-GUIDE.md new file mode 100644 index 00000000..0a7d09a5 --- /dev/null +++ b/MIGRATION-GUIDE.md @@ -0,0 +1,252 @@ +# Moka Cache — Migration Guide + +## Migrating to v0.12 from a prior version + +v0.12.0 has major breaking changes on the API and internal behavior. This section +describes the code changes required to migrate to v0.12.0. + +### `future::Cache` + +- The thread pool was removed from `future::Cache`. The background threads are no + longer spawned. +- The `notification::DeliveryMode` for eviction listener was changed from `Queued` to + `Immediate`. + +To support these changes, the following API changes were made: + +1. `future::Cache::get` method is now `async fn`, so you must `await` for the result. +2. `future::Cache::blocking` method was removed. + - Please use async runtime's blocking API instead. + - See [Replacing the blocking API](#replacing-the-blocking-api) for more details. +3. Now `or_insert_with_if` method of the entry API requires `Send` bound for the + `replace_if` closure. +4. `eviction_listener_with_queued_delivery_mode` method of `future::CacheBuilder` was + removed. + - Please use one of the new methods `eviction_listener` or + `async_eviction_listener` instead. + - See [Updating the eviction listener](#updating-the-eviction-listener) for more + details. +5. 
+   The `future::ConcurrentCacheExt::sync` method was renamed to
+   `future::Cache::run_pending_tasks`. It was also changed to an `async fn`.
+
+The following internal behavior changes were made:
+
+1. Maintenance tasks such as removing expired entries are no longer executed
+   periodically.
+    - See [Maintenance tasks](#maintenance-tasks) for more details.
+2. `future::Cache` now supports only the `Immediate` delivery mode for the eviction
+   listener.
+    - In older versions, only the `Queued` delivery mode was supported.
+    - If you need the `Queued` delivery mode back, please file an issue.
+
+#### Replacing the blocking API
+
+The `future::Cache::blocking` method was removed. Please use your async runtime's
+blocking API instead.
+
+**Tokio**
+
+1. Call `tokio::runtime::Handle::current()` in an async context to obtain a handle
+   to the current Tokio runtime.
+2. From outside the async context, call the cache's async methods through the
+   runtime's `block_on` method.
+
+```rust
+use std::sync::Arc;
+
+#[tokio::main]
+async fn main() {
+    // Create a future cache.
+    let cache = Arc::new(moka::future::Cache::new(100));
+
+    // In an async context, you can obtain a handle to the current Tokio runtime.
+    let rt = tokio::runtime::Handle::current();
+
+    // Spawn an OS thread. Pass the handle and the cache.
+    let thread = {
+        let cache = Arc::clone(&cache);
+
+        std::thread::spawn(move || {
+            // Call the async method using the block_on method of the Tokio runtime.
+            rt.block_on(cache.insert(0, 'a'));
+        })
+    };
+
+    // Wait for the thread to complete.
+    thread.join().unwrap();
+
+    // Check the result.
+    assert_eq!(cache.get(&0).await, Some('a'));
+}
+```
+
+**async-std**
+
+- From outside the async context, call the cache's async methods through the
+  `async_std::task::block_on` method.
+
+```rust
+use std::sync::Arc;
+
+#[async_std::main]
+async fn main() {
+    // Create a future cache.
+    let cache = Arc::new(moka::future::Cache::new(100));
+
+    // Spawn an OS thread. Pass the cache.
+    let thread = {
+        let cache = Arc::clone(&cache);
+
+        std::thread::spawn(move || {
+            use async_std::task::block_on;
+
+            // Call the async method using the block_on method of async-std.
+            block_on(cache.insert(0, 'a'));
+        })
+    };
+
+    // Wait for the thread to complete.
+    thread.join().unwrap();
+
+    // Check the result.
+    assert_eq!(cache.get(&0).await, Some('a'));
+}
+```
+
+#### Updating the eviction listener
+
+- The `notification::DeliveryMode` for the eviction listener was changed from
+  `Queued` to `Immediate`.
+- The `eviction_listener_with_queued_delivery_mode` method of `future::CacheBuilder`
+  was removed. Please use one of the new methods `eviction_listener` or
+  `async_eviction_listener` instead.
+
+##### `eviction_listener` method
+
+`eviction_listener` takes the same closure as the old method. If you do not need to
+`.await` anything in the eviction listener, use this method.
+
+This code snippet is borrowed from [an example][listener-ex1] in the documentation of
+`future::Cache`:
+
+```rust
+let eviction_listener = |key, _value, cause| {
+    println!("Evicted key {key}. Cause: {cause:?}");
+};
+
+let cache = Cache::builder()
+    .max_capacity(100)
+    .expire_after(expiry)
+    .eviction_listener(eviction_listener)
+    .build();
+```
+
+[listener-ex1]: https://docs.rs/moka/latest/moka/future/struct.Cache.html#per-entry-expiration-policy
+
+##### `async_eviction_listener` method
+
+`async_eviction_listener` takes a closure that returns a `Future`. If you need to
+`.await` something in the eviction listener, use this method. The actual return type
+of the closure is `notification::ListenerFuture`, which is a type alias for
+`Pin<Box<dyn Future<Output = ()> + Send>>`. You can use the `boxed` method of the
+`future::FutureExt` trait to convert a regular `Future` into this type.
+
+This code snippet is borrowed from [an example][listener-ex2] in the documentation of
+`future::Cache`:
+
+```rust
+use moka::notification::ListenerFuture;
+// FutureExt trait provides the boxed method.
+use moka::future::FutureExt;
+
+let eviction_listener = move |k, v: PathBuf, cause| -> ListenerFuture {
+    println!(
+        "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}",
+        k, v, cause
+    );
+    let file_mgr2 = Arc::clone(&file_mgr1);
+
+    // Create a Future that removes the data file at the path `v`.
+    async move {
+        // Acquire the write lock of the DataFileManager.
+        let mut mgr = file_mgr2.write().await;
+        // Remove the data file. We must handle error cases here to
+        // prevent the listener from panicking.
+        if let Err(_e) = mgr.remove_data_file(v.as_path()).await {
+            eprintln!("Failed to remove a data file at {:?}", v);
+        }
+    }
+    // Convert the regular Future into ListenerFuture. This method is
+    // provided by the moka::future::FutureExt trait.
+    .boxed()
+};
+
+// Create the cache. Set the time to live to two seconds and set the
+// eviction listener.
+let cache = Cache::builder()
+    .max_capacity(100)
+    .time_to_live(Duration::from_secs(2))
+    .async_eviction_listener(eviction_listener)
+    .build();
+```
+
+[listener-ex2]: https://docs.rs/moka/latest/moka/future/struct.Cache.html#example-eviction-listener
+
+#### Maintenance tasks
+
+In older versions, the maintenance tasks needed by the cache were periodically
+executed in the background by a global thread pool managed by `moka`. Now
+`future::Cache` does not use the thread pool anymore, so those maintenance tasks are
+executed _sometimes_ in the foreground when certain cache methods (`get`, `get_with`,
+`insert`, etc.) are called by user code.
+
+![The lifecycle of cached entries](https://github.com/moka-rs/moka/wiki/images/benchmarks/moka-tiny-lfu.png)
+
+Figure 1. The lifecycle of cached entries
+
+These maintenance tasks include:
+
+1. Determine whether or not to admit a "temporary admitted" entry.
+2. Apply recorded cache reads and writes to the internal data structures, such as
+   the LFU filter, LRU queues, and timer wheels.
+3. When the cache's max capacity is exceeded, select existing entries to evict and
+   remove them from the cache.
+4. Remove expired entries.
+5. Remove entries that have been invalidated by the `invalidate_all` or
+   `invalidate_entries_if` methods.
+6. Deliver removal notifications to the eviction listener (that is, call the
+   eviction listener closure with information about each evicted entry).
+
+They will be executed by the following cache methods when necessary:
+
+- All cache write methods: `insert`, `get_with`, `invalidate`, etc.
+- Some of the cache read methods: `get`
+- The `run_pending_tasks` method, which executes the pending maintenance tasks
+  explicitly.
+
+Although expired entries will not be removed until the pending maintenance tasks are
+executed, they will not be returned by cache read methods such as `get`, `get_with`,
+and `contains_key`. So unless you need expired entries to be removed immediately
+(e.g. to free some memory), you do not need to call the `run_pending_tasks` method.
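+
+Here is a minimal sketch of this behavior (assuming a Tokio runtime and the v0.12
+API described above; the key type, value type, capacity, and timings are arbitrary):
+
+```rust
+use std::time::Duration;
+
+#[tokio::main]
+async fn main() {
+    // A tiny cache whose entries expire two seconds after insertion.
+    let cache = moka::future::Cache::builder()
+        .max_capacity(100)
+        .time_to_live(Duration::from_secs(2))
+        .build();
+
+    cache.insert(0, 'a').await;
+    tokio::time::sleep(Duration::from_secs(3)).await;
+
+    // The entry has expired, so `get` no longer returns it, even though the
+    // pending maintenance tasks have not removed it from the cache yet.
+    assert_eq!(cache.get(&0).await, None);
+
+    // Explicitly run the pending maintenance tasks to actually remove the
+    // expired entry (and to deliver removal notifications, if any).
+    cache.run_pending_tasks().await;
+    assert_eq!(cache.entry_count(), 0);
+}
+```
+
+Calling `run_pending_tasks` here is only needed because the example wants the
+expired entry to be dropped, and `entry_count` to be accurate, right away.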
+
+### `sync::Cache` and `sync::SegmentedCache`
+
+1. (Not in v0.12.0-beta.1) `sync` caches will no longer be enabled by default. Use
+   the crate feature `sync` to enable them.
+2. (Not in v0.12.0-beta.1) The thread pool will be disabled by default.
+    - In older versions, the thread pool was used to execute maintenance tasks in
+      the background.
+    - When disabled, those maintenance tasks are executed _sometimes_ in the
+      foreground when certain cache methods (`get`, `get_with`, `insert`, etc.) are
+      called by user code.
+        - See [Maintenance tasks](#maintenance-tasks) for more details.
+    - To enable the thread pool, see
+      [Enabling the thread pool](#enabling-the-thread-pool) for more details.
+
+#### Enabling the thread pool
+
+To enable the thread pool, do the following:
+
+- Specify the crate feature `thread-pool`.
+- At cache creation time, call the `thread_pool_enabled` method of `CacheBuilder`.
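+
+For illustration only, here is a rough sketch of what that opt-in could look like
+once those changes ship. The `thread-pool` crate feature does not exist in
+v0.12.0-beta.1, so both the feature name and the builder call below are taken from
+the plan described above and may change:
+
+```rust
+// Cargo.toml (hypothetical until the `thread-pool` feature is released):
+//
+// [dependencies]
+// moka = { version = "0.12", features = ["sync", "thread-pool"] }
+
+use moka::sync::Cache;
+
+fn main() {
+    // Opt back in to the background thread pool at cache creation time.
+    let cache: Cache<u32, String> = Cache::builder()
+        .max_capacity(100)
+        .thread_pool_enabled(true)
+        .build();
+
+    cache.insert(1, "one".to_string());
+    assert_eq!(cache.get(&1), Some("one".to_string()));
+}
+```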
diff --git a/README.md b/README.md
index bd4bd143..ac5c9633 100644
--- a/README.md
+++ b/README.md
@@ -42,8 +42,7 @@ high level of concurrency for concurrent access.
 
 - Thread-safe, highly concurrent in-memory cache implementations:
     - Synchronous caches that can be shared across OS threads.
-    - An asynchronous (futures aware) cache that can be accessed inside and outside
-      of asynchronous contexts.
+    - An asynchronous (futures aware) cache.
 - A cache can be bounded by one of the followings:
     - The maximum number of entries.
    - The total weighted size of entries. (Size aware eviction)
@@ -67,7 +66,7 @@ and can be overkill for your use case. Sometimes simpler caches like
 The following table shows the trade-offs between the different cache implementations:
 
-| Feature | Moka v0.11 | Mini Moka v0.10 | Quick Cache v0.3 |
+| Feature | Moka v0.12 | Mini Moka v0.10 | Quick Cache v0.3 |
 |:------- |:---- |:--------- |:----------- |
 | Thread-safe, sync cache | ✅ | ✅ | ✅ |
 | Thread-safe, async cache | ✅ | ❌ | ✅ |
@@ -82,10 +81,10 @@ The following table shows the trade-offs between the different cache implementations:
 | Lock-free, concurrent iterator | ✅ | ❌ | ❌ |
 | Lock-per-shard, concurrent iterator | ❌ | ✅ | ❌ |
 
-| Performance, etc. | Moka v0.11 | Mini Moka v0.10 | Quick Cache v0.3 |
+| Performance, etc. | Moka v0.12 | Mini Moka v0.10 | Quick Cache v0.3 |
 |:------- |:---- |:--------- |:----------- |
 | Small overhead compared to a concurrent hash table | ❌ | ❌ | ✅ |
-| Does not use background threads | ❌ Will be removed from v0.12 or v0.13 | ✅ | ✅ |
+| Does not use background threads | ❌ → ✅ Removed from v0.12 | ✅ | ✅ |
 | Small dependency tree | ❌ | ✅ | ✅ |
 
 [tiny-lfu]: https://github.com/moka-rs/moka/wiki#admission-and-eviction-policies
@@ -154,14 +153,14 @@ Add this to your `Cargo.toml`:
 
 ```toml
 [dependencies]
-moka = "0.11"
+moka = "0.12"
 ```
 
 To use the asynchronous cache, enable a crate feature called "future".
 
 ```toml
 [dependencies]
-moka = { version = "0.11", features = ["future"] }
+moka = { version = "0.12", features = ["future"] }
 ```
 
@@ -270,7 +269,7 @@ Here is a similar program to the previous example, but using asynchronous cache
 // Cargo.toml
 //
 // [dependencies]
-// moka = { version = "0.11", features = ["future"] }
+// moka = { version = "0.12", features = ["future"] }
 // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
 // futures-util = "0.3"
@@ -304,7 +303,7 @@ async fn main() {
                 // insert() is an async method, so await it.
                 my_cache.insert(key, value(key)).await;
                 // get() returns Option<V>, a clone of the stored value.
-                assert_eq!(my_cache.get(&key), Some(value(key)));
+                assert_eq!(my_cache.get(&key).await, Some(value(key)));
             }
 
             // Invalidate every 4 element of the inserted entries.
@@ -322,9 +321,9 @@ async fn main() {
 
     // Verify the result.
for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { if key % 4 == 0 { - assert_eq!(cache.get(&key), None); + assert_eq!(cache.get(&key).await, None); } else { - assert_eq!(cache.get(&key), Some(value(key))); + assert_eq!(cache.get(&key).await, Some(value(key))); } } } @@ -482,9 +481,9 @@ to the dependency declaration. ```toml:Cargo.toml [dependencies] -moka = { version = "0.11", default-features = false, features = ["sync"] } +moka = { version = "0.12", default-features = false, features = ["sync"] } # Or -moka = { version = "0.11", default-features = false, features = ["future"] } +moka = { version = "0.12", default-features = false, features = ["future"] } ``` This will make Moka to switch to a fall-back implementation, so it will compile. @@ -529,12 +528,15 @@ $ cargo +nightly -Z unstable-options --config 'build.rustdocflags="--cfg docsrs" - [x] Variable (per-entry) expiration, using a hierarchical timer wheel. (`v0.11.0` via [#248][gh-pull-248]) - [ ] Cache statistics. (Hit rate, etc.) +- [x] Remove background threads. (`v0.12.0` via [#294][gh-pull-294] and [#???][gh-pull-qqq]) - [ ] Upgrade TinyLFU to Window-TinyLFU. ([details][tiny-lfu]) [gh-pull-024]: https://github.com/moka-rs/moka/pull/24 [gh-pull-105]: https://github.com/moka-rs/moka/pull/105 [gh-pull-145]: https://github.com/moka-rs/moka/pull/145 [gh-pull-248]: https://github.com/moka-rs/moka/pull/248 +[gh-pull-294]: https://github.com/moka-rs/moka/pull/294 +[gh-pull-qqq]: https://github.com/moka-rs/moka/pull/qqq ## About the Name diff --git a/examples/async_example.rs b/examples/async_example.rs index c8dd1baa..e4e2f210 100644 --- a/examples/async_example.rs +++ b/examples/async_example.rs @@ -28,7 +28,7 @@ async fn main() { // insert() is an async method, so await it. my_cache.insert(key, value(key)).await; // get() returns Option, a clone of the stored value. - assert_eq!(my_cache.get(&key), Some(value(key))); + assert_eq!(my_cache.get(&key).await, Some(value(key))); } // Invalidate every 4 element of the inserted entries. @@ -46,9 +46,9 @@ async fn main() { // Verify the result. for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { if key % 4 == 0 { - assert_eq!(cache.get(&key), None); + assert_eq!(cache.get(&key).await, None); } else { - assert_eq!(cache.get(&key), Some(value(key))); + assert_eq!(cache.get(&key).await, Some(value(key))); } } } diff --git a/src/cht.rs b/src/cht.rs index 665123ea..073b410c 100644 --- a/src/cht.rs +++ b/src/cht.rs @@ -74,6 +74,9 @@ pub(crate) mod map; pub(crate) mod segment; +#[cfg(feature = "future")] +pub(crate) mod iter; + #[cfg(test)] #[macro_use] pub(crate) mod test_util; diff --git a/src/cht/iter.rs b/src/cht/iter.rs new file mode 100644 index 00000000..c3ae46a1 --- /dev/null +++ b/src/cht/iter.rs @@ -0,0 +1,89 @@ +use std::hash::Hash; + +pub(crate) trait ScanningGet +where + K: Clone, + V: Clone, +{ + /// Returns a _clone_ of the value corresponding to the key. + fn scanning_get(&self, key: &K) -> Option; + + /// Returns a vec of keys in a specified segment of the concurrent hash table. 
+ fn keys(&self, cht_segment: usize) -> Option>; +} + +pub(crate) struct Iter<'i, K, V> { + keys: Option>, + map: &'i dyn ScanningGet, + num_segments: usize, + seg_index: usize, + is_done: bool, +} + +impl<'i, K, V> Iter<'i, K, V> { + pub(crate) fn with_single_cache_segment( + map: &'i dyn ScanningGet, + num_segments: usize, + ) -> Self { + Self { + keys: None, + map, + num_segments, + seg_index: 0, + is_done: false, + } + } +} + +impl<'i, K, V> Iterator for Iter<'i, K, V> +where + K: Eq + Hash + Clone + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, +{ + type Item = (K, V); + + fn next(&mut self) -> Option { + if self.is_done { + return None; + } + + while let Some(key) = self.next_key() { + if let Some(v) = self.map.scanning_get(&key) { + return Some((key, v)); + } + } + + self.is_done = true; + None + } +} + +impl<'i, K, V> Iter<'i, K, V> +where + K: Eq + Hash + Clone + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, +{ + fn next_key(&mut self) -> Option { + while let Some(keys) = self.current_keys() { + if let key @ Some(_) = keys.pop() { + return key; + } + } + None + } + + fn current_keys(&mut self) -> Option<&mut Vec> { + // If keys is none or some but empty, try to get next keys. + while self.keys.as_ref().map_or(true, Vec::is_empty) { + // Adjust indices. + if self.seg_index >= self.num_segments { + return None; + } + + self.keys = self.map.keys(self.seg_index); + self.seg_index += 1; + } + + self.keys.as_mut() + } +} diff --git a/src/cht/segment.rs b/src/cht/segment.rs index 84074791..a6cbabed 100644 --- a/src/cht/segment.rs +++ b/src/cht/segment.rs @@ -35,6 +35,9 @@ use crate::cht::map::{ DefaultHashBuilder, }; +#[cfg(feature = "future")] +use super::iter::{Iter, ScanningGet}; + use std::{ borrow::Borrow, hash::{BuildHasher, Hash}, @@ -203,7 +206,7 @@ impl HashMap { /// /// This method on its own is safe, but other threads can add or remove /// elements at any time. - #[cfg(test)] + #[cfg(any(test, feature = "future"))] pub(crate) fn len(&self) -> usize { self.len.load(Ordering::Relaxed) } @@ -214,7 +217,7 @@ impl HashMap { /// /// This method on its own is safe, but other threads can add or remove /// elements at any time. - #[cfg(test)] + #[cfg(any(test, feature = "future"))] pub(crate) fn is_empty(&self) -> bool { self.len() == 0 } @@ -249,6 +252,13 @@ impl HashMap { } impl HashMap { + #[cfg(feature = "future")] + #[inline] + pub(crate) fn contains_key(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> bool { + self.get_key_value_and_then(hash, eq, |_, _| Some(())) + .is_some() + } + /// Returns a clone of the value corresponding to the key. #[inline] pub(crate) fn get(&self, hash: u64, eq: impl FnMut(&K) -> bool) -> Option @@ -289,7 +299,7 @@ impl HashMap { /// /// If the map did have this key present, both the key and value are /// updated. 
- #[cfg(test)] + #[cfg(any(test, feature = "future"))] #[inline] pub fn insert_entry_and( &self, @@ -484,6 +494,15 @@ impl HashMap { Some(bucket_array_ref.keys(with_key)) } + #[cfg(feature = "future")] + pub(crate) fn iter(&self) -> Iter<'_, K, V> + where + K: Clone, + V: Clone, + { + Iter::with_single_cache_segment(self, self.actual_num_segments()) + } + #[inline] pub(crate) fn hash(&self, key: &Q) -> u64 where @@ -494,6 +513,23 @@ impl HashMap { } } +#[cfg(feature = "future")] +impl ScanningGet for HashMap +where + K: Hash + Eq + Clone, + V: Clone, + S: BuildHasher, +{ + fn scanning_get(&self, key: &K) -> Option { + let hash = self.hash(key); + self.get_key_value_and_then(hash, |k| k == key, |_k, v| Some(v.clone())) + } + + fn keys(&self, cht_segment: usize) -> Option> { + self.keys(cht_segment, Clone::clone) + } +} + impl Drop for HashMap { fn drop(&mut self) { // Important: Since we are using a dummy guard returned by `unprotected`, diff --git a/src/common.rs b/src/common.rs index eb9bf77d..9cbecdaf 100644 --- a/src/common.rs +++ b/src/common.rs @@ -35,6 +35,18 @@ impl From for CacheRegion { } } +#[cfg(feature = "future")] +impl CacheRegion { + pub(crate) fn name(&self) -> &'static str { + match self { + Self::Window => "window", + Self::MainProbation => "main probation", + Self::MainProtected => "main protected", + Self::Other => "other", + } + } +} + impl PartialEq for CacheRegion { fn eq(&self, other: &Self) -> bool { core::mem::discriminant(self) == core::mem::discriminant(other) @@ -52,6 +64,7 @@ pub(crate) fn sketch_capacity(max_capacity: u64) -> u32 { max_capacity.try_into().unwrap_or(u32::MAX).max(128) } +#[cfg(any(test, feature = "sync"))] pub(crate) fn available_parallelism() -> usize { use std::{num::NonZeroUsize, thread::available_parallelism}; available_parallelism().map(NonZeroUsize::get).unwrap_or(1) diff --git a/src/common/builder_utils.rs b/src/common/builder_utils.rs index bd882404..20bc1f72 100644 --- a/src/common/builder_utils.rs +++ b/src/common/builder_utils.rs @@ -1,5 +1,6 @@ use std::time::Duration; +#[cfg(feature = "sync")] use super::concurrent::housekeeper; const YEAR_SECONDS: u64 = 365 * 24 * 3600; @@ -17,6 +18,7 @@ pub(crate) fn ensure_expirations_or_panic( } } +#[cfg(feature = "sync")] pub(crate) fn housekeeper_conf(thread_pool_enabled: bool) -> housekeeper::Configuration { if thread_pool_enabled { housekeeper::Configuration::new_thread_pool(true) diff --git a/src/common/concurrent.rs b/src/common/concurrent.rs index 66862efe..96ca2193 100644 --- a/src/common/concurrent.rs +++ b/src/common/concurrent.rs @@ -8,8 +8,14 @@ use triomphe::Arc as TrioArc; pub(crate) mod constants; pub(crate) mod deques; pub(crate) mod entry_info; + +#[cfg(feature = "sync")] pub(crate) mod housekeeper; + +#[cfg(feature = "sync")] pub(crate) mod thread_pool; + +#[cfg(feature = "sync")] pub(crate) mod unsafe_weak_pointer; // target_has_atomic is more convenient but yet unstable (Rust 1.55) diff --git a/src/common/concurrent/constants.rs b/src/common/concurrent/constants.rs index e36b1e24..bb6dbc68 100644 --- a/src/common/concurrent/constants.rs +++ b/src/common/concurrent/constants.rs @@ -1,14 +1,20 @@ pub(crate) const MAX_SYNC_REPEATS: usize = 4; pub(crate) const PERIODICAL_SYNC_INITIAL_DELAY_MILLIS: u64 = 500; -pub(crate) const PERIODICAL_SYNC_NORMAL_PACE_MILLIS: u64 = 300; -pub(crate) const PERIODICAL_SYNC_FAST_PACE_NANOS: u64 = 500; pub(crate) const READ_LOG_FLUSH_POINT: usize = 512; pub(crate) const READ_LOG_SIZE: usize = READ_LOG_FLUSH_POINT * (MAX_SYNC_REPEATS + 2); 
pub(crate) const WRITE_LOG_FLUSH_POINT: usize = 512; -pub(crate) const WRITE_LOG_LOW_WATER_MARK: usize = WRITE_LOG_FLUSH_POINT / 2; -// pub(crate) const WRITE_LOG_HIGH_WATER_MARK: usize = WRITE_LOG_FLUSH_POINT * (MAX_SYNC_REPEATS - 1); pub(crate) const WRITE_LOG_SIZE: usize = WRITE_LOG_FLUSH_POINT * (MAX_SYNC_REPEATS + 2); +#[cfg(feature = "sync")] +pub(crate) const WRITE_LOG_LOW_WATER_MARK: usize = WRITE_LOG_FLUSH_POINT / 2; + +#[cfg(feature = "sync")] pub(crate) const WRITE_RETRY_INTERVAL_MICROS: u64 = 50; + +#[cfg(feature = "sync")] +pub(crate) const PERIODICAL_SYNC_NORMAL_PACE_MILLIS: u64 = 300; + +#[cfg(feature = "sync")] +pub(crate) const PERIODICAL_SYNC_FAST_PACE_NANOS: u64 = 500; diff --git a/src/common/concurrent/deques.rs b/src/common/concurrent/deques.rs index d90ce48e..efabc798 100644 --- a/src/common/concurrent/deques.rs +++ b/src/common/concurrent/deques.rs @@ -34,6 +34,19 @@ impl Default for Deques { } impl Deques { + #[cfg(feature = "future")] + pub(crate) fn select_mut( + &mut self, + selector: CacheRegion, + ) -> (&mut Deque>, &mut Deque>) { + match selector { + CacheRegion::Window => (&mut self.window, &mut self.write_order), + CacheRegion::MainProbation => (&mut self.probation, &mut self.write_order), + CacheRegion::MainProtected => (&mut self.protected, &mut self.write_order), + _ => unreachable!(), + } + } + pub(crate) fn push_back_ao( &mut self, region: CacheRegion, diff --git a/src/common/concurrent/entry_info.rs b/src/common/concurrent/entry_info.rs index 60460203..6ffce412 100644 --- a/src/common/concurrent/entry_info.rs +++ b/src/common/concurrent/entry_info.rs @@ -140,8 +140,10 @@ mod test { use TargetArch::*; + #[allow(clippy::option_env_unwrap)] // e.g. "1.64" - let ver = option_env!("RUSTC_SEMVER").expect("RUSTC_SEMVER env var not set"); + let ver = + option_env!("RUSTC_SEMVER").expect("RUSTC_SEMVER env var was not set at compile time"); let is_quanta_enabled = cfg!(feature = "quanta"); let arch = if cfg!(target_os = "linux") { if cfg!(target_pointer_width = "64") { diff --git a/src/common/concurrent/housekeeper.rs b/src/common/concurrent/housekeeper.rs index b90dc540..d9daf87a 100644 --- a/src/common/concurrent/housekeeper.rs +++ b/src/common/concurrent/housekeeper.rs @@ -126,7 +126,7 @@ impl BlockingHousekeeper { #[inline] fn should_apply(&self, ch_len: usize, ch_flush_point: usize, now: Instant) -> bool { - ch_len >= ch_flush_point || self.sync_after.instant().unwrap() >= now + ch_len >= ch_flush_point || now >= self.sync_after.instant().unwrap() } fn try_sync(&self, cache: &T) -> bool { @@ -134,8 +134,8 @@ impl BlockingHousekeeper { match self.is_sync_running.compare_exchange( false, true, + Ordering::AcqRel, Ordering::Acquire, - Ordering::Relaxed, ) { Ok(_) => { let now = cache.now(); @@ -298,8 +298,8 @@ where match self.on_demand_sync_scheduled.compare_exchange( false, true, + Ordering::AcqRel, Ordering::Acquire, - Ordering::Relaxed, ) { Ok(_) => { let unsafe_weak_ptr = Arc::clone(&self.inner); diff --git a/src/future.rs b/src/future.rs index f0625c2e..b12a08e2 100644 --- a/src/future.rs +++ b/src/future.rs @@ -3,16 +3,24 @@ //! //! To use this module, enable a crate feature called "future". 
-use std::{hash::Hash, sync::Arc}; +use async_lock::Mutex; +use futures_util::future::BoxFuture; +use once_cell::sync::Lazy; +use std::{future::Future, hash::Hash, sync::Arc}; +mod base_cache; mod builder; mod cache; mod entry_selector; +mod housekeeper; +mod invalidator; +mod key_lock; +mod notifier; mod value_initializer; pub use { builder::CacheBuilder, - cache::{BlockingOp, Cache}, + cache::Cache, entry_selector::{OwnedKeyEntrySelector, RefKeyEntrySelector}, }; @@ -24,9 +32,25 @@ pub use { /// [invalidate-if]: ./struct.Cache.html#method.invalidate_entries_if pub type PredicateId = String; +pub(crate) type PredicateIdStr<'a> = &'a str; + // Empty struct to be used in InitResult::InitErr to represent the Option None. pub(crate) struct OptionallyNone; +impl FutureExt for T where T: Future {} + +pub trait FutureExt: Future { + fn boxed<'a, T>(self) -> BoxFuture<'a, T> + where + Self: Future + Sized + Send + 'a, + { + Box::pin(self) + } +} + +/// Iterator visiting all key-value pairs in a cache in arbitrary order. +/// +/// Call [`Cache::iter`](./struct.Cache.html#method.iter) method to obtain an `Iter`. pub struct Iter<'i, K, V>(crate::sync_base::iter::Iter<'i, K, V>); impl<'i, K, V> Iter<'i, K, V> { @@ -47,8 +71,13 @@ where } } -/// Provides extra methods that will be useful for testing. -pub trait ConcurrentCacheExt { - /// Performs any pending maintenance operations needed by the cache. - fn sync(&self); +/// May yield to other async tasks. +pub(crate) async fn may_yield() { + static LOCK: Lazy> = Lazy::new(Default::default); + + // Acquire the lock then immediately release it. This `await` may yield to other + // tasks. + // + // NOTE: This behavior was tested with Tokio and async-std. + let _ = LOCK.lock().await; } diff --git a/src/future/base_cache.rs b/src/future/base_cache.rs new file mode 100644 index 00000000..a746af05 --- /dev/null +++ b/src/future/base_cache.rs @@ -0,0 +1,3305 @@ +use super::{ + housekeeper::{Housekeeper, InnerSync}, + invalidator::{GetOrRemoveEntry, Invalidator, KeyDateLite, PredicateFun}, + key_lock::{KeyLock, KeyLockMap}, + notifier::RemovalNotifier, + PredicateId, +}; + +use crate::{ + common::{ + self, + concurrent::{ + atomic_time::AtomicInstant, + constants::{ + READ_LOG_FLUSH_POINT, READ_LOG_SIZE, WRITE_LOG_FLUSH_POINT, WRITE_LOG_SIZE, + }, + deques::Deques, + entry_info::EntryInfo, + AccessTime, KeyHash, KeyHashDate, KvEntry, ReadOp, ValueEntry, Weigher, WriteOp, + }, + deque::{DeqNode, Deque}, + frequency_sketch::FrequencySketch, + time::{CheckedTimeOps, Clock, Instant}, + timer_wheel::{ReschedulingResult, TimerWheel}, + CacheRegion, + }, + notification::{AsyncEvictionListener, RemovalCause}, + policy::ExpirationPolicy, + sync_base::iter::ScanningGet, + Entry, Expiry, Policy, PredicateError, +}; + +#[cfg(feature = "unstable-debug-counters")] +use common::concurrent::debug_counters::CacheDebugStats; + +use async_lock::{Mutex, MutexGuard, RwLock}; +use async_trait::async_trait; +use crossbeam_channel::{Receiver, Sender, TrySendError}; +use crossbeam_utils::atomic::AtomicCell; +use parking_lot::RwLock as SyncRwLock; +use smallvec::SmallVec; +use std::{ + borrow::Borrow, + collections::hash_map::RandomState, + hash::{BuildHasher, Hash, Hasher}, + sync::{ + atomic::{AtomicBool, AtomicU8, Ordering}, + Arc, + }, + time::{Duration, Instant as StdInstant}, +}; +use triomphe::Arc as TrioArc; + +pub(crate) type HouseKeeperArc = Arc; + +pub(crate) struct BaseCache { + pub(crate) inner: Arc>, + read_op_ch: Sender>, + pub(crate) write_op_ch: Sender>, + pub(crate) 
housekeeper: Option, +} + +impl Clone for BaseCache { + /// Makes a clone of this shared cache. + /// + /// This operation is cheap as it only creates thread-safe reference counted + /// pointers to the shared internal data structures. + fn clone(&self) -> Self { + Self { + inner: Arc::clone(&self.inner), + read_op_ch: self.read_op_ch.clone(), + write_op_ch: self.write_op_ch.clone(), + housekeeper: self.housekeeper.as_ref().map(Arc::clone), + } + } +} + +impl Drop for BaseCache { + fn drop(&mut self) { + // The housekeeper needs to be dropped before the inner is dropped. + std::mem::drop(self.housekeeper.take()); + } +} + +impl BaseCache { + pub(crate) fn name(&self) -> Option<&str> { + self.inner.name() + } + + pub(crate) fn policy(&self) -> Policy { + self.inner.policy() + } + + pub(crate) fn entry_count(&self) -> u64 { + self.inner.entry_count() + } + + pub(crate) fn weighted_size(&self) -> u64 { + self.inner.weighted_size() + } + + pub(crate) fn is_map_disabled(&self) -> bool { + self.inner.max_capacity == Some(0) + } + + #[inline] + pub(crate) fn is_removal_notifier_enabled(&self) -> bool { + self.inner.is_removal_notifier_enabled() + } + + #[inline] + pub(crate) fn current_time_from_expiration_clock(&self) -> Instant { + self.inner.current_time_from_expiration_clock() + } + + pub(crate) async fn notify_invalidate(&self, key: &Arc, entry: &TrioArc>) + where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + { + self.inner.notify_invalidate(key, entry).await; + } + + #[cfg(feature = "unstable-debug-counters")] + pub async fn debug_stats(&self) -> CacheDebugStats { + self.inner.debug_stats().await + } +} + +impl BaseCache +where + K: Hash + Eq, + S: BuildHasher, +{ + pub(crate) fn maybe_key_lock(&self, key: &Arc) -> Option> { + self.inner.maybe_key_lock(key) + } +} + +impl BaseCache +where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments + #[allow(clippy::too_many_arguments)] + pub(crate) fn new( + name: Option, + max_capacity: Option, + initial_capacity: Option, + build_hasher: S, + weigher: Option>, + eviction_listener: Option>, + expiration_policy: ExpirationPolicy, + invalidator_enabled: bool, + ) -> Self { + let (r_size, w_size) = if max_capacity == Some(0) { + (0, 0) + } else { + (READ_LOG_SIZE, WRITE_LOG_SIZE) + }; + + let (r_snd, r_rcv) = crossbeam_channel::bounded(r_size); + let (w_snd, w_rcv) = crossbeam_channel::bounded(w_size); + + let inner = Arc::new(Inner::new( + name, + max_capacity, + initial_capacity, + build_hasher, + weigher, + eviction_listener, + r_rcv, + w_rcv, + expiration_policy, + invalidator_enabled, + )); + + Self { + inner, + read_op_ch: r_snd, + write_op_ch: w_snd, + housekeeper: Some(Arc::new(Housekeeper::default())), + } + } + + #[inline] + pub(crate) fn hash(&self, key: &Q) -> u64 + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self.inner.hash(key) + } + + pub(crate) fn contains_key_with_hash(&self, key: &Q, hash: u64) -> bool + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self.inner + .get_key_value_and(key, hash, |k, entry| { + let i = &self.inner; + let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after()); + let now = self.current_time_from_expiration_clock(); + + !is_expired_by_per_entry_ttl(entry.entry_info(), now) + && !is_expired_entry_wo(ttl, va, entry, now) + && !is_expired_entry_ao(tti, va, entry, now) + && !i.is_invalidated_entry(k, entry) 
+ }) + .unwrap_or_default() // `false` is the default for `bool` type. + } + + pub(crate) async fn get_with_hash( + &self, + key: &Q, + hash: u64, + mut ignore_if: Option<&mut I>, + need_key: bool, + record_read: bool, + ) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + I: FnMut(&V) -> bool, + { + if self.is_map_disabled() { + return None; + } + + let mut now = self.current_time_from_expiration_clock(); + + let maybe_kv_and_op = self + .inner + .get_key_value_and_then(key, hash, move |k, entry| { + if let Some(ignore_if) = &mut ignore_if { + if ignore_if(&entry.value) { + // Ignore the entry. + return None; + } + } + + let i = &self.inner; + let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after()); + + if is_expired_by_per_entry_ttl(entry.entry_info(), now) + || is_expired_entry_wo(ttl, va, entry, now) + || is_expired_entry_ao(tti, va, entry, now) + || i.is_invalidated_entry(k, entry) + { + // Expired or invalidated entry. + None + } else { + // Valid entry. + let mut is_expiry_modified = false; + + // Call the user supplied `expire_after_read` method if any. + if let Some(expiry) = &self.inner.expiration_policy.expiry() { + let lm = entry.last_modified().expect("Last modified is not set"); + // Check if the `last_modified` of entry is earlier than or equals to + // `now`. If not, update the `now` to `last_modified`. This is needed + // because there is a small chance that other threads have inserted + // the entry _after_ we obtained `now`. + now = now.max(lm); + + // Convert `last_modified` from `moka::common::time::Instant` to + // `std::time::Instant`. + let lm = self.inner.clocks().to_std_instant(lm); + + // Call the user supplied `expire_after_read` method. + // + // We will put the return value (`is_expiry_modified: bool`) to a + // `ReadOp` so that `apply_reads` method can determine whether or not + // to reschedule the timer for the entry. + // + // NOTE: It is not guaranteed that the `ReadOp` is passed to + // `apply_reads`. Here are the corner cases that the `ReadOp` will + // not be passed to `apply_reads`: + // + // - If the bounded `read_op_ch` channel is full, the `ReadOp` will + // be discarded. + // - If we were called by `get_with_hash_without_recording` method, + // the `ReadOp` will not be recorded at all. + // + // These cases are okay because when the timer wheel tries to expire + // the entry, it will check if the entry is actually expired. If not, + // the timer wheel will reschedule the expiration timer for the + // entry. 
+ is_expiry_modified = Self::expire_after_read_or_update( + |k, v, t, d| expiry.expire_after_read(k, v, t, d, lm), + &entry.entry_info().key_hash().key, + entry, + self.inner.expiration_policy.time_to_live(), + self.inner.expiration_policy.time_to_idle(), + now, + self.inner.clocks(), + ); + } + + let maybe_key = if need_key { Some(Arc::clone(k)) } else { None }; + let ent = Entry::new(maybe_key, entry.value.clone(), false); + let maybe_op = if record_read { + Some(ReadOp::Hit { + value_entry: TrioArc::clone(entry), + timestamp: now, + is_expiry_modified, + }) + } else { + None + }; + + Some((ent, maybe_op, now)) + } + }); + + match maybe_kv_and_op { + Some((ent, maybe_op, now)) => { + if let Some(op) = maybe_op { + self.record_read_op(op, now) + .await + .expect("Failed to record a get op"); + } + Some(ent) + } + None => { + if record_read { + self.record_read_op(ReadOp::Miss(hash), now) + .await + .expect("Failed to record a get op"); + } + None + } + } + } + + #[inline] + pub(crate) fn remove_entry(&self, key: &Q, hash: u64) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self.inner.remove_entry(key, hash) + } + + #[inline] + pub(crate) async fn apply_reads_writes_if_needed( + inner: Arc, + ch: &Sender>, + now: Instant, + housekeeper: Option<&HouseKeeperArc>, + ) { + let w_len = ch.len(); + + if let Some(hk) = housekeeper { + if Self::should_apply_writes(hk, w_len, now) { + hk.try_run_pending_tasks(inner).await; + } + } + } + + pub(crate) fn invalidate_all(&self) { + let now = self.current_time_from_expiration_clock(); + self.inner.set_valid_after(now); + } + + pub(crate) fn invalidate_entries_if( + &self, + predicate: PredicateFun, + ) -> Result { + let now = self.current_time_from_expiration_clock(); + self.inner.register_invalidation_predicate(predicate, now) + } +} + +// +// Iterator support +// +impl ScanningGet for BaseCache +where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + fn num_cht_segments(&self) -> usize { + self.inner.num_cht_segments() + } + + fn scanning_get(&self, key: &Arc) -> Option { + let hash = self.hash(key); + self.inner.get_key_value_and_then(key, hash, |k, entry| { + let i = &self.inner; + let (ttl, tti, va) = (&i.time_to_live(), &i.time_to_idle(), &i.valid_after()); + let now = self.current_time_from_expiration_clock(); + + if is_expired_by_per_entry_ttl(entry.entry_info(), now) + || is_expired_entry_wo(ttl, va, entry, now) + || is_expired_entry_ao(tti, va, entry, now) + || i.is_invalidated_entry(k, entry) + { + // Expired or invalidated entry. + None + } else { + // Valid entry. + Some(entry.value.clone()) + } + }) + } + + fn keys(&self, cht_segment: usize) -> Option>> { + self.inner.keys(cht_segment) + } +} + +// +// private methods +// +impl BaseCache +where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + #[inline] + async fn record_read_op( + &self, + op: ReadOp, + now: Instant, + ) -> Result<(), TrySendError>> { + self.apply_reads_if_needed(Arc::clone(&self.inner), now) + .await; + let ch = &self.read_op_ch; + match ch.try_send(op) { + // Discard the ReadOp when the channel is full. 
+ Ok(()) | Err(TrySendError::Full(_)) => Ok(()), + Err(e @ TrySendError::Disconnected(_)) => Err(e), + } + } + + #[inline] + pub(crate) async fn do_insert_with_hash( + &self, + key: Arc, + hash: u64, + value: V, + ) -> (WriteOp, Instant) { + let ts = self.current_time_from_expiration_clock(); + let weight = self.inner.weigh(&key, &value); + let op_cnt1 = Arc::new(AtomicU8::new(0)); + let op_cnt2 = Arc::clone(&op_cnt1); + let mut op1 = None; + let mut op2 = None; + + // Lock the key for update if blocking removal notification is enabled. + let kl = self.maybe_key_lock(&key); + let _klg = if let Some(lock) = &kl { + Some(lock.lock().await) + } else { + None + }; + + // Since the cache (cht::SegmentedHashMap) employs optimistic locking + // strategy, insert_with_or_modify() may get an insert/modify operation + // conflicted with other concurrent hash table operations. In that case, it + // has to retry the insertion or modification, so on_insert and/or on_modify + // closures can be executed more than once. In order to identify the last + // call of these closures, we use a shared counter (op_cnt{1,2}) here to + // record a serial number on a WriteOp, and consider the WriteOp with the + // largest serial number is the one made by the last call of the closures. + self.inner.cache.insert_with_or_modify( + Arc::clone(&key), + hash, + // on_insert + || { + let entry = self.new_value_entry(&key, hash, value.clone(), ts, weight); + let cnt = op_cnt1.fetch_add(1, Ordering::Relaxed); + op1 = Some(( + cnt, + WriteOp::Upsert { + key_hash: KeyHash::new(Arc::clone(&key), hash), + value_entry: TrioArc::clone(&entry), + old_weight: 0, + new_weight: weight, + }, + )); + entry + }, + // on_modify + |_k, old_entry| { + // NOTES on `new_value_entry_from` method: + // 1. The internal EntryInfo will be shared between the old and new + // ValueEntries. + // 2. This method will set the is_dirty to prevent this new + // ValueEntry from being evicted by an expiration policy. + // 3. This method will update the policy_weight with the new weight. + let old_weight = old_entry.policy_weight(); + let old_timestamps = (old_entry.last_accessed(), old_entry.last_modified()); + let entry = self.new_value_entry_from(value.clone(), ts, weight, old_entry); + let cnt = op_cnt2.fetch_add(1, Ordering::Relaxed); + op2 = Some(( + cnt, + TrioArc::clone(old_entry), + old_timestamps, + WriteOp::Upsert { + key_hash: KeyHash::new(Arc::clone(&key), hash), + value_entry: TrioArc::clone(&entry), + old_weight, + new_weight: weight, + }, + )); + entry + }, + ); + + match (op1, op2) { + (Some((_cnt, ins_op)), None) => { + if let (Some(expiry), WriteOp::Upsert { value_entry, .. }) = + (&self.inner.expiration_policy.expiry(), &ins_op) + { + Self::expire_after_create(expiry, &key, value_entry, ts, self.inner.clocks()); + } + (ins_op, ts) + } + (None, Some((_cnt, old_entry, (old_last_accessed, old_last_modified), upd_op))) => { + if let (Some(expiry), WriteOp::Upsert { value_entry, .. 
}) = + (&self.inner.expiration_policy.expiry(), &upd_op) + { + Self::expire_after_read_or_update( + |k, v, t, d| expiry.expire_after_update(k, v, t, d), + &key, + value_entry, + self.inner.expiration_policy.time_to_live(), + self.inner.expiration_policy.time_to_idle(), + ts, + self.inner.clocks(), + ); + } + + if self.is_removal_notifier_enabled() { + self.inner + .notify_upsert(key, &old_entry, old_last_accessed, old_last_modified) + .await; + } + crossbeam_epoch::pin().flush(); + (upd_op, ts) + } + ( + Some((cnt1, ins_op)), + Some((cnt2, old_entry, (old_last_accessed, old_last_modified), upd_op)), + ) => { + if cnt1 > cnt2 { + if let (Some(expiry), WriteOp::Upsert { value_entry, .. }) = + (&self.inner.expiration_policy.expiry(), &ins_op) + { + Self::expire_after_create( + expiry, + &key, + value_entry, + ts, + self.inner.clocks(), + ); + } + (ins_op, ts) + } else { + if let (Some(expiry), WriteOp::Upsert { value_entry, .. }) = + (&self.inner.expiration_policy.expiry(), &upd_op) + { + Self::expire_after_read_or_update( + |k, v, t, d| expiry.expire_after_update(k, v, t, d), + &key, + value_entry, + self.inner.expiration_policy.time_to_live(), + self.inner.expiration_policy.time_to_idle(), + ts, + self.inner.clocks(), + ); + } + + if self.is_removal_notifier_enabled() { + self.inner + .notify_upsert(key, &old_entry, old_last_accessed, old_last_modified) + .await; + } + crossbeam_epoch::pin().flush(); + (upd_op, ts) + } + } + (None, None) => unreachable!(), + } + } + + #[inline] + async fn apply_reads_if_needed(&self, inner: Arc>, now: Instant) { + let len = self.read_op_ch.len(); + + if let Some(hk) = &self.housekeeper { + if Self::should_apply_reads(hk, len, now) { + hk.try_run_pending_tasks(inner).await; + } + } + } + + #[inline] + fn should_apply_reads(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { + hk.should_apply_reads(ch_len, now) + } + + #[inline] + fn should_apply_writes(hk: &HouseKeeperArc, ch_len: usize, now: Instant) -> bool { + hk.should_apply_writes(ch_len, now) + } +} + +impl BaseCache { + #[inline] + fn new_value_entry( + &self, + key: &Arc, + hash: u64, + value: V, + timestamp: Instant, + policy_weight: u32, + ) -> TrioArc> { + let key_hash = KeyHash::new(Arc::clone(key), hash); + let info = TrioArc::new(EntryInfo::new(key_hash, timestamp, policy_weight)); + TrioArc::new(ValueEntry::new(value, info)) + } + + #[inline] + fn new_value_entry_from( + &self, + value: V, + timestamp: Instant, + policy_weight: u32, + other: &ValueEntry, + ) -> TrioArc> { + let info = TrioArc::clone(other.entry_info()); + // To prevent this updated ValueEntry from being evicted by an expiration policy, + // set the dirty flag to true. It will be reset to false when the write is applied. 
+ info.set_dirty(true); + info.set_last_accessed(timestamp); + info.set_last_modified(timestamp); + info.set_policy_weight(policy_weight); + TrioArc::new(ValueEntry::new_from(value, info, other)) + } + + fn expire_after_create( + expiry: &Arc + Send + Sync + 'static>, + key: &K, + value_entry: &ValueEntry, + ts: Instant, + clocks: &Clocks, + ) { + let duration = + expiry.expire_after_create(key, &value_entry.value, clocks.to_std_instant(ts)); + let expiration_time = duration.map(|duration| ts.checked_add(duration).expect("Overflow")); + value_entry + .entry_info() + .set_expiration_time(expiration_time); + } + + fn expire_after_read_or_update( + expiry: impl FnOnce(&K, &V, StdInstant, Option) -> Option, + key: &K, + value_entry: &ValueEntry, + ttl: Option, + tti: Option, + ts: Instant, + clocks: &Clocks, + ) -> bool { + let current_time = clocks.to_std_instant(ts); + let ei = &value_entry.entry_info(); + + let exp_time = IntoIterator::into_iter([ + ei.expiration_time(), + ttl.and_then(|dur| ei.last_modified().and_then(|ts| ts.checked_add(dur))), + tti.and_then(|dur| ei.last_accessed().and_then(|ts| ts.checked_add(dur))), + ]) + .flatten() + .min(); + + let current_duration = exp_time.and_then(|time| { + let std_time = clocks.to_std_instant(time); + std_time.checked_duration_since(current_time) + }); + + let duration = expiry(key, &value_entry.value, current_time, current_duration); + + if duration != current_duration { + let expiration_time = + duration.map(|duration| ts.checked_add(duration).expect("Overflow")); + value_entry + .entry_info() + .set_expiration_time(expiration_time); + // The `expiration_time` has changed from `None` to `Some` or vice versa. + true + } else { + false + } + } +} + +// +// for testing +// +#[cfg(test)] +impl BaseCache +where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + pub(crate) fn invalidation_predicate_count(&self) -> usize { + self.inner.invalidation_predicate_count() + } + + pub(crate) async fn reconfigure_for_testing(&mut self) { + // Enable the frequency sketch. + self.inner.enable_frequency_sketch_for_testing().await; + // Disable auto clean up of pending tasks. 
+ if let Some(hk) = &self.housekeeper { + hk.disable_auto_run(); + } + } + + pub(crate) async fn set_expiration_clock(&self, clock: Option) { + self.inner.set_expiration_clock(clock).await; + if let Some(hk) = &self.housekeeper { + let now = self.current_time_from_expiration_clock(); + hk.reset_run_after(now); + } + } + + pub(crate) fn key_locks_map_is_empty(&self) -> bool { + self.inner.key_locks_map_is_empty() + } +} + +struct EvictionState<'a, K, V> { + counters: EvictionCounters, + notifier: Option<&'a RemovalNotifier>, +} + +impl<'a, K, V> EvictionState<'a, K, V> { + fn new( + entry_count: u64, + weighted_size: u64, + notifier: Option<&'a RemovalNotifier>, + ) -> Self { + Self { + counters: EvictionCounters::new(entry_count, weighted_size), + notifier, + } + } + + fn is_notifier_enabled(&self) -> bool { + self.notifier.is_some() + } + + async fn add_removed_entry( + &mut self, + key: Arc, + entry: &TrioArc>, + cause: RemovalCause, + ) where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + { + debug_assert!(self.is_notifier_enabled()); + + if let Some(notifier) = self.notifier { + notifier.notify(key, entry.value.clone(), cause).await; + } + } +} + +struct EvictionCounters { + entry_count: u64, + weighted_size: u64, +} + +impl EvictionCounters { + #[inline] + fn new(entry_count: u64, weighted_size: u64) -> Self { + Self { + entry_count, + weighted_size, + } + } + + #[inline] + fn saturating_add(&mut self, entry_count: u64, weight: u32) { + self.entry_count += entry_count; + let total = &mut self.weighted_size; + *total = total.saturating_add(weight as u64); + } + + #[inline] + fn saturating_sub(&mut self, entry_count: u64, weight: u32) { + self.entry_count -= entry_count; + let total = &mut self.weighted_size; + *total = total.saturating_sub(weight as u64); + } +} + +#[derive(Default)] +struct EntrySizeAndFrequency { + policy_weight: u64, + freq: u32, +} + +impl EntrySizeAndFrequency { + fn new(policy_weight: u32) -> Self { + Self { + policy_weight: policy_weight as u64, + ..Default::default() + } + } + + fn add_policy_weight(&mut self, weight: u32) { + self.policy_weight += weight as u64; + } + + fn add_frequency(&mut self, freq: &FrequencySketch, hash: u64) { + self.freq += freq.frequency(hash) as u32; + } +} + +enum AdmissionResult { + Admitted { + victim_keys: SmallVec<[KeyHash; 8]>, + }, + Rejected, +} + +type CacheStore = crate::cht::SegmentedHashMap, TrioArc>, S>; + +struct Clocks { + // Lock for this Clocks instance. Used when the `expiration_clock` is set. + _lock: Mutex<()>, + has_expiration_clock: AtomicBool, + expiration_clock: SyncRwLock>, + /// The time (`moka::common::time`) when this timer wheel was created. + origin: Instant, + /// The time (`StdInstant`) when this timer wheel was created. + origin_std: StdInstant, + /// Mutable version of `origin` and `origin_std`. Used when the + /// `expiration_clock` is set. 
+ mutable_origin: SyncRwLock>, +} + +impl Clocks { + fn new(time: Instant, std_time: StdInstant) -> Self { + Self { + _lock: Default::default(), + has_expiration_clock: Default::default(), + expiration_clock: Default::default(), + origin: time, + origin_std: std_time, + mutable_origin: Default::default(), + } + } + + fn to_std_instant(&self, time: Instant) -> StdInstant { + let (origin, origin_std) = if self.has_expiration_clock.load(Ordering::Relaxed) { + self.mutable_origin + .read() + .expect("mutable_origin is not set") + } else { + (self.origin, self.origin_std) + }; + origin_std + (time.checked_duration_since(origin).unwrap()) + } + + #[cfg(test)] + fn set_origin(&self, time: Instant, std_time: StdInstant) { + *self.mutable_origin.write() = Some((time, std_time)); + } +} + +pub(crate) struct Inner { + name: Option, + max_capacity: Option, + entry_count: AtomicCell, + weighted_size: AtomicCell, + cache: CacheStore, + build_hasher: S, + deques: Mutex>, + timer_wheel: Mutex>, + frequency_sketch: RwLock, + frequency_sketch_enabled: AtomicBool, + read_op_ch: Receiver>, + write_op_ch: Receiver>, + expiration_policy: ExpirationPolicy, + valid_after: AtomicInstant, + weigher: Option>, + removal_notifier: Option>, + key_locks: Option>, + invalidator: Option>, + clocks: Clocks, +} + +// +// functions/methods used by BaseCache +// + +impl Inner { + fn name(&self) -> Option<&str> { + self.name.as_deref() + } + + fn policy(&self) -> Policy { + let exp = &self.expiration_policy; + Policy::new(self.max_capacity, 1, exp.time_to_live(), exp.time_to_idle()) + } + + #[inline] + fn entry_count(&self) -> u64 { + self.entry_count.load() + } + + #[inline] + fn weighted_size(&self) -> u64 { + self.weighted_size.load() + } + + #[inline] + fn is_removal_notifier_enabled(&self) -> bool { + self.removal_notifier.is_some() + } + + #[cfg(feature = "unstable-debug-counters")] + pub async fn debug_stats(&self) -> CacheDebugStats { + let ec = self.entry_count.load(); + let ws = self.weighted_size.load(); + + CacheDebugStats::new( + ec, + ws, + (self.cache.capacity() * 2) as u64, + self.frequency_sketch.read().await.table_size(), + ) + } + + #[inline] + fn current_time_from_expiration_clock(&self) -> Instant { + if self.clocks.has_expiration_clock.load(Ordering::Relaxed) { + Instant::new( + self.clocks + .expiration_clock + .read() + .as_ref() + .expect("Cannot get the expiration clock") + .now(), + ) + } else { + Instant::now() + } + } + + fn clocks(&self) -> &Clocks { + &self.clocks + } + + fn num_cht_segments(&self) -> usize { + self.cache.actual_num_segments() + } + + #[inline] + fn time_to_live(&self) -> Option { + self.expiration_policy.time_to_live() + } + + #[inline] + fn time_to_idle(&self) -> Option { + self.expiration_policy.time_to_idle() + } + + #[inline] + fn has_expiry(&self) -> bool { + let exp = &self.expiration_policy; + exp.time_to_live().is_some() || exp.time_to_idle().is_some() + } + + #[inline] + fn is_write_order_queue_enabled(&self) -> bool { + self.expiration_policy.time_to_live().is_some() || self.invalidator.is_some() + } + + #[inline] + fn valid_after(&self) -> Option { + self.valid_after.instant() + } + + #[inline] + fn set_valid_after(&self, timestamp: Instant) { + self.valid_after.set_instant(timestamp); + } + + #[inline] + fn has_valid_after(&self) -> bool { + self.valid_after.is_set() + } +} + +impl Inner +where + K: Hash + Eq, + S: BuildHasher, +{ + fn maybe_key_lock(&self, key: &Arc) -> Option> + where + K: Hash + Eq, + S: BuildHasher, + { + self.key_locks.as_ref().map(|kls| 
kls.key_lock(key)) + } +} + +impl Inner +where + K: Hash + Eq + Send + Sync + 'static, + V: Send + Sync + 'static, + S: BuildHasher + Clone, +{ + // Disable a Clippy warning for having more than seven arguments. + // https://rust-lang.github.io/rust-clippy/master/index.html#too_many_arguments + #[allow(clippy::too_many_arguments)] + fn new( + name: Option, + max_capacity: Option, + initial_capacity: Option, + build_hasher: S, + weigher: Option>, + eviction_listener: Option>, + read_op_ch: Receiver>, + write_op_ch: Receiver>, + expiration_policy: ExpirationPolicy, + invalidator_enabled: bool, + ) -> Self { + let (num_segments, initial_capacity) = if max_capacity == Some(0) { + (1, 0) + } else { + let ic = initial_capacity + .map(|cap| cap + WRITE_LOG_SIZE) + .unwrap_or_default(); + (64, ic) + }; + let cache = crate::cht::SegmentedHashMap::with_num_segments_capacity_and_hasher( + num_segments, + initial_capacity, + build_hasher.clone(), + ); + + // Assume that getting `moka::common::Instant::now` has lower latency than + // `StdInstant::now`. + let now_std = StdInstant::now(); + let now = Instant::now(); + let clocks = Clocks::new(now, now_std); + let timer_wheel = Mutex::new(TimerWheel::new(now)); + + let (removal_notifier, key_locks) = if let Some(listener) = eviction_listener { + let rn = RemovalNotifier::new(listener, name.clone()); + let kl = KeyLockMap::with_hasher(build_hasher.clone()); + (Some(rn), Some(kl)) + } else { + (None, None) + }; + let invalidator = if invalidator_enabled { + Some(Invalidator::new(build_hasher.clone())) + } else { + None + }; + + Self { + name, + max_capacity, + entry_count: Default::default(), + weighted_size: Default::default(), + cache, + build_hasher, + deques: Default::default(), + timer_wheel, + frequency_sketch: RwLock::new(Default::default()), + frequency_sketch_enabled: Default::default(), + read_op_ch, + write_op_ch, + expiration_policy, + valid_after: Default::default(), + weigher, + removal_notifier, + key_locks, + invalidator, + clocks, + } + } + + #[inline] + fn hash(&self, key: &Q) -> u64 + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + let mut hasher = self.build_hasher.build_hasher(); + key.hash(&mut hasher); + hasher.finish() + } + + #[inline] + fn get_key_value_and(&self, key: &Q, hash: u64, with_entry: F) -> Option + where + K: Borrow, + Q: Hash + Eq + ?Sized, + F: FnOnce(&Arc, &TrioArc>) -> T, + { + self.cache + .get_key_value_and(hash, |k| (k as &K).borrow() == key, with_entry) + } + + #[inline] + fn get_key_value_and_then(&self, key: &Q, hash: u64, with_entry: F) -> Option + where + K: Borrow, + Q: Hash + Eq + ?Sized, + F: FnOnce(&Arc, &TrioArc>) -> Option, + { + self.cache + .get_key_value_and_then(hash, |k| (k as &K).borrow() == key, with_entry) + } + + #[inline] + fn remove_entry(&self, key: &Q, hash: u64) -> Option> + where + K: Borrow, + Q: Hash + Eq + ?Sized, + { + self.cache + .remove_entry(hash, |k| (k as &K).borrow() == key) + .map(|(key, entry)| KvEntry::new(key, entry)) + } + + fn keys(&self, cht_segment: usize) -> Option>> { + // Do `Arc::clone` instead of `Arc::downgrade`. Updating existing entry + // in the cht with a new value replaces the key in the cht even though the + // old and new keys are equal. If we return `Weak`, it will not be + // upgraded later to `Arc as the key may have been replaced with a new + // key that equals to the old key. 
+ self.cache.keys(cht_segment, Arc::clone) + } + + #[inline] + fn register_invalidation_predicate( + &self, + predicate: PredicateFun, + registered_at: Instant, + ) -> Result { + if let Some(inv) = &self.invalidator { + inv.register_predicate(predicate, registered_at) + } else { + Err(PredicateError::InvalidationClosuresDisabled) + } + } + + /// Returns `true` if the entry is invalidated by `invalidate_entries_if` method. + #[inline] + fn is_invalidated_entry(&self, key: &Arc, entry: &TrioArc>) -> bool + where + V: Clone, + { + if let Some(inv) = &self.invalidator { + return inv.apply_predicates(key, entry); + } + false + } + + #[inline] + fn weigh(&self, key: &K, value: &V) -> u32 { + self.weigher.as_ref().map(|w| w(key, value)).unwrap_or(1) + } +} + +#[async_trait] +impl GetOrRemoveEntry for Inner +where + K: Hash + Eq, + S: BuildHasher + Send + Sync + 'static, +{ + fn get_value_entry(&self, key: &Arc, hash: u64) -> Option>> { + self.cache.get(hash, |k| k == key) + } + + async fn remove_key_value_if( + &self, + key: &Arc, + hash: u64, + condition: F, + ) -> Option>> + where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + F: for<'a, 'b> FnMut(&'a Arc, &'b TrioArc>) -> bool + Send, + { + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(key); + let _klg = if let Some(lock) = &kl { + Some(lock.lock().await) + } else { + None + }; + + let maybe_entry = self.cache.remove_if(hash, |k| k == key, condition); + if let Some(entry) = &maybe_entry { + if self.is_removal_notifier_enabled() { + self.notify_single_removal(Arc::clone(key), entry, RemovalCause::Explicit) + .await; + } + } + maybe_entry + } +} + +#[cfg(feature = "unstable-debug-counters")] +mod batch_size { + pub(crate) const EVICTION_BATCH_SIZE: usize = 10_000; + pub(crate) const INVALIDATION_BATCH_SIZE: usize = 10_000; +} + +#[cfg(not(feature = "unstable-debug-counters"))] +mod batch_size { + pub(crate) const EVICTION_BATCH_SIZE: usize = 500; + pub(crate) const INVALIDATION_BATCH_SIZE: usize = 500; +} + +#[async_trait] +impl InnerSync for Inner +where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + async fn run_pending_tasks(&self, max_repeats: usize) { + self.do_run_pending_tasks(max_repeats).await; + } + + fn now(&self) -> Instant { + self.current_time_from_expiration_clock() + } +} + +impl Inner +where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + async fn do_run_pending_tasks(&self, max_repeats: usize) { + if self.max_capacity == Some(0) { + return; + } + + let mut deqs = self.deques.lock().await; + let mut timer_wheel = self.timer_wheel.lock().await; + let mut calls = 0; + let current_ec = self.entry_count.load(); + let current_ws = self.weighted_size.load(); + let mut eviction_state = + EvictionState::new(current_ec, current_ws, self.removal_notifier.as_ref()); + + let mut should_process_logs = true; + + while should_process_logs && calls <= max_repeats { + let r_len = self.read_op_ch.len(); + if r_len > 0 { + self.apply_reads(&mut deqs, &mut timer_wheel, r_len).await; + } + + let w_len = self.write_op_ch.len(); + if w_len > 0 { + self.apply_writes(&mut deqs, &mut timer_wheel, w_len, &mut eviction_state) + .await; + } + + if self.should_enable_frequency_sketch(&eviction_state.counters) { + self.enable_frequency_sketch(&eviction_state.counters).await; + } + + calls += 1; + should_process_logs = 
self.read_op_ch.len() >= READ_LOG_FLUSH_POINT + || self.write_op_ch.len() >= WRITE_LOG_FLUSH_POINT; + } + + if timer_wheel.is_enabled() { + self.evict_expired_entries_using_timers( + &mut timer_wheel, + &mut deqs, + &mut eviction_state, + ) + .await; + } + + if self.has_expiry() || self.has_valid_after() { + self.evict_expired_entries_using_deqs( + &mut deqs, + &mut timer_wheel, + batch_size::EVICTION_BATCH_SIZE, + &mut eviction_state, + ) + .await; + } + + if let Some(invalidator) = &self.invalidator { + if !invalidator.is_empty() { + self.invalidate_entries( + invalidator, + &mut deqs, + &mut timer_wheel, + batch_size::INVALIDATION_BATCH_SIZE, + &mut eviction_state, + ) + .await; + } + } + + // Evict if this cache has more entries than its capacity. + let weights_to_evict = self.weights_to_evict(&eviction_state.counters); + if weights_to_evict > 0 { + self.evict_lru_entries( + &mut deqs, + &mut timer_wheel, + batch_size::EVICTION_BATCH_SIZE, + weights_to_evict, + &mut eviction_state, + ) + .await; + } + + debug_assert_eq!(self.entry_count.load(), current_ec); + debug_assert_eq!(self.weighted_size.load(), current_ws); + self.entry_count.store(eviction_state.counters.entry_count); + self.weighted_size + .store(eviction_state.counters.weighted_size); + + crossbeam_epoch::pin().flush(); + } +} + +// +// private methods +// +impl Inner +where + K: Hash + Eq + Send + Sync + 'static, + V: Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + fn has_enough_capacity(&self, candidate_weight: u32, counters: &EvictionCounters) -> bool { + self.max_capacity + .map(|limit| counters.weighted_size + candidate_weight as u64 <= limit) + .unwrap_or(true) + } + + fn weights_to_evict(&self, counters: &EvictionCounters) -> u64 { + self.max_capacity + .map(|limit| counters.weighted_size.saturating_sub(limit)) + .unwrap_or_default() + } + + #[inline] + fn should_enable_frequency_sketch(&self, counters: &EvictionCounters) -> bool { + match self.max_capacity { + None | Some(0) => false, + Some(max_cap) => { + if self.frequency_sketch_enabled.load(Ordering::Acquire) { + false // The frequency sketch is already enabled. 
+ } else { + counters.weighted_size >= max_cap / 2 + } + } + } + } + + #[inline] + async fn enable_frequency_sketch(&self, counters: &EvictionCounters) { + if let Some(max_cap) = self.max_capacity { + let c = counters; + let cap = if self.weigher.is_none() { + max_cap + } else { + (c.entry_count as f64 * (c.weighted_size as f64 / max_cap as f64)) as u64 + }; + self.do_enable_frequency_sketch(cap).await; + } + } + + #[cfg(test)] + async fn enable_frequency_sketch_for_testing(&self) { + if let Some(max_cap) = self.max_capacity { + self.do_enable_frequency_sketch(max_cap).await; + } + } + + #[inline] + async fn do_enable_frequency_sketch(&self, cache_capacity: u64) { + let skt_capacity = common::sketch_capacity(cache_capacity); + self.frequency_sketch + .write() + .await + .ensure_capacity(skt_capacity); + self.frequency_sketch_enabled.store(true, Ordering::Release); + } + + async fn apply_reads( + &self, + deqs: &mut Deques, + timer_wheel: &mut TimerWheel, + count: usize, + ) { + use ReadOp::*; + let mut freq = self.frequency_sketch.write().await; + let ch = &self.read_op_ch; + for _ in 0..count { + match ch.try_recv() { + Ok(Hit { + value_entry, + timestamp, + is_expiry_modified, + }) => { + let kh = value_entry.entry_info().key_hash(); + freq.increment(kh.hash); + value_entry.set_last_accessed(timestamp); + if is_expiry_modified { + self.update_timer_wheel(&value_entry, timer_wheel); + } + deqs.move_to_back_ao(&value_entry); + } + Ok(Miss(hash)) => freq.increment(hash), + Err(_) => break, + } + } + } + + async fn apply_writes( + &self, + deqs: &mut Deques, + timer_wheel: &mut TimerWheel, + count: usize, + eviction_state: &mut EvictionState<'_, K, V>, + ) where + V: Clone, + { + use WriteOp::*; + let freq = self.frequency_sketch.read().await; + let ch = &self.write_op_ch; + + for _ in 0..count { + match ch.try_recv() { + Ok(Upsert { + key_hash: kh, + value_entry: entry, + old_weight, + new_weight, + }) => { + self.handle_upsert( + kh, + entry, + old_weight, + new_weight, + deqs, + timer_wheel, + &freq, + eviction_state, + ) + .await + } + Ok(Remove(KvEntry { key: _key, entry })) => { + Self::handle_remove(deqs, timer_wheel, entry, &mut eviction_state.counters) + } + Err(_) => break, + }; + } + } + + #[allow(clippy::too_many_arguments)] + async fn handle_upsert( + &self, + kh: KeyHash, + entry: TrioArc>, + old_weight: u32, + new_weight: u32, + deqs: &mut Deques, + timer_wheel: &mut TimerWheel, + freq: &FrequencySketch, + eviction_state: &mut EvictionState<'_, K, V>, + ) where + V: Clone, + { + entry.set_dirty(false); + + { + let counters = &mut eviction_state.counters; + + if entry.is_admitted() { + // The entry has been already admitted, so treat this as an update. + counters.saturating_sub(0, old_weight); + counters.saturating_add(0, new_weight); + self.update_timer_wheel(&entry, timer_wheel); + deqs.move_to_back_ao(&entry); + deqs.move_to_back_wo(&entry); + return; + } + + if self.has_enough_capacity(new_weight, counters) { + // There are enough room in the cache (or the cache is unbounded). + // Add the candidate to the deques. + self.handle_admit(&entry, new_weight, deqs, timer_wheel, counters); + return; + } + } + + if let Some(max) = self.max_capacity { + if new_weight as u64 > max { + // The candidate is too big to fit in the cache. Reject it. + + // Lock the key for removal if blocking removal notification is enabled. 
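+                // (Note: `maybe_key_lock` returns `Some` only when an eviction
+                // listener was configured, so caches without a listener skip the
+                // `lock().await` below entirely. The same guard pattern is used
+                // by the other removal paths in this module.)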
+ let kl = self.maybe_key_lock(&kh.key); + let _klg = if let Some(lock) = &kl { + Some(lock.lock().await) + } else { + None + }; + + let removed = self.cache.remove(kh.hash, |k| k == &kh.key); + if let Some(entry) = removed { + if eviction_state.is_notifier_enabled() { + let key = Arc::clone(&kh.key); + eviction_state + .add_removed_entry(key, &entry, RemovalCause::Size) + .await; + } + } + return; + } + } + + let mut candidate = EntrySizeAndFrequency::new(new_weight); + candidate.add_frequency(freq, kh.hash); + + // Try to admit the candidate. + // + // NOTE: We need to call `admit` here, instead of a part of the `match` + // expression. Otherwise the future returned from this `handle_upsert` method + // will not be `Send`. + let admission_result = Self::admit(&candidate, &self.cache, deqs, freq); + match admission_result { + AdmissionResult::Admitted { victim_keys } => { + // Try to remove the victims from the hash map. + for victim in victim_keys { + let vic_key = victim.key; + let vic_hash = victim.hash; + + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(&vic_key); + let _klg = if let Some(lock) = &kl { + Some(lock.lock().await) + } else { + None + }; + + if let Some((vic_key, vic_entry)) = + self.cache.remove_entry(vic_hash, |k| k == &vic_key) + { + if eviction_state.is_notifier_enabled() { + eviction_state + .add_removed_entry(vic_key, &vic_entry, RemovalCause::Size) + .await; + } + // And then remove the victim from the deques. + Self::handle_remove( + deqs, + timer_wheel, + vic_entry, + &mut eviction_state.counters, + ); + } else { + // Could not remove the victim from the cache. Skip it as its + // ValueEntry might have been invalidated. + if let Some(node) = deqs.probation.peek_front() { + if node.element.key() == &vic_key && node.element.hash() == vic_hash { + deqs.probation.move_front_to_back(); + } + } + } + } + // Add the candidate to the deques. + self.handle_admit( + &entry, + new_weight, + deqs, + timer_wheel, + &mut eviction_state.counters, + ); + } + AdmissionResult::Rejected => { + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(&kh.key); + let _klg = if let Some(lock) = &kl { + Some(lock.lock().await) + } else { + None + }; + + // Remove the candidate from the cache (hash map). + let key = Arc::clone(&kh.key); + self.cache.remove(kh.hash, |k| k == &key); + if eviction_state.is_notifier_enabled() { + eviction_state + .add_removed_entry(key, &entry, RemovalCause::Size) + .await; + } + } + } + } + + /// Performs size-aware admission explained in the paper: + /// [Lightweight Robust Size Aware Cache Management][size-aware-cache-paper] + /// by Gil Einziger, Ohad Eytan, Roy Friedman, Ben Manes. + /// + /// [size-aware-cache-paper]: https://arxiv.org/abs/2105.08770 + /// + /// There are some modifications in this implementation: + /// - To admit to the main space, candidate's frequency must be higher than + /// the aggregated frequencies of the potential victims. (In the paper, + /// `>=` operator is used rather than `>`) The `>` operator will do a better + /// job to prevent the main space from polluting. + /// - When a candidate is rejected, the potential victims will stay at the LRU + /// position of the probation access-order queue. (In the paper, they will be + /// promoted (to the MRU position?) to force the eviction policy to select a + /// different set of victims for the next candidate). We may implement the + /// paper's behavior later? 
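+    ///
+    /// Illustrative example (numbers are made up): a candidate with policy
+    /// weight 4 and frequency 6 aggregates victims from the LRU end of the
+    /// probation queue until their combined policy weight reaches 4. If the
+    /// victims' combined frequency is 5, the candidate is admitted (`6 > 5`)
+    /// and those victims are evicted; if it is 6 or more, the candidate is
+    /// rejected and the caller removes the candidate instead.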
+ /// + #[inline] + fn admit( + candidate: &EntrySizeAndFrequency, + cache: &CacheStore, + deqs: &mut Deques, + freq: &FrequencySketch, + ) -> AdmissionResult { + const MAX_CONSECUTIVE_RETRIES: usize = 5; + let mut retries = 0; + + let mut victims = EntrySizeAndFrequency::default(); + let mut victim_keys = SmallVec::default(); + + let deq = &mut deqs.probation; + + // Get first potential victim at the LRU position. + let mut next_victim = deq.peek_front_ptr(); + + // Aggregate potential victims. + while victims.policy_weight < candidate.policy_weight { + if candidate.freq < victims.freq { + break; + } + if let Some(victim) = next_victim.take() { + next_victim = DeqNode::next_node_ptr(victim); + + let vic_elem = &unsafe { victim.as_ref() }.element; + let key = vic_elem.key(); + let hash = vic_elem.hash(); + + if let Some(vic_entry) = cache.get(hash, |k| k == key) { + victims.add_policy_weight(vic_entry.policy_weight()); + victims.add_frequency(freq, hash); + victim_keys.push(KeyHash::new(Arc::clone(key), hash)); + retries = 0; + } else { + // Could not get the victim from the cache (hash map). Skip this node + // as its ValueEntry might have been invalidated. + unsafe { deq.move_to_back(victim) }; + retries += 1; + } + } else { + // No more potential victims. + break; + } + + if retries > MAX_CONSECUTIVE_RETRIES { + break; + } + } + + // Admit or reject the candidate. + + // TODO: Implement some randomness to mitigate hash DoS attack. + // See Caffeine's implementation. + + if victims.policy_weight >= candidate.policy_weight && candidate.freq > victims.freq { + AdmissionResult::Admitted { victim_keys } + } else { + AdmissionResult::Rejected + } + } + + fn handle_admit( + &self, + entry: &TrioArc>, + policy_weight: u32, + deqs: &mut Deques, + timer_wheel: &mut TimerWheel, + counters: &mut EvictionCounters, + ) { + counters.saturating_add(1, policy_weight); + + self.update_timer_wheel(entry, timer_wheel); + + // Update the deques. + deqs.push_back_ao( + CacheRegion::MainProbation, + KeyHashDate::new(entry.entry_info()), + entry, + ); + if self.is_write_order_queue_enabled() { + deqs.push_back_wo(KeyHashDate::new(entry.entry_info()), entry); + } + entry.set_admitted(true); + } + + /// NOTE: This method may enable the timer wheel. + fn update_timer_wheel( + &self, + entry: &TrioArc>, + timer_wheel: &mut TimerWheel, + ) { + // Enable the timer wheel if needed. + if entry.entry_info().expiration_time().is_some() && !timer_wheel.is_enabled() { + timer_wheel.enable(); + } + + // Update the timer wheel. + match ( + entry.entry_info().expiration_time().is_some(), + entry.timer_node(), + ) { + // Do nothing; the cache entry has no expiration time and not registered + // to the timer wheel. + (false, None) => (), + // Register the cache entry to the timer wheel; the cache entry has an + // expiration time and not registered to the timer wheel. + (true, None) => { + let timer = timer_wheel.schedule( + TrioArc::clone(entry.entry_info()), + TrioArc::clone(entry.deq_nodes()), + ); + entry.set_timer_node(timer); + } + // Reschedule the cache entry in the timer wheel; the cache entry has an + // expiration time and already registered to the timer wheel. + (true, Some(tn)) => { + let result = timer_wheel.reschedule(tn); + if let ReschedulingResult::Removed(removed_tn) = result { + // The timer node was removed from the timer wheel because the + // expiration time has been unset by other thread after we + // checked. 
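+                    // (Clearing the entry's timer-node pointer keeps it in sync
+                    // with the wheel; the explicit `drop` of the removed node only
+                    // makes the discard visible, as it would be dropped at the end
+                    // of this scope anyway.)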
+ entry.set_timer_node(None); + drop(removed_tn); + } + } + // Unregister the cache entry from the timer wheel; the cache entry has + // no expiration time but registered to the timer wheel. + (false, Some(tn)) => { + entry.set_timer_node(None); + timer_wheel.deschedule(tn); + } + } + } + + fn handle_remove( + deqs: &mut Deques, + timer_wheel: &mut TimerWheel, + entry: TrioArc>, + counters: &mut EvictionCounters, + ) { + if let Some(timer_node) = entry.take_timer_node() { + timer_wheel.deschedule(timer_node); + } + Self::handle_remove_without_timer_wheel(deqs, entry, counters); + } + + fn handle_remove_without_timer_wheel( + deqs: &mut Deques, + entry: TrioArc>, + counters: &mut EvictionCounters, + ) { + if entry.is_admitted() { + entry.set_admitted(false); + counters.saturating_sub(1, entry.policy_weight()); + // The following two unlink_* functions will unset the deq nodes. + deqs.unlink_ao(&entry); + Deques::unlink_wo(&mut deqs.write_order, &entry); + } else { + entry.unset_q_nodes(); + } + } + + fn handle_remove_with_deques( + ao_deq_name: &str, + ao_deq: &mut Deque>, + wo_deq: &mut Deque>, + timer_wheel: &mut TimerWheel, + entry: TrioArc>, + counters: &mut EvictionCounters, + ) { + if let Some(timer) = entry.take_timer_node() { + timer_wheel.deschedule(timer); + } + if entry.is_admitted() { + entry.set_admitted(false); + counters.saturating_sub(1, entry.policy_weight()); + // The following two unlink_* functions will unset the deq nodes. + Deques::unlink_ao_from_deque(ao_deq_name, ao_deq, &entry); + Deques::unlink_wo(wo_deq, &entry); + } else { + entry.unset_q_nodes(); + } + } + + async fn evict_expired_entries_using_timers( + &self, + timer_wheel: &mut TimerWheel, + deqs: &mut Deques, + eviction_state: &mut EvictionState<'_, K, V>, + ) where + V: Clone, + { + use crate::common::timer_wheel::TimerEvent; + + let now = self.current_time_from_expiration_clock(); + + // NOTE: When necessary, the iterator returned from advance() will unset the + // timer node pointer in the `ValueEntry`, so we do not have to do it here. + let expired_keys = timer_wheel + .advance(now) + .filter_map(|event| { + // We do not have to do anything if event is `TimerEvent::Descheduled(_)` + // or `TimerEvent::Rescheduled(_)`. + if let TimerEvent::Expired(node) = event { + let entry_info = node.element.entry_info(); + let kh = entry_info.key_hash(); + Some((Arc::clone(&kh.key), kh.hash)) + } else { + None + } + }) + .collect::>(); + + for (key, hash) in expired_keys { + // Lock the key for removal if blocking removal notification is + // enabled. + let kl = self.maybe_key_lock(&key); + let _klg = if let Some(lock) = &kl { + Some(lock.lock().await) + } else { + None + }; + + // Remove the key from the map only when the entry is really + // expired. + let maybe_entry = self.cache.remove_if( + hash, + |k| k == &key, + |_, v| is_expired_by_per_entry_ttl(v.entry_info(), now), + ); + + if let Some(entry) = maybe_entry { + if eviction_state.is_notifier_enabled() { + eviction_state + .add_removed_entry(key, &entry, RemovalCause::Expired) + .await; + } + Self::handle_remove_without_timer_wheel(deqs, entry, &mut eviction_state.counters); + } else { + // Other thread might have updated or invalidated the entry + // already. We have nothing to do here as the `advance()` + // iterator has unset the timer node pointer in the old + // `ValueEntry`. (In the case of update, the timer node will be + // recreated for the new `ValueEntry` when it is processed by the + // `handle_upsert` method.) 
+ } + } + } + + async fn evict_expired_entries_using_deqs( + &self, + deqs: &mut MutexGuard<'_, Deques>, + timer_wheel: &mut TimerWheel, + batch_size: usize, + state: &mut EvictionState<'_, K, V>, + ) where + V: Clone, + { + use CacheRegion::{MainProbation as Probation, MainProtected as Protected, Window}; + + let now = self.current_time_from_expiration_clock(); + + if self.is_write_order_queue_enabled() { + self.remove_expired_wo(deqs, timer_wheel, batch_size, now, state) + .await; + } + + self.remove_expired_ao(Window, deqs, timer_wheel, batch_size, now, state) + .await; + self.remove_expired_ao(Probation, deqs, timer_wheel, batch_size, now, state) + .await; + self.remove_expired_ao(Protected, deqs, timer_wheel, batch_size, now, state) + .await; + } + + #[allow(clippy::too_many_arguments)] + #[inline] + async fn remove_expired_ao( + &self, + cache_region: CacheRegion, + deqs: &mut MutexGuard<'_, Deques>, + timer_wheel: &mut TimerWheel, + batch_size: usize, + now: Instant, + eviction_state: &mut EvictionState<'_, K, V>, + ) where + V: Clone, + { + let tti = &self.expiration_policy.time_to_idle(); + let va = &self.valid_after(); + let deq_name = cache_region.name(); + for _ in 0..batch_size { + // Peek the front node of the deque and check if it is expired. + let key_hash_cause = deqs + .select_mut(cache_region) + .0 + .peek_front() + .and_then(|node| { + // TODO: Skip the entry if it is dirty. See `evict_lru_entries` method as an example. + match is_entry_expired_ao_or_invalid(tti, va, node, now) { + (true, _) => Some(( + Arc::clone(node.element.key()), + node.element.hash(), + RemovalCause::Expired, + )), + (false, true) => Some(( + Arc::clone(node.element.key()), + node.element.hash(), + RemovalCause::Explicit, + )), + (false, false) => None, + } + }); + + if key_hash_cause.is_none() { + break; + } + + let (key, hash, cause) = key_hash_cause + .as_ref() + .map(|(k, h, c)| (k, *h, *c)) + .unwrap(); + + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(key); + let _klg = if let Some(lock) = &kl { + Some(lock.lock().await) + } else { + None + }; + + // Remove the key from the map only when the entry is really + // expired. This check is needed because it is possible that the entry in + // the map has been updated or deleted but its deque node we checked + // above has not been updated yet. + let maybe_entry = self.cache.remove_if( + hash, + |k| k == key, + |_, v| is_expired_entry_ao(tti, va, v, now), + ); + + if let Some(entry) = maybe_entry { + if eviction_state.is_notifier_enabled() { + let key = Arc::clone(key); + eviction_state.add_removed_entry(key, &entry, cause).await; + } + let (ao_deq, wo_deq) = deqs.select_mut(cache_region); + Self::handle_remove_with_deques( + cache_region.name(), + ao_deq, + wo_deq, + timer_wheel, + entry, + &mut eviction_state.counters, + ); + } else { + let (ao_deq, wo_deq) = deqs.select_mut(cache_region); + if !self.try_skip_updated_entry(key, hash, deq_name, ao_deq, wo_deq) { + break; + } + } + } + } + + #[inline] + fn try_skip_updated_entry( + &self, + key: &K, + hash: u64, + deq_name: &str, + deq: &mut Deque>, + write_order_deq: &mut Deque>, + ) -> bool { + if let Some(entry) = self.cache.get(hash, |k| (k.borrow() as &K) == key) { + if entry.is_dirty() { + // The key exists and the entry has been updated. + Deques::move_to_back_ao_in_deque(deq_name, deq, &entry); + Deques::move_to_back_wo_in_deque(write_order_deq, &entry); + true + } else { + // The key exists but something unexpected. 
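+                // (The entry is present but not marked dirty, which is not
+                // expected at this point; returning `false` tells the caller to
+                // stop processing the current deque.)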
+ false + } + } else { + // Skip this entry as the key might have been invalidated. Since the + // invalidated ValueEntry (which should be still in the write op queue) + // has a pointer to this node, move the node to the back of the deque + // instead of popping (dropping) it. + deq.move_front_to_back(); + true + } + } + + #[inline] + async fn remove_expired_wo( + &self, + deqs: &mut Deques, + timer_wheel: &mut TimerWheel, + batch_size: usize, + now: Instant, + eviction_state: &mut EvictionState<'_, K, V>, + ) where + V: Clone, + { + let ttl = &self.expiration_policy.time_to_live(); + let va = &self.valid_after(); + for _ in 0..batch_size { + let key_cause = deqs.write_order.peek_front().and_then( + // TODO: Skip the entry if it is dirty. See `evict_lru_entries` method as an example. + |node| match is_entry_expired_wo_or_invalid(ttl, va, node, now) { + (true, _) => Some((Arc::clone(node.element.key()), RemovalCause::Expired)), + (false, true) => Some((Arc::clone(node.element.key()), RemovalCause::Explicit)), + (false, false) => None, + }, + ); + + if key_cause.is_none() { + break; + } + + let (key, cause) = key_cause.as_ref().unwrap(); + let hash = self.hash(key); + + // Lock the key for removal if blocking removal notification is enabled. + let kl = self.maybe_key_lock(key); + let _klg = if let Some(lock) = &kl { + Some(lock.lock().await) + } else { + None + }; + + let maybe_entry = self.cache.remove_if( + hash, + |k| k == key, + |_, v| is_expired_entry_wo(ttl, va, v, now), + ); + + if let Some(entry) = maybe_entry { + if eviction_state.is_notifier_enabled() { + let key = Arc::clone(key); + eviction_state.add_removed_entry(key, &entry, *cause).await; + } + Self::handle_remove(deqs, timer_wheel, entry, &mut eviction_state.counters); + } else if let Some(entry) = self.cache.get(hash, |k| k == key) { + if entry.is_dirty() { + deqs.move_to_back_ao(&entry); + deqs.move_to_back_wo(&entry); + } else { + // The key exists but something unexpected. Break. + break; + } + } else { + // Skip this entry as the key might have been invalidated. Since the + // invalidated ValueEntry (which should be still in the write op + // queue) has a pointer to this node, move the node to the back of + // the deque instead of popping (dropping) it. + deqs.write_order.move_front_to_back(); + } + } + } + + async fn invalidate_entries( + &self, + invalidator: &Invalidator, + deqs: &mut Deques, + timer_wheel: &mut TimerWheel, + batch_size: usize, + eviction_state: &mut EvictionState<'_, K, V>, + ) where + V: Clone, + { + let now = self.current_time_from_expiration_clock(); + + // If the write order queue is empty, we are done and can remove the predicates + // that have been registered by now. 
+ if deqs.write_order.len() == 0 { + invalidator.remove_predicates_registered_before(now).await; + return; + } + + let mut candidates = Vec::with_capacity(batch_size); + let mut len = 0; + let has_next; + { + let iter = &mut deqs.write_order.peekable(); + + while len < batch_size { + if let Some(kd) = iter.next() { + if !kd.is_dirty() { + if let Some(ts) = kd.last_modified() { + let key = kd.key(); + let hash = self.hash(key); + candidates.push(KeyDateLite::new(key, hash, ts)); + len += 1; + } + } + } else { + break; + } + } + + has_next = iter.peek().is_some(); + } + + if len == 0 { + return; + } + + let is_truncated = len == batch_size && has_next; + let (invalidated, is_done) = invalidator + .scan_and_invalidate(self, candidates, is_truncated) + .await; + + for KvEntry { key: _key, entry } in invalidated { + Self::handle_remove(deqs, timer_wheel, entry, &mut eviction_state.counters); + } + if is_done { + deqs.write_order.reset_cursor(); + } + } + + async fn evict_lru_entries( + &self, + deqs: &mut Deques, + timer_wheel: &mut TimerWheel, + batch_size: usize, + weights_to_evict: u64, + eviction_state: &mut EvictionState<'_, K, V>, + ) where + V: Clone, + { + const DEQ_NAME: &str = "probation"; + let mut evicted = 0u64; + + for _ in 0..batch_size { + if evicted >= weights_to_evict { + break; + } + + let maybe_key_hash_ts = deqs + .select_mut(CacheRegion::MainProbation) + .0 + .peek_front() + .map(|node| { + let entry_info = node.element.entry_info(); + ( + Arc::clone(node.element.key()), + node.element.hash(), + entry_info.is_dirty(), + entry_info.last_modified(), + ) + }); + + let (key, hash, ts) = match maybe_key_hash_ts { + Some((key, hash, false, Some(ts))) => (key, hash, ts), + // TODO: Remove the second pattern `Some((_key, false, None))` once we change + // `last_modified` and `last_accessed` in `EntryInfo` from `Option` to + // `Instant`. + Some((key, hash, true, _)) | Some((key, hash, false, None)) => { + let (deq, write_order_deq) = deqs.select_mut(CacheRegion::MainProbation); + if self.try_skip_updated_entry(&key, hash, DEQ_NAME, deq, write_order_deq) { + continue; + } else { + break; + } + } + None => break, + }; + + // Lock the key for removal if blocking removal notification is enabled. 
+ let kl = self.maybe_key_lock(&key); + let _klg = if let Some(lock) = &kl { + Some(lock.lock().await) + } else { + None + }; + + let maybe_entry = self.cache.remove_if( + hash, + |k| k == &key, + |_, v| { + if let Some(lm) = v.last_modified() { + lm == ts + } else { + false + } + }, + ); + + if let Some(entry) = maybe_entry { + if eviction_state.is_notifier_enabled() { + eviction_state + .add_removed_entry(key, &entry, RemovalCause::Size) + .await; + } + let weight = entry.policy_weight(); + let (deq, write_order_deq) = deqs.select_mut(CacheRegion::MainProbation); + Self::handle_remove_with_deques( + DEQ_NAME, + deq, + write_order_deq, + timer_wheel, + entry, + &mut eviction_state.counters, + ); + evicted = evicted.saturating_add(weight as u64); + } else { + let (deq, write_order_deq) = deqs.select_mut(CacheRegion::MainProbation); + if !self.try_skip_updated_entry(&key, hash, DEQ_NAME, deq, write_order_deq) { + break; + } + } + } + } +} + +impl Inner +where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, +{ + async fn notify_single_removal( + &self, + key: Arc, + entry: &TrioArc>, + cause: RemovalCause, + ) { + if let Some(notifier) = &self.removal_notifier { + notifier.notify(key, entry.value.clone(), cause).await + } + } + + #[inline] + async fn notify_upsert( + &self, + key: Arc, + entry: &TrioArc>, + last_accessed: Option, + last_modified: Option, + ) { + let now = self.current_time_from_expiration_clock(); + let exp = &self.expiration_policy; + + let mut cause = RemovalCause::Replaced; + + if let Some(last_accessed) = last_accessed { + if is_expired_by_tti(&exp.time_to_idle(), last_accessed, now) { + cause = RemovalCause::Expired; + } + } + + if let Some(last_modified) = last_modified { + if is_expired_by_ttl(&exp.time_to_live(), last_modified, now) { + cause = RemovalCause::Expired; + } else if is_invalid_entry(&self.valid_after(), last_modified) { + cause = RemovalCause::Explicit; + } + } + + self.notify_single_removal(key, entry, cause).await; + } + + #[inline] + async fn notify_invalidate(&self, key: &Arc, entry: &TrioArc>) { + let now = self.current_time_from_expiration_clock(); + let exp = &self.expiration_policy; + + let mut cause = RemovalCause::Explicit; + + if let Some(last_accessed) = entry.last_accessed() { + if is_expired_by_tti(&exp.time_to_idle(), last_accessed, now) { + cause = RemovalCause::Expired; + } + } + + if let Some(last_modified) = entry.last_modified() { + if is_expired_by_ttl(&exp.time_to_live(), last_modified, now) { + cause = RemovalCause::Expired; + } + } + + self.notify_single_removal(Arc::clone(key), entry, cause) + .await; + } +} + +// +// for testing +// +#[cfg(test)] +impl Inner +where + K: Hash + Eq, + S: BuildHasher + Clone, +{ + fn invalidation_predicate_count(&self) -> usize { + if let Some(inv) = &self.invalidator { + inv.predicate_count() + } else { + 0 + } + } + + async fn set_expiration_clock(&self, clock: Option) { + // Acquire the lock for the clocks to prevent other threads from + // updating the expiration clock while we are setting it. 
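+        // (The `_lock` mutex is held while the mock clock is swapped in or out so
+        // that concurrent callers of this test-only helper cannot interleave; the
+        // surrounding impl block is gated by `#[cfg(test)]`.)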
+ let _clocks_lock = self.clocks._lock.lock(); + + if let Some(clock) = clock { + let std_now = StdInstant::now(); + let now = Instant::new(clock.now()); + *(self.clocks.expiration_clock.write()) = Some(clock); + self.clocks + .has_expiration_clock + .store(true, Ordering::SeqCst); + self.clocks.set_origin(now, std_now); + self.timer_wheel.lock().await.set_origin(now); + } else { + self.clocks + .has_expiration_clock + .store(false, Ordering::SeqCst); + *(self.clocks.expiration_clock.write()) = None; + } + } + + fn key_locks_map_is_empty(&self) -> bool { + self.key_locks + .as_ref() + .map(|m| m.is_empty()) + // If key_locks is None, consider it is empty. + .unwrap_or(true) + } +} + +// +// private free-standing functions +// + +/// Returns `true` if this entry is expired by its per-entry TTL. +#[inline] +fn is_expired_by_per_entry_ttl(entry_info: &TrioArc>, now: Instant) -> bool { + if let Some(ts) = entry_info.expiration_time() { + return ts <= now; + } + false +} + +/// Returns `true` when one of the followings conditions is met: +/// +/// - This entry is expired by the time-to-idle config of this cache instance. +/// - Or, it is invalidated by the `invalidate_all` method. +#[inline] +fn is_expired_entry_ao( + time_to_idle: &Option, + valid_after: &Option, + entry: &impl AccessTime, + now: Instant, +) -> bool { + if let Some(ts) = entry.last_accessed() { + if is_invalid_entry(valid_after, ts) || is_expired_by_tti(time_to_idle, ts, now) { + return true; + } + } + false +} + +/// Returns `true` when one of the following conditions is met: +/// +/// - This entry is expired by the time-to-live (TTL) config of this cache instance. +/// - Or, it is invalidated by the `invalidate_all` method. +#[inline] +fn is_expired_entry_wo( + time_to_live: &Option, + valid_after: &Option, + entry: &impl AccessTime, + now: Instant, +) -> bool { + if let Some(ts) = entry.last_modified() { + if is_invalid_entry(valid_after, ts) || is_expired_by_ttl(time_to_live, ts, now) { + return true; + } + } + false +} + +#[inline] +fn is_entry_expired_ao_or_invalid( + time_to_idle: &Option, + valid_after: &Option, + entry: &impl AccessTime, + now: Instant, +) -> (bool, bool) { + if let Some(ts) = entry.last_accessed() { + let expired = is_expired_by_tti(time_to_idle, ts, now); + let invalid = is_invalid_entry(valid_after, ts); + return (expired, invalid); + } + (false, false) +} + +#[inline] +fn is_entry_expired_wo_or_invalid( + time_to_live: &Option, + valid_after: &Option, + entry: &impl AccessTime, + now: Instant, +) -> (bool, bool) { + if let Some(ts) = entry.last_modified() { + let expired = is_expired_by_ttl(time_to_live, ts, now); + let invalid = is_invalid_entry(valid_after, ts); + return (expired, invalid); + } + (false, false) +} + +#[inline] +fn is_invalid_entry(valid_after: &Option, entry_ts: Instant) -> bool { + if let Some(va) = valid_after { + if entry_ts < *va { + return true; + } + } + false +} + +#[inline] +fn is_expired_by_tti( + time_to_idle: &Option, + entry_last_accessed: Instant, + now: Instant, +) -> bool { + if let Some(tti) = time_to_idle { + let checked_add = entry_last_accessed.checked_add(*tti); + if checked_add.is_none() { + panic!("tti overflow") + } + return checked_add.unwrap() <= now; + } + false +} + +#[inline] +fn is_expired_by_ttl( + time_to_live: &Option, + entry_last_modified: Instant, + now: Instant, +) -> bool { + if let Some(ttl) = time_to_live { + let checked_add = entry_last_modified.checked_add(*ttl); + if checked_add.is_none() { + panic!("ttl overflow"); + } + return 
checked_add.unwrap() <= now; + } + false +} + +#[cfg(test)] +mod tests { + use crate::policy::ExpirationPolicy; + + use super::BaseCache; + + #[cfg_attr(target_pointer_width = "16", ignore)] + #[tokio::test] + async fn test_skt_capacity_will_not_overflow() { + use std::collections::hash_map::RandomState; + + // power of two + let pot = |exp| 2u64.pow(exp); + + async fn ensure_sketch_len(max_capacity: u64, len: u64, name: &str) { + let cache = BaseCache::::new( + None, + Some(max_capacity), + None, + RandomState::default(), + None, + None, + Default::default(), + false, + ); + cache.inner.enable_frequency_sketch_for_testing().await; + assert_eq!( + cache.inner.frequency_sketch.read().await.table_len(), + len as usize, + "{}", + name + ); + } + + if cfg!(target_pointer_width = "32") { + let pot24 = pot(24); + let pot16 = pot(16); + ensure_sketch_len(0, 128, "0").await; + ensure_sketch_len(128, 128, "128").await; + ensure_sketch_len(pot16, pot16, "pot16").await; + // due to ceiling to next_power_of_two + ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1").await; + // due to ceiling to next_power_of_two + ensure_sketch_len(pot24 - 1, pot24, "pot24 - 1").await; + ensure_sketch_len(pot24, pot24, "pot24").await; + ensure_sketch_len(pot(27), pot24, "pot(27)").await; + ensure_sketch_len(u32::MAX as u64, pot24, "u32::MAX").await; + } else { + // target_pointer_width: 64 or larger. + let pot30 = pot(30); + let pot16 = pot(16); + ensure_sketch_len(0, 128, "0").await; + ensure_sketch_len(128, 128, "128").await; + ensure_sketch_len(pot16, pot16, "pot16").await; + // due to ceiling to next_power_of_two + ensure_sketch_len(pot16 + 1, pot(17), "pot16 + 1").await; + + // The following tests will allocate large memory (~8GiB). + // Skip when running on Circle CI. + if !cfg!(circleci) { + // due to ceiling to next_power_of_two + ensure_sketch_len(pot30 - 1, pot30, "pot30- 1").await; + ensure_sketch_len(pot30, pot30, "pot30").await; + ensure_sketch_len(u64::MAX, pot30, "u64::MAX").await; + } + }; + } + + #[tokio::test] + async fn test_per_entry_expiration() { + use crate::{common::time::Clock, Entry, Expiry}; + + use std::{ + collections::hash_map::RandomState, + sync::{Arc, Mutex}, + time::{Duration, Instant as StdInstant}, + }; + + type Key = u32; + type Value = char; + + fn current_time(cache: &BaseCache) -> StdInstant { + cache + .inner + .clocks() + .to_std_instant(cache.current_time_from_expiration_clock()) + } + + async fn insert(cache: &BaseCache, key: Key, hash: u64, value: Value) { + let (op, _now) = cache.do_insert_with_hash(Arc::new(key), hash, value).await; + cache.write_op_ch.send(op).expect("Failed to send"); + } + + fn never_ignore<'a, V>() -> Option<&'a mut fn(&V) -> bool> { + None + } + + macro_rules! assert_params_eq { + ($left:expr, $right:expr, $param_name:expr, $line:expr) => { + assert_eq!( + $left, $right, + "Mismatched `{}`s. line: {}", + $param_name, $line + ); + }; + } + + macro_rules! assert_expiry { + ($cache:ident, $key:ident, $hash:ident, $mock:ident, $duration_secs:expr) => { + // Increment the time. + $mock.increment(Duration::from_millis($duration_secs * 1000 - 1)); + $cache.inner.do_run_pending_tasks(1).await; + assert!($cache.contains_key_with_hash(&$key, $hash)); + assert_eq!($cache.entry_count(), 1); + + // Increment the time by 1ms (3). The entry should be expired. 
+ $mock.increment(Duration::from_millis(1)); + $cache.inner.do_run_pending_tasks(1).await; + assert!(!$cache.contains_key_with_hash(&$key, $hash)); + + // Increment the time again to ensure the entry has been evicted from the + // cache. + $mock.increment(Duration::from_secs(1)); + $cache.inner.do_run_pending_tasks(1).await; + assert_eq!($cache.entry_count(), 0); + }; + } + + /// Contains expected call parameters and also a return value. + #[derive(Debug)] + enum ExpiryExpectation { + NoCall, + AfterCreate { + caller_line: u32, + key: Key, + value: Value, + current_time: StdInstant, + new_duration_secs: Option, + }, + AfterRead { + caller_line: u32, + key: Key, + value: Value, + current_time: StdInstant, + current_duration_secs: Option, + last_modified_at: StdInstant, + new_duration_secs: Option, + }, + AfterUpdate { + caller_line: u32, + key: Key, + value: Value, + current_time: StdInstant, + current_duration_secs: Option, + new_duration_secs: Option, + }, + } + + impl ExpiryExpectation { + fn after_create( + caller_line: u32, + key: Key, + value: Value, + current_time: StdInstant, + new_duration_secs: Option, + ) -> Self { + Self::AfterCreate { + caller_line, + key, + value, + current_time, + new_duration_secs, + } + } + + fn after_read( + caller_line: u32, + key: Key, + value: Value, + current_time: StdInstant, + current_duration_secs: Option, + last_modified_at: StdInstant, + new_duration_secs: Option, + ) -> Self { + Self::AfterRead { + caller_line, + key, + value, + current_time, + current_duration_secs, + last_modified_at, + new_duration_secs, + } + } + + fn after_update( + caller_line: u32, + key: Key, + value: Value, + current_time: StdInstant, + current_duration_secs: Option, + new_duration_secs: Option, + ) -> Self { + Self::AfterUpdate { + caller_line, + key, + value, + current_time, + current_duration_secs, + new_duration_secs, + } + } + } + + let expectation = Arc::new(Mutex::new(ExpiryExpectation::NoCall)); + + struct MyExpiry { + expectation: Arc>, + } + + impl Expiry for MyExpiry { + fn expire_after_create( + &self, + actual_key: &u32, + actual_value: &char, + actual_current_time: StdInstant, + ) -> Option { + use ExpiryExpectation::*; + + let lock = &mut *self.expectation.lock().unwrap(); + let expected = std::mem::replace(lock, NoCall); + match expected { + AfterCreate { + caller_line, + key, + value, + current_time, + new_duration_secs: new_duration, + } => { + assert_params_eq!(*actual_key, key, "key", caller_line); + assert_params_eq!(*actual_value, value, "value", caller_line); + assert_params_eq!( + actual_current_time, + current_time, + "current_time", + caller_line + ); + new_duration.map(Duration::from_secs) + } + expected => { + panic!("Unexpected call to expire_after_create: caller_line {}, expected: {:?}", + line!(), expected + ); + } + } + } + + fn expire_after_read( + &self, + actual_key: &u32, + actual_value: &char, + actual_current_time: StdInstant, + actual_current_duration: Option, + actual_last_modified_at: StdInstant, + ) -> Option { + use ExpiryExpectation::*; + + let lock = &mut *self.expectation.lock().unwrap(); + let expected = std::mem::replace(lock, NoCall); + match expected { + AfterRead { + caller_line, + key, + value, + current_time, + current_duration_secs, + last_modified_at, + new_duration_secs, + } => { + assert_params_eq!(*actual_key, key, "key", caller_line); + assert_params_eq!(*actual_value, value, "value", caller_line); + assert_params_eq!( + actual_current_time, + current_time, + "current_time", + caller_line + ); + 
assert_params_eq!( + actual_current_duration, + current_duration_secs.map(Duration::from_secs), + "current_duration", + caller_line + ); + assert_params_eq!( + actual_last_modified_at, + last_modified_at, + "last_modified_at", + caller_line + ); + new_duration_secs.map(Duration::from_secs) + } + expected => { + panic!( + "Unexpected call to expire_after_read: caller_line {}, expected: {:?}", + line!(), + expected + ); + } + } + } + + fn expire_after_update( + &self, + actual_key: &u32, + actual_value: &char, + actual_current_time: StdInstant, + actual_current_duration: Option, + ) -> Option { + use ExpiryExpectation::*; + + let lock = &mut *self.expectation.lock().unwrap(); + let expected = std::mem::replace(lock, NoCall); + match expected { + AfterUpdate { + caller_line, + key, + value, + current_time, + current_duration_secs, + new_duration_secs, + } => { + assert_params_eq!(*actual_key, key, "key", caller_line); + assert_params_eq!(*actual_value, value, "value", caller_line); + assert_params_eq!( + actual_current_time, + current_time, + "current_time", + caller_line + ); + assert_params_eq!( + actual_current_duration, + current_duration_secs.map(Duration::from_secs), + "current_duration", + caller_line + ); + new_duration_secs.map(Duration::from_secs) + } + expected => { + panic!("Unexpected call to expire_after_update: caller_line {}, expected: {:?}", + line!(), expected + ); + } + } + } + } + + const TTL: u64 = 16; + const TTI: u64 = 7; + let expiry: Option + Send + Sync + 'static>> = + Some(Arc::new(MyExpiry { + expectation: Arc::clone(&expectation), + })); + + let mut cache = BaseCache::::new( + None, + None, + None, + RandomState::default(), + None, + None, + ExpirationPolicy::new( + Some(Duration::from_secs(TTL)), + Some(Duration::from_secs(TTI)), + expiry, + ), + false, + ); + cache.reconfigure_for_testing().await; + + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)).await; + + // Make the cache exterior immutable. + let cache = cache; + + mock.increment(Duration::from_millis(10)); + + // ---------------------------------------------------- + // Case 1 + // + // 1. 0s: Insert with per-entry TTL 1s. + // 2. +1s: Expires. + // ---------------------------------------------------- + + // Insert an entry (1). It will have a per-entry TTL of 1 second. + let key = 1; + let hash = cache.hash(&key); + let value = 'a'; + + *expectation.lock().unwrap() = + ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(1)); + + insert(&cache, key, hash, value).await; + // Run a sync to register the entry to the internal data structures including + // the timer wheel. + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + assert_expiry!(cache, key, hash, mock, 1); + + // ---------------------------------------------------- + // Case 2 + // + // 1. 0s: Insert with no per-entry TTL. + // 2. +1s: Get with per-entry TTL 3s. + // 3. +3s: Expires. + // ---------------------------------------------------- + + // Insert an entry (1). + let key = 2; + let hash = cache.hash(&key); + let value = 'b'; + + *expectation.lock().unwrap() = + ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None); + let inserted_at = current_time(&cache); + insert(&cache, key, hash, value).await; + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + // Increment the time. 
+ mock.increment(Duration::from_secs(1)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + + // Read the entry (2). + *expectation.lock().unwrap() = ExpiryExpectation::after_read( + line!(), + key, + value, + current_time(&cache), + Some(TTI - 1), + inserted_at, + Some(3), + ); + assert_eq!( + cache + .get_with_hash(&key, hash, never_ignore(), false, true) + .await + .map(Entry::into_value), + Some(value) + ); + cache.inner.do_run_pending_tasks(1).await; + + assert_expiry!(cache, key, hash, mock, 3); + + // ---------------------------------------------------- + // Case 3 + // + // 1. 0s: Insert with no per-entry TTL. + // 2. +1s: Get with no per-entry TTL. + // 3. +2s: Update with per-entry TTL 3s. + // 4. +3s: Expires. + // ---------------------------------------------------- + + // Insert an entry (1). + let key = 3; + let hash = cache.hash(&key); + let value = 'c'; + + *expectation.lock().unwrap() = + ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None); + let inserted_at = current_time(&cache); + insert(&cache, key, hash, value).await; + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + // Increment the time. + mock.increment(Duration::from_secs(1)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + + // Read the entry (2). + *expectation.lock().unwrap() = ExpiryExpectation::after_read( + line!(), + key, + value, + current_time(&cache), + Some(TTI - 1), + inserted_at, + None, + ); + assert_eq!( + cache + .get_with_hash(&key, hash, never_ignore(), false, true) + .await + .map(Entry::into_value), + Some(value) + ); + cache.inner.do_run_pending_tasks(1).await; + + // Increment the time. + mock.increment(Duration::from_secs(2)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + assert_eq!(cache.entry_count(), 1); + + // Update the entry (3). + *expectation.lock().unwrap() = ExpiryExpectation::after_update( + line!(), + key, + value, + current_time(&cache), + // TTI should be reset by this update. + Some(TTI), + Some(3), + ); + insert(&cache, key, hash, value).await; + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + assert_expiry!(cache, key, hash, mock, 3); + + // ---------------------------------------------------- + // Case 4 + // + // 1. 0s: Insert with no per-entry TTL. + // 2. +1s: Get with no per-entry TTL. + // 3. +2s: Update with no per-entry TTL. + // 4. +7s: Expires by TTI (7s from step 3). + // ---------------------------------------------------- + + // Insert an entry (1). + let key = 4; + let hash = cache.hash(&key); + let value = 'd'; + + *expectation.lock().unwrap() = + ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), None); + let inserted_at = current_time(&cache); + insert(&cache, key, hash, value).await; + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + // Increment the time. + mock.increment(Duration::from_secs(1)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + assert_eq!(cache.entry_count(), 1); + + // Read the entry (2). 
+ *expectation.lock().unwrap() = ExpiryExpectation::after_read( + line!(), + key, + value, + current_time(&cache), + Some(TTI - 1), + inserted_at, + None, + ); + assert_eq!( + cache + .get_with_hash(&key, hash, never_ignore(), false, true) + .await + .map(Entry::into_value), + Some(value) + ); + cache.inner.do_run_pending_tasks(1).await; + + // Increment the time. + mock.increment(Duration::from_secs(2)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + assert_eq!(cache.entry_count(), 1); + + // Update the entry (3). + *expectation.lock().unwrap() = ExpiryExpectation::after_update( + line!(), + key, + value, + current_time(&cache), + // TTI should be reset by this update. + Some(TTI), + None, + ); + insert(&cache, key, hash, value).await; + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + assert_expiry!(cache, key, hash, mock, 7); + + // ---------------------------------------------------- + // Case 5 + // + // 1. 0s: Insert with per-entry TTL 8s. + // 2. +5s: Get with per-entry TTL 8s. + // 3. +7s: Expires by TTI (7s). + // ---------------------------------------------------- + + // Insert an entry. + let key = 5; + let hash = cache.hash(&key); + let value = 'e'; + + *expectation.lock().unwrap() = + ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(8)); + let inserted_at = current_time(&cache); + insert(&cache, key, hash, value).await; + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + // Increment the time. + mock.increment(Duration::from_secs(5)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + assert_eq!(cache.entry_count(), 1); + + // Read the entry. + *expectation.lock().unwrap() = ExpiryExpectation::after_read( + line!(), + key, + value, + current_time(&cache), + Some(TTI - 5), + inserted_at, + Some(8), + ); + assert_eq!( + cache + .get_with_hash(&key, hash, never_ignore(), false, true) + .await + .map(Entry::into_value), + Some(value) + ); + cache.inner.do_run_pending_tasks(1).await; + + assert_expiry!(cache, key, hash, mock, 7); + + // ---------------------------------------------------- + // Case 6 + // + // 1. 0s: Insert with per-entry TTL 8s. + // 2. +5s: Get with per-entry TTL 9s. + // 3. +6s: Get with per-entry TTL 10s. + // 4. +5s: Expires by TTL (16s). + // ---------------------------------------------------- + + // Insert an entry. + let key = 6; + let hash = cache.hash(&key); + let value = 'f'; + + *expectation.lock().unwrap() = + ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(8)); + let inserted_at = current_time(&cache); + insert(&cache, key, hash, value).await; + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + // Increment the time. + mock.increment(Duration::from_secs(5)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + assert_eq!(cache.entry_count(), 1); + + // Read the entry. + *expectation.lock().unwrap() = ExpiryExpectation::after_read( + line!(), + key, + value, + current_time(&cache), + Some(TTI - 5), + inserted_at, + Some(9), + ); + assert_eq!( + cache + .get_with_hash(&key, hash, never_ignore(), false, true) + .await + .map(Entry::into_value), + Some(value) + ); + cache.inner.do_run_pending_tasks(1).await; + + // Increment the time. 
+ mock.increment(Duration::from_secs(6)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + assert_eq!(cache.entry_count(), 1); + + // Read the entry. + *expectation.lock().unwrap() = ExpiryExpectation::after_read( + line!(), + key, + value, + current_time(&cache), + Some(TTI - 6), + inserted_at, + Some(10), + ); + assert_eq!( + cache + .get_with_hash(&key, hash, never_ignore(), false, true) + .await + .map(Entry::into_value), + Some(value) + ); + cache.inner.do_run_pending_tasks(1).await; + + assert_expiry!(cache, key, hash, mock, 5); + + // ---------------------------------------------------- + // Case 7 + // + // 1. 0s: Insert with per-entry TTL 9s. + // 2. +6s: Update with per-entry TTL 8s. + // 3. +6s: Get with per-entry TTL 9s + // 4. +6s: Get with per-entry TTL 5s. + // 5. +4s: Expires by TTL (16s from step 2). + // ---------------------------------------------------- + // Insert an entry. + let key = 7; + let hash = cache.hash(&key); + let value = 'g'; + + *expectation.lock().unwrap() = + ExpiryExpectation::after_create(line!(), key, value, current_time(&cache), Some(9)); + insert(&cache, key, hash, value).await; + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + // Increment the time. + mock.increment(Duration::from_secs(6)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + assert_eq!(cache.entry_count(), 1); + + // Update the entry (3). + *expectation.lock().unwrap() = ExpiryExpectation::after_update( + line!(), + key, + value, + current_time(&cache), + // From the per-entry TTL. + Some(9 - 6), + Some(8), + ); + let updated_at = current_time(&cache); + insert(&cache, key, hash, value).await; + cache.inner.do_run_pending_tasks(1).await; + assert_eq!(cache.entry_count(), 1); + + // Increment the time. + mock.increment(Duration::from_secs(6)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + assert_eq!(cache.entry_count(), 1); + + // Read the entry. + *expectation.lock().unwrap() = ExpiryExpectation::after_read( + line!(), + key, + value, + current_time(&cache), + Some(TTI - 6), + updated_at, + Some(9), + ); + assert_eq!( + cache + .get_with_hash(&key, hash, never_ignore(), false, true) + .await + .map(Entry::into_value), + Some(value) + ); + cache.inner.do_run_pending_tasks(1).await; + + // Increment the time. + mock.increment(Duration::from_secs(6)); + cache.inner.do_run_pending_tasks(1).await; + assert!(cache.contains_key_with_hash(&key, hash)); + assert_eq!(cache.entry_count(), 1); + + // Read the entry. 
+ *expectation.lock().unwrap() = ExpiryExpectation::after_read( + line!(), + key, + value, + current_time(&cache), + Some(TTI - 6), + updated_at, + Some(5), + ); + assert_eq!( + cache + .get_with_hash(&key, hash, never_ignore(), false, true) + .await + .map(Entry::into_value), + Some(value) + ); + cache.inner.do_run_pending_tasks(1).await; + + assert_expiry!(cache, key, hash, mock, 4); + } +} diff --git a/src/future/builder.rs b/src/future/builder.rs index e3055944..4772ef13 100644 --- a/src/future/builder.rs +++ b/src/future/builder.rs @@ -1,7 +1,7 @@ -use super::Cache; +use super::{Cache, FutureExt}; use crate::{ common::{builder_utils, concurrent::Weigher}, - notification::{self, DeliveryMode, EvictionListener, RemovalCause}, + notification::{AsyncEvictionListener, ListenerFuture, RemovalCause}, policy::ExpirationPolicy, Expiry, }; @@ -24,7 +24,7 @@ use std::{ /// // Cargo.toml /// // /// // [dependencies] -/// // moka = { version = "0.11", features = ["future"] } +/// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// // futures = "0.3" /// @@ -60,8 +60,7 @@ pub struct CacheBuilder { max_capacity: Option, initial_capacity: Option, weigher: Option>, - eviction_listener: Option>, - eviction_listener_conf: Option, + eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, cache_type: PhantomData, @@ -79,7 +78,6 @@ where initial_capacity: None, weigher: None, eviction_listener: None, - eviction_listener_conf: None, expiration_policy: Default::default(), invalidator_enabled: false, cache_type: Default::default(), @@ -119,10 +117,8 @@ where build_hasher, self.weigher, self.eviction_listener, - self.eviction_listener_conf, self.expiration_policy, self.invalidator_enabled, - builder_utils::housekeeper_conf(true), ) } @@ -217,10 +213,8 @@ where hasher, self.weigher, self.eviction_listener, - self.eviction_listener_conf, self.expiration_policy, self.invalidator_enabled, - builder_utils::housekeeper_conf(true), ) } } @@ -262,11 +256,22 @@ impl CacheBuilder { } } - /// Sets the eviction listener closure to the cache. + /// Sets the eviction listener closure to the cache. The closure should take + /// `Arc`, `V` and [`RemovalCause`][removal-cause] as the arguments. The + /// [immediate delivery mode][immediate-mode] is used for the listener. /// - /// The closure should take `Arc`, `V` and [`RemovalCause`][removal-cause] as - /// the arguments. The [queued delivery mode][queued-mode] is used for the - /// listener. + /// See [this example][example] for a usage of eviction listener. + /// + /// # Sync or Async Eviction Listener + /// + /// The closure can be either synchronous or asynchronous, and `CacheBuilder` + /// provides two methods for setting the eviction listener closure: + /// + /// - If you do not need to `.await` anything in the eviction listener, use this + /// `eviction_listener` method. + /// - If you need to `.await` something in the eviction listener, use + /// [`async_eviction_listener`](#method.async_eviction_listener) method + /// instead. /// /// # Panics /// @@ -276,17 +281,57 @@ impl CacheBuilder { /// call the panicked lister again. 
///
/// [removal-cause]: ../notification/enum.RemovalCause.html
- /// [queued-mode]: ../notification/enum.DeliveryMode.html#variant.Queued
- pub fn eviction_listener_with_queued_delivery_mode(
- self,
- listener: impl Fn(Arc, V, RemovalCause) + Send + Sync + 'static,
- ) -> Self {
- let conf = notification::Configuration::builder()
- .delivery_mode(DeliveryMode::Queued)
- .build();
+ /// [immediate-mode]: ../notification/enum.DeliveryMode.html#variant.Immediate
+ /// [example]: ./struct.Cache.html#per-entry-expiration-policy
+ pub fn eviction_listener(self, listener: F) -> Self
+ where
+ F: Fn(Arc, V, RemovalCause) + Send + Sync + 'static,
+ {
+ let async_listener = move |k, v, c| {
+ {
+ listener(k, v, c);
+ std::future::ready(())
+ }
+ .boxed()
+ };
+
+ self.async_eviction_listener(async_listener)
+ }
+
+ /// Sets the eviction listener closure to the cache. The closure should take
+ /// `Arc`, `V` and [`RemovalCause`][removal-cause] as the arguments, and
+ /// return a [`ListenerFuture`][listener-future]. The
+ /// [immediate delivery mode][immediate-mode] is used for the listener.
+ ///
+ /// See [this example][example] for how to use an asynchronous eviction listener.
+ ///
+ /// # Sync or Async Eviction Listener
+ ///
+ /// The closure can be either synchronous or asynchronous, and `CacheBuilder`
+ /// provides two methods for setting the eviction listener closure:
+ ///
+ /// - If you do not need to `.await` anything in the eviction listener, use the
+ /// [`eviction_listener`](#method.eviction_listener) method instead.
+ /// - If you need to `.await` something in the eviction listener, use
+ /// this method.
+ ///
+ /// # Panics
+ ///
+ /// It is very important to make the listener closure not to panic. Otherwise,
+ /// the cache will stop calling the listener after a panic. This is an intended
+ /// behavior because the cache cannot know whether it is memory safe or not to
+ /// call the panicked listener again.
+ /// + /// [removal-cause]: ../notification/enum.RemovalCause.html + /// [listener-future]: ../notification/type.ListenerFuture.html + /// [immediate-mode]: ../notification/enum.DeliveryMode.html#variant.Immediate + /// [example]: ./struct.Cache.html#example-eviction-listener + pub fn async_eviction_listener(self, listener: F) -> Self + where + F: Fn(Arc, V, RemovalCause) -> ListenerFuture + Send + Sync + 'static, + { Self { - eviction_listener: Some(Arc::new(listener)), - eviction_listener_conf: Some(conf), + eviction_listener: Some(Box::new(listener)), ..self } } @@ -369,7 +414,7 @@ mod tests { assert_eq!(policy.num_segments(), 1); cache.insert('a', "Alice").await; - assert_eq!(cache.get(&'a'), Some("Alice")); + assert_eq!(cache.get(&'a').await, Some("Alice")); let cache = CacheBuilder::new(100) .time_to_live(Duration::from_secs(45 * 60)) @@ -383,7 +428,7 @@ mod tests { assert_eq!(policy.num_segments(), 1); cache.insert('a', "Alice").await; - assert_eq!(cache.get(&'a'), Some("Alice")); + assert_eq!(cache.get(&'a').await, Some("Alice")); } #[tokio::test] diff --git a/src/future/cache.rs b/src/future/cache.rs index 4d783abb..83b3ff1d 100644 --- a/src/future/cache.rs +++ b/src/future/cache.rs @@ -1,26 +1,24 @@ use super::{ - value_initializer::{InitResult, ValueInitializer}, - CacheBuilder, ConcurrentCacheExt, Iter, OwnedKeyEntrySelector, PredicateId, - RefKeyEntrySelector, + base_cache::{BaseCache, HouseKeeperArc}, + housekeeper::InnerSync, + value_initializer::{GetOrInsert, InitResult, ValueInitializer}, + CacheBuilder, Iter, OwnedKeyEntrySelector, PredicateId, RefKeyEntrySelector, }; use crate::{ common::{ - concurrent::{ - constants::{MAX_SYNC_REPEATS, WRITE_RETRY_INTERVAL_MICROS}, - housekeeper::{self, InnerSync}, - Weigher, WriteOp, - }, + concurrent::{Weigher, WriteOp}, time::Instant, }, - notification::{self, EvictionListener}, + notification::AsyncEvictionListener, policy::ExpirationPolicy, - sync_base::base_cache::{BaseCache, HouseKeeperArc}, Entry, Policy, PredicateError, }; #[cfg(feature = "unstable-debug-counters")] use crate::common::concurrent::debug_counters::CacheDebugStats; +use async_lock::Mutex; +use async_trait::async_trait; use crossbeam_channel::{Sender, TrySendError}; use std::{ borrow::Borrow, @@ -30,16 +28,13 @@ use std::{ hash::{BuildHasher, Hash}, pin::Pin, sync::Arc, - time::Duration, }; /// A thread-safe, futures-aware concurrent in-memory cache. /// /// `Cache` supports full concurrency of retrievals and a high expected concurrency -/// for updates. It can be accessed inside and outside of asynchronous contexts. -/// -/// `Cache` utilizes a lock-free concurrent hash table as the central key-value -/// storage. `Cache` performs a best-effort bounding of the map using an entry +/// for updates. It utilizes a lock-free concurrent hash table as the central +/// key-value storage. It performs a best-effort bounding of the map using an entry /// replacement algorithm to determine which entries to evict when the capacity is /// exceeded. /// @@ -62,16 +57,9 @@ use std::{ /// /// # Example: `insert`, `get` and `invalidate` /// -/// Cache entries are manually added using an insert method, and are stored in the -/// cache until either evicted or manually invalidated: -/// -/// - Inside an async context (`async fn` or `async` block), use -/// [`insert`](#method.insert), [`get_with`](#method.get_with) or -/// [`invalidate`](#method.invalidate) methods for updating the cache and `await` -/// them. 
-/// - Outside any async context, use [`blocking`](#method.blocking) method to access
-/// blocking version of [`insert`](./struct.BlockingOp.html#method.insert) or
-/// [`invalidate`](struct.BlockingOp.html#method.invalidate) methods.
+/// Cache entries are manually added using the [`insert`](#method.insert) or
+/// [`get_with`](#method.get_with) methods, and are stored in the cache until
+/// either evicted or manually invalidated.
///
/// Here's an example of reading and updating a cache by using multiple asynchronous
/// tasks with [Tokio][tokio-crate] runtime:
///
@@ -82,7 +70,7 @@ use std::{
/// // Cargo.toml
/// //
/// // [dependencies]
-/// // moka = { version = "0.11", features = ["future"] }
+/// // moka = { version = "0.12", features = ["future"] }
/// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
/// // futures-util = "0.3"
///
@@ -112,15 +100,13 @@ use std::{
/// tokio::spawn(async move {
/// // Insert 64 entries. (NUM_KEYS_PER_TASK = 64)
/// for key in start..end {
-/// // insert() is an async method, so await it.
/// my_cache.insert(key, value(key)).await;
/// // get() returns Option, a clone of the stored value.
-/// assert_eq!(my_cache.get(&key), Some(value(key)));
+/// assert_eq!(my_cache.get(&key).await, Some(value(key)));
/// }
///
/// // Invalidate every 4 element of the inserted entries.
/// for key in (start..end).step_by(4) {
-/// // invalidate() is an async method, so await it.
/// my_cache.invalidate(&key).await;
/// }
/// })
@@ -133,9 +119,9 @@ use std::{
/// // Verify the result.
/// for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) {
/// if key % 4 == 0 {
-/// assert_eq!(cache.get(&key), None);
+/// assert_eq!(cache.get(&key).await, None);
/// } else {
-/// assert_eq!(cache.get(&key), Some(value(key)));
+/// assert_eq!(cache.get(&key).await, Some(value(key)));
/// }
/// }
/// }
@@ -212,7 +198,7 @@ use std::{
/// // Cargo.toml
/// //
/// // [dependencies]
-/// // moka = { version = "0.11", features = ["future"] }
+/// // moka = { version = "0.12", features = ["future"] }
/// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
/// // futures-util = "0.3"
///
@@ -280,7 +266,7 @@ use std::{
/// // Cargo.toml
/// //
/// // [dependencies]
-/// // moka = { version = "0.11", features = ["future"] }
+/// // moka = { version = "0.12", features = ["future"] }
/// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
/// // futures-util = "0.3"
///
@@ -329,10 +315,10 @@ use std::{
/// // Cargo.toml
/// //
/// // [dependencies]
-/// // moka = { version = "0.11", features = ["future"] }
+/// // moka = { version = "0.12", features = ["future"] }
/// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
///
-/// use moka::{future::Cache, Expiry};
+/// use moka::{future::{Cache, FutureExt}, Expiry, notification::ListenerFuture};
/// use std::time::{Duration, Instant};
///
/// // In this example, we will create a `future::Cache` with `u32` as the key, and
@@ -396,7 +382,7 @@ use std::{
/// let cache = Cache::builder()
/// .max_capacity(100)
/// .expire_after(expiry)
-/// .eviction_listener_with_queued_delivery_mode(eviction_listener)
+/// .eviction_listener(eviction_listener)
/// .build();
///
/// // Insert some entries into the cache with different expirations.
@@ -473,7 +459,9 @@ use std::{ /// // uuid = { version = "1.1", features = ["v4"] } /// // tokio = { version = "1.18", features = ["fs", "macros", "rt-multi-thread", "sync", "time"] } /// -/// use moka::future::Cache; +/// use moka::{future::Cache, notification::ListenerFuture}; +/// // FutureExt trait provides the boxed method. +/// use moka::future::FutureExt; /// /// use anyhow::{anyhow, Context}; /// use std::{ @@ -553,21 +541,26 @@ use std::{ /// let rt = tokio::runtime::Handle::current(); /// /// // Create an eviction lister closure. -/// let listener = move |k, v: PathBuf, cause| { -/// // Try to remove the data file at the path `v`. +/// let eviction_listener = move |k, v: PathBuf, cause| -> ListenerFuture { /// println!( /// "\n== An entry has been evicted. k: {:?}, v: {:?}, cause: {:?}", /// k, v, cause /// ); -/// rt.block_on(async { +/// let file_mgr2 = Arc::clone(&file_mgr1); +/// +/// // Create a Future that removes the data file at the path `v`. +/// async move { /// // Acquire the write lock of the DataFileManager. -/// let mut mgr = file_mgr1.write().await; +/// let mut mgr = file_mgr2.write().await; /// // Remove the data file. We must handle error cases here to /// // prevent the listener from panicking. /// if let Err(_e) = mgr.remove_data_file(v.as_path()).await { /// eprintln!("Failed to remove a data file at {:?}", v); /// } -/// }); +/// } +/// // Convert the regular Future into ListenerFuture. This method is +/// // provided by moka::future::FutureExt trait. +/// .boxed() /// }; /// /// // Create the cache. Set time to live for two seconds and set the @@ -575,7 +568,7 @@ use std::{ /// let cache = Cache::builder() /// .max_capacity(100) /// .time_to_live(Duration::from_secs(2)) -/// .eviction_listener_with_queued_delivery_mode(listener) +/// .async_eviction_listener(eviction_listener) /// .build(); /// /// // Insert an entry to the cache. @@ -641,18 +634,12 @@ use std::{ /// ## Delivery Modes for eviction listener /// /// The [`DeliveryMode`][delivery-mode] specifies how and when an eviction -/// notification should be delivered to an eviction listener. Currently, the -/// `future::Cache` supports only one delivery mode: `Queued` mode. +/// notification should be delivered to an eviction listener. /// -/// A future version of `future::Cache` will support `Immediate` mode, which will be -/// easier to use in many use cases than queued mode. Unlike the `future::Cache`, -/// the `sync::Cache` already supports it. +/// The `future::Cache` supports the following delivery mode: /// -/// Once `future::Cache` supports the immediate mode, the `eviction_listener` and -/// `eviction_listener_with_conf` methods will be added to the -/// `future::CacheBuilder`. The former will use the immediate mode, and the latter -/// will take a custom configurations to specify the queued mode. The current method -/// `eviction_listener_with_queued_delivery_mode` will be deprecated. +/// - From v0.12.0, it only supports `Immediate` mode. +/// - Up to v0.11.x, it only supported `Queued` modes. /// /// For more details about the delivery modes, see [this section][sync-delivery-modes] /// of `sync::Cache` documentation. @@ -733,8 +720,8 @@ impl Cache { /// /// The value returned is _an estimate_; the actual count may differ if there are /// concurrent insertions or removals, or if some entries are pending removal due - /// to expiration. This inaccuracy can be mitigated by performing a `sync()` - /// first. + /// to expiration. 
This inaccuracy can be mitigated by calling + /// `run_pending_tasks` first. /// /// # Example /// @@ -742,7 +729,7 @@ impl Cache { /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// use moka::future::Cache; /// @@ -760,11 +747,9 @@ impl Cache { /// println!("{}", cache.entry_count()); // -> 0 /// println!("{}", cache.weighted_size()); // -> 0 /// - /// // To mitigate the inaccuracy, bring `ConcurrentCacheExt` trait to - /// // the scope so we can use `sync` method. - /// use moka::future::ConcurrentCacheExt; - /// // Call `sync` to run pending internal tasks. - /// cache.sync(); + /// // To mitigate the inaccuracy, call `run_pending_tasks` to run pending + /// // internal tasks. + /// cache.run_pending_tasks().await; /// /// // Followings will print the actual numbers. /// println!("{}", cache.entry_count()); // -> 3 @@ -780,16 +765,17 @@ impl Cache { /// /// The value returned is _an estimate_; the actual size may differ if there are /// concurrent insertions or removals, or if some entries are pending removal due - /// to expiration. This inaccuracy can be mitigated by performing a `sync()` - /// first. See [`entry_count`](#method.entry_count) for a sample code. + /// to expiration. This inaccuracy can be mitigated by calling + /// `run_pending_tasks` first. See [`entry_count`](#method.entry_count) for a + /// sample code. pub fn weighted_size(&self) -> u64 { self.base.weighted_size() } #[cfg(feature = "unstable-debug-counters")] #[cfg_attr(docsrs, doc(cfg(feature = "unstable-debug-counters")))] - pub fn debug_stats(&self) -> CacheDebugStats { - self.base.debug_stats() + pub async fn debug_stats(&self) -> CacheDebugStats { + self.base.debug_stats().await } } @@ -813,10 +799,8 @@ where build_hasher, None, None, - None, Default::default(), false, - housekeeper::Configuration::new_thread_pool(true), ) } @@ -843,11 +827,9 @@ where initial_capacity: Option, build_hasher: S, weigher: Option>, - eviction_listener: Option>, - eviction_listener_conf: Option, + eviction_listener: Option>, expiration_policy: ExpirationPolicy, invalidator_enabled: bool, - housekeeper_conf: housekeeper::Configuration, ) -> Self { Self { base: BaseCache::new( @@ -857,10 +839,8 @@ where build_hasher.clone(), weigher, eviction_listener, - eviction_listener_conf, expiration_policy, invalidator_enabled, - housekeeper_conf, ), value_initializer: Arc::new(ValueInitializer::with_hasher(build_hasher)), } @@ -892,13 +872,16 @@ where /// on the borrowed form _must_ match those for the key type. 
/// /// [rustdoc-std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html - pub fn get(&self, key: &Q) -> Option + pub async fn get(&self, key: &Q) -> Option where K: Borrow, Q: Hash + Eq + ?Sized, { + let ignore_if = None as Option<&mut fn(&V) -> bool>; + self.base - .get_with_hash(key, self.base.hash(key), false) + .get_with_hash(key, self.base.hash(key), ignore_if, false, true) + .await .map(Entry::into_value) } @@ -913,7 +896,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -953,7 +936,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -998,7 +981,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // futures-util = "0.3" /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// use moka::future::Cache; @@ -1016,9 +999,9 @@ where /// tokio::spawn(async move { /// println!("Task {} started.", task_id); /// - /// // Insert and get the value for key1. Although all four async tasks - /// // will call `get_with` at the same time, the `init` async - /// // block must be resolved only once. + /// // Insert and get the value for key1. Although all four async + /// // tasks will call `get_with` at the same time, the `init` + /// // async block must be resolved only once. /// let value = my_cache /// .get_with("key1", async move { /// println!("Task {} inserting a value.", task_id); @@ -1028,7 +1011,7 @@ where /// /// // Ensure the value exists now. /// assert_eq!(value.len(), TEN_MIB); - /// assert!(my_cache.get(&"key1").is_some()); + /// assert!(my_cache.get(&"key1").await.is_some()); /// /// println!("Task {} got the value. (len: {})", task_id, value.len()); /// }) @@ -1098,7 +1081,7 @@ where &self, key: K, init: impl Future, - replace_if: impl FnMut(&V) -> bool, + replace_if: impl FnMut(&V) -> bool + Send, ) -> V { futures_util::pin_mut!(init); let hash = self.base.hash(&key); @@ -1125,7 +1108,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // futures-util = "0.3" /// // reqwest = "0.11" /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } @@ -1159,7 +1142,7 @@ where /// /// // Ensure the value exists now. /// assert!(value.is_some()); - /// assert!(my_cache.get(&"key1").is_some()); + /// assert!(my_cache.get(&"key1").await.is_some()); /// /// println!( /// "Task {} got the value. (len: {})", @@ -1249,7 +1232,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // futures-util = "0.3" /// // reqwest = "0.11" /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } @@ -1283,7 +1266,7 @@ where /// /// // Ensure the value exists now. 
/// assert!(value.is_ok()); - /// assert!(my_cache.get(&"key1").is_some()); + /// assert!(my_cache.get(&"key1").await.is_some()); /// /// println!( /// "Task {} got the value. (len: {})", @@ -1363,25 +1346,6 @@ where self.insert_with_hash(key, hash, value).await } - fn do_blocking_insert(&self, key: K, value: V) { - if self.base.is_map_disabled() { - return; - } - - let hash = self.base.hash(&key); - let key = Arc::new(key); - let (op, now) = self.base.do_insert_with_hash(key, hash, value); - let hk = self.base.housekeeper.as_ref(); - Self::blocking_schedule_write_op( - self.base.inner.as_ref(), - &self.base.write_op_ch, - op, - now, - hk, - ) - .expect("Failed to insert"); - } - /// Discards any cached value for the key. /// /// If you need to get the value that has been discarded, use the @@ -1429,51 +1393,21 @@ where }; if self.base.is_removal_notifier_enabled() { - self.base.notify_invalidate(&kv.key, &kv.entry) + self.base.notify_invalidate(&kv.key, &kv.entry).await } let op = WriteOp::Remove(kv); let now = self.base.current_time_from_expiration_clock(); let hk = self.base.housekeeper.as_ref(); - Self::schedule_write_op( - self.base.inner.as_ref(), - &self.base.write_op_ch, - op, - now, - hk, - ) - .await - .expect("Failed to remove"); + Self::schedule_write_op(&self.base.inner, &self.base.write_op_ch, op, now, hk) + .await + .expect("Failed to remove"); crossbeam_epoch::pin().flush(); maybe_v } } } - fn do_blocking_invalidate(&self, key: &Q) - where - K: Borrow, - Q: Hash + Eq + ?Sized, - { - let hash = self.base.hash(key); - if let Some(kv) = self.base.remove_entry(key, hash) { - if self.base.is_removal_notifier_enabled() { - self.base.notify_invalidate(&kv.key, &kv.entry) - } - let op = WriteOp::Remove(kv); - let now = self.base.current_time_from_expiration_clock(); - let hk = self.base.housekeeper.as_ref(); - Self::blocking_schedule_write_op( - self.base.inner.as_ref(), - &self.base.write_op_ch, - op, - now, - hk, - ) - .expect("Failed to remove"); - } - } - /// Discards all cached values. /// /// This method returns immediately and a background thread will evict all the @@ -1557,7 +1491,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// use moka::future::Cache; /// @@ -1582,12 +1516,10 @@ where Iter::new(inner) } - /// Returns a `BlockingOp` for this cache. It provides blocking - /// [`insert`](./struct.BlockingOp.html#method.insert) and - /// [`invalidate`](struct.BlockingOp.html#method.invalidate) methods, which - /// can be called outside of asynchronous contexts. 
- pub fn blocking(&self) -> BlockingOp<'_, K, V, S> { - BlockingOp(self) + pub async fn run_pending_tasks(&self) { + if let Some(hk) = &self.base.housekeeper { + hk.run_pending_tasks(Arc::clone(&self.base.inner)).await; + } } } @@ -1606,17 +1538,6 @@ where } } -impl ConcurrentCacheExt for Cache -where - K: Hash + Eq + Send + Sync + 'static, - V: Clone + Send + Sync + 'static, - S: BuildHasher + Clone + Send + Sync + 'static, -{ - fn sync(&self) { - self.base.inner.sync(MAX_SYNC_REPEATS); - } -} - // // private methods // @@ -1631,12 +1552,13 @@ where key: Arc, hash: u64, init: Pin<&mut impl Future>, - mut replace_if: Option bool>, + mut replace_if: Option bool + Send>, need_key: bool, ) -> Entry { - let maybe_entry = - self.base - .get_with_hash_and_ignore_if(&key, hash, replace_if.as_mut(), need_key); + let maybe_entry = self + .base + .get_with_hash(&key, hash, replace_if.as_mut(), need_key, true) + .await; if let Some(entry) = maybe_entry { entry } else { @@ -1650,16 +1572,17 @@ where key: &Q, hash: u64, init: Pin<&mut impl Future>, - mut replace_if: Option bool>, + mut replace_if: Option bool + Send>, need_key: bool, ) -> Entry where K: Borrow, Q: ToOwned + Hash + Eq + ?Sized, { - let maybe_entry = - self.base - .get_with_hash_and_ignore_if(key, hash, replace_if.as_mut(), need_key); + let maybe_entry = self + .base + .get_with_hash(key, hash, replace_if.as_mut(), need_key, true) + .await; if let Some(entry) = maybe_entry { entry } else { @@ -1674,29 +1597,22 @@ where key: Arc, hash: u64, init: Pin<&mut impl Future>, - mut replace_if: Option bool>, + replace_if: Option bool + Send>, need_key: bool, ) -> Entry { - use futures_util::FutureExt; - - let get = || { - self.base - .get_with_hash_without_recording(&key, hash, replace_if.as_mut()) - }; - let insert = |v| self.insert_with_hash(key.clone(), hash, v).boxed(); - let k = if need_key { Some(Arc::clone(&key)) } else { None }; + let replace_if = Arc::new(Mutex::new(replace_if)); let type_id = ValueInitializer::::type_id_for_get_with(); let post_init = ValueInitializer::::post_init_for_get_with; match self .value_initializer - .try_init_or_read(&key, type_id, get, init, insert, post_init) + .try_init_or_read(&key, hash, type_id, self, replace_if, init, post_init) .await { InitResult::Initialized(v) => { @@ -1714,7 +1630,11 @@ where hash: u64, init: impl FnOnce() -> V, ) -> Entry { - match self.base.get_with_hash(&key, hash, true) { + match self + .base + .get_with_hash(&key, hash, never_ignore(), true, true) + .await + { Some(entry) => entry, None => { let value = init(); @@ -1735,7 +1655,11 @@ where K: Borrow, Q: ToOwned + Hash + Eq + ?Sized, { - match self.base.get_with_hash(key, hash, true) { + match self + .base + .get_with_hash(key, hash, never_ignore(), true, true) + .await + { Some(entry) => entry, None => { let key = Arc::new(key.to_owned()); @@ -1757,7 +1681,10 @@ where where F: Future>, { - let entry = self.base.get_with_hash(&key, hash, need_key); + let entry = self + .base + .get_with_hash(&key, hash, never_ignore(), need_key, true) + .await; if entry.is_some() { return entry; } @@ -1778,7 +1705,10 @@ where K: Borrow, Q: ToOwned + Hash + Eq + ?Sized, { - let entry = self.base.get_with_hash(key, hash, need_key); + let entry = self + .base + .get_with_hash(key, hash, never_ignore(), need_key, true) + .await; if entry.is_some() { return entry; } @@ -1798,27 +1728,19 @@ where where F: Future>, { - use futures_util::FutureExt; - - let get = || { - let ignore_if = None as Option<&mut fn(&V) -> bool>; - self.base - 
.get_with_hash_without_recording(&key, hash, ignore_if) - }; - let insert = |v| self.insert_with_hash(key.clone(), hash, v).boxed(); - let k = if need_key { Some(Arc::clone(&key)) } else { None }; + let ignore_if = Arc::new(Mutex::new(never_ignore())); let type_id = ValueInitializer::::type_id_for_optionally_get_with(); let post_init = ValueInitializer::::post_init_for_optionally_get_with; match self .value_initializer - .try_init_or_read(&key, type_id, get, init, insert, post_init) + .try_init_or_read(&key, hash, type_id, self, ignore_if, init, post_init) .await { InitResult::Initialized(v) => { @@ -1841,7 +1763,11 @@ where F: Future>, E: Send + Sync + 'static, { - if let Some(entry) = self.base.get_with_hash(&key, hash, need_key) { + if let Some(entry) = self + .base + .get_with_hash(&key, hash, never_ignore(), need_key, true) + .await + { return Ok(entry); } @@ -1862,7 +1788,11 @@ where K: Borrow, Q: ToOwned + Hash + Eq + ?Sized, { - if let Some(entry) = self.base.get_with_hash(key, hash, need_key) { + if let Some(entry) = self + .base + .get_with_hash(key, hash, never_ignore(), need_key, true) + .await + { return Ok(entry); } let key = Arc::new(key.to_owned()); @@ -1881,27 +1811,19 @@ where F: Future>, E: Send + Sync + 'static, { - use futures_util::FutureExt; - - let get = || { - let ignore_if = None as Option<&mut fn(&V) -> bool>; - self.base - .get_with_hash_without_recording(&key, hash, ignore_if) - }; - let insert = |v| self.insert_with_hash(key.clone(), hash, v).boxed(); - let k = if need_key { Some(Arc::clone(&key)) } else { None }; + let ignore_if = Arc::new(Mutex::new(never_ignore())); let type_id = ValueInitializer::::type_id_for_try_get_with::(); let post_init = ValueInitializer::::post_init_for_try_get_with; match self .value_initializer - .try_init_or_read(&key, type_id, get, init, insert, post_init) + .try_init_or_read(&key, hash, type_id, self, ignore_if, init, post_init) .await { InitResult::Initialized(v) => { @@ -1921,68 +1843,83 @@ where return; } - let (op, now) = self.base.do_insert_with_hash(key, hash, value); + let (op, now) = self.base.do_insert_with_hash(key, hash, value).await; let hk = self.base.housekeeper.as_ref(); - Self::schedule_write_op( - self.base.inner.as_ref(), - &self.base.write_op_ch, - op, - now, - hk, - ) - .await - .expect("Failed to insert"); + Self::schedule_write_op(&self.base.inner, &self.base.write_op_ch, op, now, hk) + .await + .expect("Failed to insert"); } #[inline] async fn schedule_write_op( - inner: &impl InnerSync, + inner: &Arc, ch: &Sender>, op: WriteOp, now: Instant, - housekeeper: Option<&HouseKeeperArc>, + housekeeper: Option<&HouseKeeperArc>, ) -> Result<(), TrySendError>> { let mut op = op; - - // TODO: Try to replace the timer with an async event listener to see if it - // can provide better performance. 
+ let mut spin_count = 0u8; loop { - BaseCache::apply_reads_writes_if_needed(inner, ch, now, housekeeper); + BaseCache::::apply_reads_writes_if_needed( + Arc::clone(inner), + ch, + now, + housekeeper, + ) + .await; match ch.try_send(op) { - Ok(()) => break, + Ok(()) => return Ok(()), Err(TrySendError::Full(op1)) => { op = op1; - async_io::Timer::after(Duration::from_micros(WRITE_RETRY_INTERVAL_MICROS)) - .await; } Err(e @ TrySendError::Disconnected(_)) => return Err(e), } - } - Ok(()) - } - #[inline] - fn blocking_schedule_write_op( - inner: &impl InnerSync, - ch: &Sender>, - op: WriteOp, - now: Instant, - housekeeper: Option<&HouseKeeperArc>, - ) -> Result<(), TrySendError>> { - let mut op = op; - - loop { - BaseCache::apply_reads_writes_if_needed(inner, ch, now, housekeeper); - match ch.try_send(op) { - Ok(()) => break, - Err(TrySendError::Full(op1)) => { - op = op1; - std::thread::sleep(Duration::from_micros(WRITE_RETRY_INTERVAL_MICROS)); + // We have got a `TrySendError::Full` above. Wait for a bit and try + // again. + if spin_count < 10 { + spin_count += 1; + // Wastes some CPU time with a hint to indicate to the CPU that we + // are spinning + for _ in 0..8 { + std::hint::spin_loop(); } - Err(e @ TrySendError::Disconnected(_)) => return Err(e), + } else { + spin_count = 0; + // Try to yield to other tasks. We have to yield sometimes, otherwise + // other task, which is draining the `ch`, will not make any + // progress. If this happens, we will stuck in this loop forever. + super::may_yield().await; } } - Ok(()) + } +} + +#[async_trait] +impl GetOrInsert for Cache +where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher + Clone + Send + Sync + 'static, +{ + async fn get_without_recording( + &self, + key: &Arc, + hash: u64, + replace_if: Option<&mut I>, + ) -> Option + where + I: for<'i> FnMut(&'i V) -> bool + Send, + { + self.base + .get_with_hash(key, hash, replace_if, false, false) + .await + .map(Entry::into_value) + } + + async fn insert(&self, key: Arc, hash: u64, value: V) { + self.insert_with_hash(key.clone(), hash, value).await; } } @@ -2002,73 +1939,72 @@ where self.base.invalidation_predicate_count() } - fn reconfigure_for_testing(&mut self) { - self.base.reconfigure_for_testing(); + async fn reconfigure_for_testing(&mut self) { + self.base.reconfigure_for_testing().await; } - fn set_expiration_clock(&self, clock: Option) { - self.base.set_expiration_clock(clock); + async fn set_expiration_clock(&self, clock: Option) { + self.base.set_expiration_clock(clock).await; } fn key_locks_map_is_empty(&self) -> bool { self.base.key_locks_map_is_empty() } -} - -pub struct BlockingOp<'a, K, V, S>(&'a Cache); -impl<'a, K, V, S> BlockingOp<'a, K, V, S> -where - K: Hash + Eq + Send + Sync + 'static, - V: Clone + Send + Sync + 'static, - S: BuildHasher + Clone + Send + Sync + 'static, -{ - /// Inserts a key-value pair into the cache. If the cache has this key present, - /// the value is updated. - /// - /// This method is intended for use cases where you are inserting from - /// synchronous code. - pub fn insert(&self, key: K, value: V) { - self.0.do_blocking_insert(key, value) + fn run_pending_tasks_initiation_count(&self) -> usize { + use std::sync::atomic::Ordering; + self.base + .housekeeper + .as_ref() + .map(|hk| hk.start_count.load(Ordering::Acquire)) + .expect("housekeeper is not set") } - /// Discards any cached value for the key. - /// - /// This method is intended for use cases where you are invalidating from - /// synchronous code. 
- /// - /// The key may be any borrowed form of the cache's key type, but `Hash` and `Eq` - /// on the borrowed form _must_ match those for the key type. - pub fn invalidate(&self, key: &Q) - where - K: Borrow, - Q: Hash + Eq + ?Sized, - { - self.0.do_blocking_invalidate(key) + fn run_pending_tasks_completion_count(&self) -> usize { + use std::sync::atomic::Ordering; + self.base + .housekeeper + .as_ref() + .map(|hk| hk.complete_count.load(Ordering::Acquire)) + .expect("housekeeper is not set") } } +// AS of Rust 1.71, we cannot make this function into a `const fn` because mutable +// references are not allowed. +// See [#57349](https://github.com/rust-lang/rust/issues/57349). +#[inline] +fn never_ignore<'a, V>() -> Option<&'a mut fn(&V) -> bool> { + None +} + // To see the debug prints, run test as `cargo test -- --nocapture` #[cfg(test)] mod tests { - use super::{Cache, ConcurrentCacheExt}; + use super::Cache; use crate::{ - common::time::Clock, notification::RemovalCause, policy::test_utils::ExpiryCallCounters, + common::time::Clock, + future::FutureExt, + notification::{ListenerFuture, RemovalCause}, + policy::test_utils::ExpiryCallCounters, Expiry, }; - use async_io::Timer; - use parking_lot::Mutex; + use async_lock::{Barrier, Mutex}; use std::{ convert::Infallible, - sync::Arc, + sync::{ + atomic::{AtomicU32, Ordering}, + Arc, + }, time::{Duration, Instant as StdInstant}, }; + use tokio::time::sleep; #[tokio::test] async fn max_capacity_zero() { let mut cache = Cache::new(0); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; @@ -2076,10 +2012,10 @@ mod tests { cache.insert(0, ()).await; assert!(!cache.contains_key(&0)); - assert!(cache.get(&0).is_none()); - cache.sync(); + assert!(cache.get(&0).await.is_none()); + cache.run_pending_tasks().await; assert!(!cache.contains_key(&0)); - assert!(cache.get(&0).is_none()); + assert!(cache.get(&0).await.is_none()); assert_eq!(cache.entry_count(), 0) } @@ -2091,64 +2027,68 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - // We use non-async mutex in the eviction listener (because the listener - // is a regular closure). - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", "alice").await; cache.insert("b", "bob").await; - assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"a").await, Some("alice")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); - assert_eq!(cache.get(&"b"), Some("bob")); - cache.sync(); + assert_eq!(cache.get(&"b").await, Some("bob")); + cache.run_pending_tasks().await; // counts: a -> 1, b -> 1 cache.insert("c", "cindy").await; - assert_eq!(cache.get(&"c"), Some("cindy")); + assert_eq!(cache.get(&"c").await, Some("cindy")); assert!(cache.contains_key(&"c")); // counts: a -> 1, b -> 1, c -> 1 - cache.sync(); + cache.run_pending_tasks().await; assert!(cache.contains_key(&"a")); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); + assert_eq!(cache.get(&"a").await, Some("alice")); + assert_eq!(cache.get(&"b").await, Some("bob")); assert!(cache.contains_key(&"b")); - cache.sync(); + cache.run_pending_tasks().await; // counts: a -> 2, b -> 2, c -> 1 // "d" should not be admitted because its frequency is too low. cache.insert("d", "david").await; // count: d -> 0 expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 1 + cache.run_pending_tasks().await; + assert_eq!(cache.get(&"d").await, None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", "david").await; expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); + cache.run_pending_tasks().await; assert!(!cache.contains_key(&"d")); - assert_eq!(cache.get(&"d"), None); // d -> 2 + assert_eq!(cache.get(&"d").await, None); // d -> 2 // "d" should be admitted and "c" should be evicted // because d's frequency is higher than c's. cache.insert("d", "dennis").await; expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - assert_eq!(cache.get(&"c"), None); - assert_eq!(cache.get(&"d"), Some("dennis")); + cache.run_pending_tasks().await; + assert_eq!(cache.get(&"a").await, Some("alice")); + assert_eq!(cache.get(&"b").await, Some("bob")); + assert_eq!(cache.get(&"c").await, None); + assert_eq!(cache.get(&"d").await, Some("dennis")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); @@ -2156,88 +2096,21 @@ mod tests { cache.invalidate(&"b").await; expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); - cache.sync(); - assert_eq!(cache.get(&"b"), None); + cache.run_pending_tasks().await; + assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"b")); assert!(cache.remove(&"b").await.is_none()); assert_eq!(cache.remove(&"d").await, Some("dennis")); expected.push((Arc::new("d"), "dennis", RemovalCause::Explicit)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); + cache.run_pending_tasks().await; + assert_eq!(cache.get(&"d").await, None); assert!(!cache.contains_key(&"d")); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; assert!(cache.key_locks_map_is_empty()); } - #[test] - fn basic_single_blocking_api() { - // The following `Vec`s will hold actual and expected notifications. - let actual = Arc::new(Mutex::new(Vec::new())); - let mut expected = Vec::new(); - - // Create an eviction listener. - let a1 = Arc::clone(&actual); - // We use non-async mutex in the eviction listener (because the listener - // is a regular closure). 
- let listener = move |k, v, cause| a1.lock().push((k, v, cause)); - - // Create a cache with the eviction listener. - let mut cache = Cache::builder() - .max_capacity(3) - .eviction_listener_with_queued_delivery_mode(listener) - .build(); - cache.reconfigure_for_testing(); - - // Make the cache exterior immutable. - let cache = cache; - - cache.blocking().insert("a", "alice"); - cache.blocking().insert("b", "bob"); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - cache.sync(); - // counts: a -> 1, b -> 1 - - cache.blocking().insert("c", "cindy"); - assert_eq!(cache.get(&"c"), Some("cindy")); - // counts: a -> 1, b -> 1, c -> 1 - cache.sync(); - - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - cache.sync(); - // counts: a -> 2, b -> 2, c -> 1 - - // "d" should not be admitted because its frequency is too low. - cache.blocking().insert("d", "david"); // count: d -> 0 - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 1 - - cache.blocking().insert("d", "david"); - expected.push((Arc::new("d"), "david", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 2 - - // "d" should be admitted and "c" should be evicted - // because d's frequency is higher than c's. - cache.blocking().insert("d", "dennis"); - expected.push((Arc::new("c"), "cindy", RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - assert_eq!(cache.get(&"c"), None); - assert_eq!(cache.get(&"d"), Some("dennis")); - - cache.blocking().invalidate(&"b"); - expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); - assert_eq!(cache.get(&"b"), None); - - verify_notification_vec(&cache, actual, &expected); - } - #[tokio::test] async fn size_aware_eviction() { let weigher = |_k: &&str, v: &(&str, u32)| v.1; @@ -2255,39 +2128,45 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(31) .weigher(weigher) - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; // Make the cache exterior immutable. 
let cache = cache; cache.insert("a", alice).await; cache.insert("b", bob).await; - assert_eq!(cache.get(&"a"), Some(alice)); + assert_eq!(cache.get(&"a").await, Some(alice)); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); - assert_eq!(cache.get(&"b"), Some(bob)); - cache.sync(); + assert_eq!(cache.get(&"b").await, Some(bob)); + cache.run_pending_tasks().await; // order (LRU -> MRU) and counts: a -> 1, b -> 1 cache.insert("c", cindy).await; - assert_eq!(cache.get(&"c"), Some(cindy)); + assert_eq!(cache.get(&"c").await, Some(cindy)); assert!(cache.contains_key(&"c")); // order and counts: a -> 1, b -> 1, c -> 1 - cache.sync(); + cache.run_pending_tasks().await; assert!(cache.contains_key(&"a")); - assert_eq!(cache.get(&"a"), Some(alice)); - assert_eq!(cache.get(&"b"), Some(bob)); + assert_eq!(cache.get(&"a").await, Some(alice)); + assert_eq!(cache.get(&"b").await, Some(bob)); assert!(cache.contains_key(&"b")); - cache.sync(); + cache.run_pending_tasks().await; // order and counts: c -> 1, a -> 2, b -> 2 // To enter "d" (weight: 15), it needs to evict "c" (w: 5) and "a" (w: 10). @@ -2295,37 +2174,37 @@ mod tests { // of "a" and "c". cache.insert("d", david).await; // count: d -> 0 expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 1 + cache.run_pending_tasks().await; + assert_eq!(cache.get(&"d").await, None); // d -> 1 assert!(!cache.contains_key(&"d")); cache.insert("d", david).await; expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); + cache.run_pending_tasks().await; assert!(!cache.contains_key(&"d")); - assert_eq!(cache.get(&"d"), None); // d -> 2 + assert_eq!(cache.get(&"d").await, None); // d -> 2 cache.insert("d", david).await; expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"d"), None); // d -> 3 + cache.run_pending_tasks().await; + assert_eq!(cache.get(&"d").await, None); // d -> 3 assert!(!cache.contains_key(&"d")); cache.insert("d", david).await; expected.push((Arc::new("d"), david, RemovalCause::Size)); - cache.sync(); + cache.run_pending_tasks().await; assert!(!cache.contains_key(&"d")); - assert_eq!(cache.get(&"d"), None); // d -> 4 + assert_eq!(cache.get(&"d").await, None); // d -> 4 // Finally "d" should be admitted by evicting "c" and "a". 
cache.insert("d", dennis).await; expected.push((Arc::new("c"), cindy, RemovalCause::Size)); expected.push((Arc::new("a"), alice, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), Some(bob)); - assert_eq!(cache.get(&"c"), None); - assert_eq!(cache.get(&"d"), Some(dennis)); + cache.run_pending_tasks().await; + assert_eq!(cache.get(&"a").await, None); + assert_eq!(cache.get(&"b").await, Some(bob)); + assert_eq!(cache.get(&"c").await, None); + assert_eq!(cache.get(&"d").await, Some(dennis)); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); @@ -2335,9 +2214,9 @@ mod tests { cache.insert("b", bill).await; expected.push((Arc::new("b"), bob, RemovalCause::Replaced)); expected.push((Arc::new("d"), dennis, RemovalCause::Size)); - cache.sync(); - assert_eq!(cache.get(&"b"), Some(bill)); - assert_eq!(cache.get(&"d"), None); + cache.run_pending_tasks().await; + assert_eq!(cache.get(&"b").await, Some(bill)); + assert_eq!(cache.get(&"d").await, None); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"d")); @@ -2345,10 +2224,10 @@ mod tests { cache.insert("a", alice).await; cache.insert("b", bob).await; expected.push((Arc::new("b"), bill, RemovalCause::Replaced)); - cache.sync(); - assert_eq!(cache.get(&"a"), Some(alice)); - assert_eq!(cache.get(&"b"), Some(bob)); - assert_eq!(cache.get(&"d"), None); + cache.run_pending_tasks().await; + assert_eq!(cache.get(&"a").await, Some(alice)); + assert_eq!(cache.get(&"b").await, Some(bob)); + assert_eq!(cache.get(&"d").await, None); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(!cache.contains_key(&"d")); @@ -2357,40 +2236,56 @@ mod tests { assert_eq!(cache.entry_count(), 2); assert_eq!(cache.weighted_size(), 25); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; assert!(cache.key_locks_map_is_empty()); } #[tokio::test] async fn basic_multi_async_tasks() { - let num_tasks = 4; + let num_tasks = 2; + let num_threads = 2; + let cache = Cache::new(100); + let barrier = Arc::new(Barrier::new(num_tasks + num_threads as usize)); let tasks = (0..num_tasks) .map(|id| { let cache = cache.clone(); - if id == 0 { - tokio::spawn(async move { - cache.blocking().insert(10, format!("{}-100", id)); - cache.get(&10); - cache.blocking().insert(20, format!("{}-200", id)); - cache.blocking().invalidate(&10); - }) - } else { - tokio::spawn(async move { - cache.insert(10, format!("{}-100", id)).await; - cache.get(&10); - cache.insert(20, format!("{}-200", id)).await; - cache.invalidate(&10).await; - }) - } + let barrier = Arc::clone(&barrier); + + tokio::spawn(async move { + barrier.wait().await; + + cache.insert(10, format!("{}-100", id)).await; + cache.get(&10).await; + cache.insert(20, format!("{}-200", id)).await; + cache.invalidate(&10).await; + }) + }) + .collect::>(); + + let threads = (0..num_threads) + .map(|id| { + let cache = cache.clone(); + let barrier = Arc::clone(&barrier); + let rt = tokio::runtime::Handle::current(); + + std::thread::spawn(move || { + rt.block_on(barrier.wait()); + + rt.block_on(cache.insert(10, format!("{}-100", id))); + rt.block_on(cache.get(&10)); + rt.block_on(cache.insert(20, format!("{}-200", id))); + rt.block_on(cache.invalidate(&10)); + }) }) .collect::>(); let _ = futures_util::future::join_all(tasks).await; + threads.into_iter().for_each(|t| t.join().unwrap()); - assert!(cache.get(&10).is_none()); - 
assert!(cache.get(&20).is_some()); + assert!(cache.get(&10).await.is_none()); + assert!(cache.get(&20).await.is_some()); assert!(!cache.contains_key(&10)); assert!(cache.contains_key(&20)); } @@ -2403,14 +2298,20 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; @@ -2418,14 +2319,14 @@ mod tests { cache.insert("a", "alice").await; cache.insert("b", "bob").await; cache.insert("c", "cindy").await; - assert_eq!(cache.get(&"a"), Some("alice")); - assert_eq!(cache.get(&"b"), Some("bob")); - assert_eq!(cache.get(&"c"), Some("cindy")); + assert_eq!(cache.get(&"a").await, Some("alice")); + assert_eq!(cache.get(&"b").await, Some("bob")); + assert_eq!(cache.get(&"c").await, Some("cindy")); assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert!(cache.contains_key(&"c")); - // `cache.sync()` is no longer needed here before invalidating. The last + // `cache.run_pending_tasks().await` is no longer needed here before invalidating. The last // modified timestamp of the entries were updated when they were inserted. // https://github.com/moka-rs/moka/issues/155 @@ -2433,34 +2334,34 @@ mod tests { expected.push((Arc::new("a"), "alice", RemovalCause::Explicit)); expected.push((Arc::new("b"), "bob", RemovalCause::Explicit)); expected.push((Arc::new("c"), "cindy", RemovalCause::Explicit)); - cache.sync(); + cache.run_pending_tasks().await; cache.insert("d", "david").await; - cache.sync(); + cache.run_pending_tasks().await; - assert!(cache.get(&"a").is_none()); - assert!(cache.get(&"b").is_none()); - assert!(cache.get(&"c").is_none()); - assert_eq!(cache.get(&"d"), Some("david")); + assert!(cache.get(&"a").await.is_none()); + assert!(cache.get(&"b").await.is_none()); + assert!(cache.get(&"c").await.is_none()); + assert_eq!(cache.get(&"d").await, Some("david")); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert!(!cache.contains_key(&"c")); assert!(cache.contains_key(&"d")); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; } // This test is for https://github.com/moka-rs/moka/issues/155 #[tokio::test] - async fn invalidate_all_without_sync() { + async fn invalidate_all_without_running_pending_tasks() { let cache = Cache::new(1024); - assert_eq!(cache.get(&0), None); + assert_eq!(cache.get(&0).await, None); cache.insert(0, 1).await; - assert_eq!(cache.get(&0), Some(1)); + assert_eq!(cache.get(&0).await, Some(1)); cache.invalidate_all(); - assert_eq!(cache.get(&0), None); + assert_eq!(cache.get(&0).await, None); } #[tokio::test] @@ -2473,18 +2374,24 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create a cache with the eviction listener. 
let mut cache = Cache::builder() .max_capacity(100) .support_invalidation_closures() - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + cache.set_expiration_clock(Some(clock)).await; // Make the cache exterior immutable. let cache = cache; @@ -2492,14 +2399,14 @@ mod tests { cache.insert(0, "alice").await; cache.insert(1, "bob").await; cache.insert(2, "alex").await; - cache.sync(); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + cache.run_pending_tasks().await; - assert_eq!(cache.get(&0), Some("alice")); - assert_eq!(cache.get(&1), Some("bob")); - assert_eq!(cache.get(&2), Some("alex")); + assert_eq!(cache.get(&0).await, Some("alice")); + assert_eq!(cache.get(&1).await, Some("bob")); + assert_eq!(cache.get(&2).await, Some("alex")); assert!(cache.contains_key(&0)); assert!(cache.contains_key(&1)); assert!(cache.contains_key(&2)); @@ -2515,16 +2422,16 @@ mod tests { cache.insert(3, "alice").await; // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. + cache.run_pending_tasks().await; // To submit the invalidation task. std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. + cache.run_pending_tasks().await; // To process the task result. std::thread::sleep(Duration::from_millis(200)); - assert!(cache.get(&0).is_none()); - assert!(cache.get(&2).is_none()); - assert_eq!(cache.get(&1), Some("bob")); + assert!(cache.get(&0).await.is_none()); + assert!(cache.get(&2).await.is_none()); + assert_eq!(cache.get(&1).await, Some("bob")); // This should survive as it was inserted after calling invalidate_entries_if. - assert_eq!(cache.get(&3), Some("alice")); + assert_eq!(cache.get(&3).await, Some("alice")); assert!(!cache.contains_key(&0)); assert!(cache.contains_key(&1)); @@ -2544,13 +2451,13 @@ mod tests { expected.push((Arc::new(3), "alice", RemovalCause::Explicit)); // Run the invalidation task and wait for it to finish. (TODO: Need a better way than sleeping) - cache.sync(); // To submit the invalidation task. + cache.run_pending_tasks().await; // To submit the invalidation task. std::thread::sleep(Duration::from_millis(200)); - cache.sync(); // To process the task result. + cache.run_pending_tasks().await; // To process the task result. std::thread::sleep(Duration::from_millis(200)); - assert!(cache.get(&1).is_none()); - assert!(cache.get(&3).is_none()); + assert!(cache.get(&1).await.is_none()); + assert!(cache.get(&3).await.is_none()); assert!(!cache.contains_key(&1)); assert!(!cache.contains_key(&3)); @@ -2558,7 +2465,7 @@ mod tests { assert_eq!(cache.entry_count(), 0); assert_eq!(cache.invalidation_predicate_count(), 0); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; Ok(()) } @@ -2571,78 +2478,84 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create a cache with the eviction listener. 
let mut cache = Cache::builder() .max_capacity(100) .time_to_live(Duration::from_secs(10)) - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + cache.set_expiration_clock(Some(clock)).await; // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice").await; - cache.sync(); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + cache.run_pending_tasks().await; - assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"a").await, Some("alice")); assert!(cache.contains_key(&"a")); mock.increment(Duration::from_secs(5)); // 10 secs. expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"a").await, None); assert!(!cache.contains_key(&"a")); assert_eq!(cache.iter().count(), 0); - cache.sync(); + cache.run_pending_tasks().await; assert!(cache.is_table_empty()); cache.insert("b", "bob").await; - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 15 secs. - cache.sync(); + cache.run_pending_tasks().await; - assert_eq!(cache.get(&"b"), Some("bob")); + assert_eq!(cache.get(&"b").await, Some("bob")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); cache.insert("b", "bill").await; expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); - cache.sync(); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 20 secs - cache.sync(); + cache.run_pending_tasks().await; - assert_eq!(cache.get(&"b"), Some("bill")); + assert_eq!(cache.get(&"b").await, Some("bill")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 25 secs expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), None); + assert_eq!(cache.get(&"a").await, None); + assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); - cache.sync(); + cache.run_pending_tasks().await; assert!(cache.is_table_empty()); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; } #[tokio::test] @@ -2653,75 +2566,81 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(100) .time_to_idle(Duration::from_secs(10)) - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + cache.set_expiration_clock(Some(clock)).await; // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice").await; - cache.sync(); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. 
- cache.sync(); + cache.run_pending_tasks().await; - assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"a").await, Some("alice")); mock.increment(Duration::from_secs(5)); // 10 secs. - cache.sync(); + cache.run_pending_tasks().await; cache.insert("b", "bob").await; - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(2)); // 12 secs. - cache.sync(); + cache.run_pending_tasks().await; // contains_key does not reset the idle timer for the key. assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(3)); // 15 secs. expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), Some("bob")); + assert_eq!(cache.get(&"a").await, None); + assert_eq!(cache.get(&"b").await, Some("bob")); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 1); - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(10)); // 25 secs expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), None); + assert_eq!(cache.get(&"a").await, None); + assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); - cache.sync(); + cache.run_pending_tasks().await; assert!(cache.is_table_empty()); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; } #[tokio::test] @@ -2766,7 +2685,13 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create expiry counters and the expiry. let expiry_counters = Arc::new(ExpiryCallCounters::default()); @@ -2776,76 +2701,76 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .expire_after(expiry) - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + cache.set_expiration_clock(Some(clock)).await; // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice").await; expiry_counters.incl_expected_creations(); - cache.sync(); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + cache.run_pending_tasks().await; - assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"a").await, Some("alice")); assert!(cache.contains_key(&"a")); mock.increment(Duration::from_secs(5)); // 10 secs. 
expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); + assert_eq!(cache.get(&"a").await, None); assert!(!cache.contains_key(&"a")); assert_eq!(cache.iter().count(), 0); - cache.sync(); + cache.run_pending_tasks().await; assert!(cache.is_table_empty()); cache.insert("b", "bob").await; expiry_counters.incl_expected_creations(); - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 15 secs. - cache.sync(); + cache.run_pending_tasks().await; - assert_eq!(cache.get(&"b"), Some("bob")); + assert_eq!(cache.get(&"b").await, Some("bob")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); cache.insert("b", "bill").await; expected.push((Arc::new("b"), "bob", RemovalCause::Replaced)); expiry_counters.incl_expected_updates(); - cache.sync(); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 20 secs - cache.sync(); + cache.run_pending_tasks().await; - assert_eq!(cache.get(&"b"), Some("bill")); + assert_eq!(cache.get(&"b").await, Some("bill")); assert!(cache.contains_key(&"b")); assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(5)); // 25 secs expected.push((Arc::new("b"), "bill", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), None); + assert_eq!(cache.get(&"a").await, None); + assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); - cache.sync(); + cache.run_pending_tasks().await; assert!(cache.is_table_empty()); expiry_counters.verify(); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; } #[tokio::test] @@ -2881,7 +2806,13 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create expiry counters and the expiry. let expiry_counters = Arc::new(ExpiryCallCounters::default()); @@ -2891,72 +2822,72 @@ mod tests { let mut cache = Cache::builder() .max_capacity(100) .expire_after(expiry) - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + cache.set_expiration_clock(Some(clock)).await; // Make the cache exterior immutable. let cache = cache; cache.insert("a", "alice").await; - cache.sync(); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(5)); // 5 secs from the start. - cache.sync(); + cache.run_pending_tasks().await; - assert_eq!(cache.get(&"a"), Some("alice")); + assert_eq!(cache.get(&"a").await, Some("alice")); expiry_counters.incl_expected_reads(); mock.increment(Duration::from_secs(5)); // 10 secs. - cache.sync(); + cache.run_pending_tasks().await; cache.insert("b", "bob").await; - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(2)); // 12 secs. - cache.sync(); + cache.run_pending_tasks().await; // contains_key does not reset the idle timer for the key. 
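Both the `time_to_idle` test earlier and the `Expiry`-based test above lean on the rule spelled out in the comment: a read extends an entry's idle deadline, while `contains_key` does not. A small real-clock sketch of that rule using only the public `time_to_idle` builder option (the 200 ms window and the sleep lengths are arbitrary choices for illustration, not values from the tests):

```rust
use moka::future::Cache;
use std::time::Duration;

#[tokio::main]
async fn main() {
    let cache: Cache<&str, &str> = Cache::builder()
        .time_to_idle(Duration::from_millis(200))
        .build();

    cache.insert("a", "alice").await;

    // A read inside the idle window pushes the deadline forward...
    tokio::time::sleep(Duration::from_millis(120)).await;
    assert_eq!(cache.get(&"a").await, Some("alice"));

    // ...but `contains_key` does not, so after a full window with no reads the
    // entry is reported as gone even before eviction physically removes it.
    tokio::time::sleep(Duration::from_millis(250)).await;
    assert!(!cache.contains_key(&"a"));
    assert_eq!(cache.get(&"a").await, None);
}
```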
assert!(cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 2); mock.increment(Duration::from_secs(3)); // 15 secs. expected.push((Arc::new("a"), "alice", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), Some("bob")); + assert_eq!(cache.get(&"a").await, None); + assert_eq!(cache.get(&"b").await, Some("bob")); expiry_counters.incl_expected_reads(); assert!(!cache.contains_key(&"a")); assert!(cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 1); - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 1); mock.increment(Duration::from_secs(10)); // 25 secs expected.push((Arc::new("b"), "bob", RemovalCause::Expired)); - assert_eq!(cache.get(&"a"), None); - assert_eq!(cache.get(&"b"), None); + assert_eq!(cache.get(&"a").await, None); + assert_eq!(cache.get(&"b").await, None); assert!(!cache.contains_key(&"a")); assert!(!cache.contains_key(&"b")); assert_eq!(cache.iter().count(), 0); - cache.sync(); + cache.run_pending_tasks().await; assert!(cache.is_table_empty()); expiry_counters.verify(); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; } /// Verify that the `Expiry::expire_after_read()` method is called in `get_with` @@ -3017,7 +2948,7 @@ mod tests { .max_capacity(100) .expire_after(expiry) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; @@ -3025,18 +2956,18 @@ mod tests { // The key is not present. cache.get_with("a", async { "alice" }).await; expiry_counters.incl_expected_creations(); - cache.sync(); + cache.run_pending_tasks().await; // The key is present. cache.get_with("a", async { "alex" }).await; expiry_counters.incl_expected_reads(); - cache.sync(); + cache.run_pending_tasks().await; // The key is not present. cache.invalidate("a").await; cache.get_with("a", async { "amanda" }).await; expiry_counters.incl_expected_creations(); - cache.sync(); + cache.run_pending_tasks().await; expiry_counters.verify(); } @@ -3160,7 +3091,7 @@ mod tests { let v = cache1 .get_with(KEY, async { // Wait for 300 ms and return a &str value. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; "task1" }) .await; @@ -3175,7 +3106,7 @@ mod tests { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `get_with`. - Timer::after(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; let v = cache2.get_with(KEY, async { unreachable!() }).await; assert_eq!(v, "task1"); } @@ -3190,7 +3121,7 @@ mod tests { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `get_with`. - Timer::after(Duration::from_millis(400)).await; + sleep(Duration::from_millis(400)).await; let v = cache3.get_with(KEY, async { unreachable!() }).await; assert_eq!(v, "task1"); } @@ -3202,8 +3133,8 @@ mod tests { let cache4 = cache.clone(); async move { // Wait for 200 ms before calling `get`. - Timer::after(Duration::from_millis(200)).await; - let maybe_v = cache4.get(&KEY); + sleep(Duration::from_millis(200)).await; + let maybe_v = cache4.get(&KEY).await; assert!(maybe_v.is_none()); } }; @@ -3214,8 +3145,8 @@ mod tests { let cache5 = cache.clone(); async move { // Wait for 400 ms before calling `get`. 
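The `expire_after(expiry)` builder calls exercised by the tests above take an implementation of the `Expiry` trait, which chooses a per-entry lifetime at creation, read, and update time. A minimal sketch of such an implementation, assuming the fixed 10-second lifetime used throughout the tests; only `expire_after_create` is overridden, the read and update hooks keep their defaults:

```rust
use moka::{future::Cache, Expiry};
use std::time::{Duration, Instant};

/// Gives every entry a fixed lifetime measured from the moment it is created.
struct CreatedTtl;

impl Expiry<&'static str, &'static str> for CreatedTtl {
    fn expire_after_create(
        &self,
        _key: &&'static str,
        _value: &&'static str,
        _created_at: Instant,
    ) -> Option<Duration> {
        Some(Duration::from_secs(10))
    }
}

#[tokio::main]
async fn main() {
    let cache: Cache<&str, &str> = Cache::builder()
        .max_capacity(100)
        .expire_after(CreatedTtl)
        .build();

    cache.insert("a", "alice").await;
    assert_eq!(cache.get(&"a").await, Some("alice"));
}
```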
- Timer::after(Duration::from_millis(400)).await; - let maybe_v = cache5.get(&KEY); + sleep(Duration::from_millis(400)).await; + let maybe_v = cache5.get(&KEY).await; assert_eq!(maybe_v, Some("task1")); } }; @@ -3240,7 +3171,7 @@ mod tests { let v = cache1 .get_with_by_ref(KEY, async { // Wait for 300 ms and return a &str value. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; "task1" }) .await; @@ -3255,7 +3186,7 @@ mod tests { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `get_with_by_ref`. - Timer::after(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; let v = cache2.get_with_by_ref(KEY, async { unreachable!() }).await; assert_eq!(v, "task1"); } @@ -3270,7 +3201,7 @@ mod tests { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `get_with_by_ref`. - Timer::after(Duration::from_millis(400)).await; + sleep(Duration::from_millis(400)).await; let v = cache3.get_with_by_ref(KEY, async { unreachable!() }).await; assert_eq!(v, "task1"); } @@ -3282,8 +3213,8 @@ mod tests { let cache4 = cache.clone(); async move { // Wait for 200 ms before calling `get`. - Timer::after(Duration::from_millis(200)).await; - let maybe_v = cache4.get(KEY); + sleep(Duration::from_millis(200)).await; + let maybe_v = cache4.get(KEY).await; assert!(maybe_v.is_none()); } }; @@ -3294,8 +3225,8 @@ mod tests { let cache5 = cache.clone(); async move { // Wait for 400 ms before calling `get`. - Timer::after(Duration::from_millis(400)).await; - let maybe_v = cache5.get(KEY); + sleep(Duration::from_millis(400)).await; + let maybe_v = cache5.get(KEY).await; assert_eq!(maybe_v, Some("task1")); } }; @@ -3322,7 +3253,7 @@ mod tests { .or_insert_with_if( async { // Wait for 300 ms and return a &str value. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; "task1" }, |_v| unreachable!(), @@ -3342,7 +3273,7 @@ mod tests { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `or_insert_with_if`. - Timer::after(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; let entry = cache2 .entry(KEY) .or_insert_with_if(async { unreachable!() }, |_v| unreachable!()) @@ -3364,7 +3295,7 @@ mod tests { let cache3 = cache.clone(); async move { // Wait for 350 ms before calling `or_insert_with_if`. - Timer::after(Duration::from_millis(350)).await; + sleep(Duration::from_millis(350)).await; let entry = cache3 .entry(KEY) .or_insert_with_if(async { unreachable!() }, |v| { @@ -3385,7 +3316,7 @@ mod tests { let cache4 = cache.clone(); async move { // Wait for 400 ms before calling `or_insert_with_if`. - Timer::after(Duration::from_millis(400)).await; + sleep(Duration::from_millis(400)).await; let entry = cache4 .entry(KEY) .or_insert_with_if(async { "task4" }, |v| { @@ -3404,8 +3335,8 @@ mod tests { let cache5 = cache.clone(); async move { // Wait for 200 ms before calling `get`. - Timer::after(Duration::from_millis(200)).await; - let maybe_v = cache5.get(&KEY); + sleep(Duration::from_millis(200)).await; + let maybe_v = cache5.get(&KEY).await; assert!(maybe_v.is_none()); } }; @@ -3416,8 +3347,8 @@ mod tests { let cache6 = cache.clone(); async move { // Wait for 350 ms before calling `get`. 
- Timer::after(Duration::from_millis(350)).await; - let maybe_v = cache6.get(&KEY); + sleep(Duration::from_millis(350)).await; + let maybe_v = cache6.get(&KEY).await; assert_eq!(maybe_v, Some("task1")); } }; @@ -3428,8 +3359,8 @@ mod tests { let cache7 = cache.clone(); async move { // Wait for 450 ms before calling `get`. - Timer::after(Duration::from_millis(450)).await; - let maybe_v = cache7.get(&KEY); + sleep(Duration::from_millis(450)).await; + let maybe_v = cache7.get(&KEY).await; assert_eq!(maybe_v, Some("task4")); } }; @@ -3456,7 +3387,7 @@ mod tests { .or_insert_with_if( async { // Wait for 300 ms and return a &str value. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; "task1" }, |_v| unreachable!(), @@ -3476,7 +3407,7 @@ mod tests { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `or_insert_with_if`. - Timer::after(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; let entry = cache2 .entry_by_ref(KEY) .or_insert_with_if(async { unreachable!() }, |_v| unreachable!()) @@ -3498,7 +3429,7 @@ mod tests { let cache3 = cache.clone(); async move { // Wait for 350 ms before calling `or_insert_with_if`. - Timer::after(Duration::from_millis(350)).await; + sleep(Duration::from_millis(350)).await; let entry = cache3 .entry_by_ref(KEY) .or_insert_with_if(async { unreachable!() }, |v| { @@ -3519,7 +3450,7 @@ mod tests { let cache4 = cache.clone(); async move { // Wait for 400 ms before calling `or_insert_with_if`. - Timer::after(Duration::from_millis(400)).await; + sleep(Duration::from_millis(400)).await; let entry = cache4 .entry_by_ref(KEY) .or_insert_with_if(async { "task4" }, |v| { @@ -3538,8 +3469,8 @@ mod tests { let cache5 = cache.clone(); async move { // Wait for 200 ms before calling `get`. - Timer::after(Duration::from_millis(200)).await; - let maybe_v = cache5.get(KEY); + sleep(Duration::from_millis(200)).await; + let maybe_v = cache5.get(KEY).await; assert!(maybe_v.is_none()); } }; @@ -3550,8 +3481,8 @@ mod tests { let cache6 = cache.clone(); async move { // Wait for 350 ms before calling `get`. - Timer::after(Duration::from_millis(350)).await; - let maybe_v = cache6.get(KEY); + sleep(Duration::from_millis(350)).await; + let maybe_v = cache6.get(KEY).await; assert_eq!(maybe_v, Some("task1")); } }; @@ -3562,8 +3493,8 @@ mod tests { let cache7 = cache.clone(); async move { // Wait for 450 ms before calling `get`. - Timer::after(Duration::from_millis(450)).await; - let maybe_v = cache7.get(KEY); + sleep(Duration::from_millis(450)).await; + let maybe_v = cache7.get(KEY).await; assert_eq!(maybe_v, Some("task4")); } }; @@ -3597,7 +3528,7 @@ mod tests { let v = cache1 .try_get_with(KEY, async { // Wait for 300 ms and return an error. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; Err(MyError("task1 error".into())) }) .await; @@ -3612,7 +3543,7 @@ mod tests { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `try_get_with`. - Timer::after(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; let v: MyResult<_> = cache2.try_get_with(KEY, async { unreachable!() }).await; assert!(v.is_err()); } @@ -3627,11 +3558,11 @@ mod tests { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `try_get_with`. 
- Timer::after(Duration::from_millis(400)).await; + sleep(Duration::from_millis(400)).await; let v: MyResult<_> = cache3 .try_get_with(KEY, async { // Wait for 300 ms and return an Ok(&str) value. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; Ok("task3") }) .await; @@ -3646,7 +3577,7 @@ mod tests { let cache4 = cache.clone(); async move { // Wait for 500 ms before calling `try_get_with`. - Timer::after(Duration::from_millis(500)).await; + sleep(Duration::from_millis(500)).await; let v: MyResult<_> = cache4.try_get_with(KEY, async { unreachable!() }).await; assert_eq!(v.unwrap(), "task3"); } @@ -3661,7 +3592,7 @@ mod tests { let cache5 = cache.clone(); async move { // Wait for 800 ms before calling `try_get_with`. - Timer::after(Duration::from_millis(800)).await; + sleep(Duration::from_millis(800)).await; let v: MyResult<_> = cache5.try_get_with(KEY, async { unreachable!() }).await; assert_eq!(v.unwrap(), "task3"); } @@ -3673,8 +3604,8 @@ mod tests { let cache6 = cache.clone(); async move { // Wait for 200 ms before calling `get`. - Timer::after(Duration::from_millis(200)).await; - let maybe_v = cache6.get(&KEY); + sleep(Duration::from_millis(200)).await; + let maybe_v = cache6.get(&KEY).await; assert!(maybe_v.is_none()); } }; @@ -3685,8 +3616,8 @@ mod tests { let cache7 = cache.clone(); async move { // Wait for 400 ms before calling `get`. - Timer::after(Duration::from_millis(400)).await; - let maybe_v = cache7.get(&KEY); + sleep(Duration::from_millis(400)).await; + let maybe_v = cache7.get(&KEY).await; assert!(maybe_v.is_none()); } }; @@ -3697,8 +3628,8 @@ mod tests { let cache8 = cache.clone(); async move { // Wait for 800 ms before calling `get`. - Timer::after(Duration::from_millis(800)).await; - let maybe_v = cache8.get(&KEY); + sleep(Duration::from_millis(800)).await; + let maybe_v = cache8.get(&KEY).await; assert_eq!(maybe_v, Some("task3")); } }; @@ -3732,7 +3663,7 @@ mod tests { let v = cache1 .try_get_with_by_ref(KEY, async { // Wait for 300 ms and return an error. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; Err(MyError("task1 error".into())) }) .await; @@ -3747,7 +3678,7 @@ mod tests { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `try_get_with_by_ref`. - Timer::after(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; let v: MyResult<_> = cache2 .try_get_with_by_ref(KEY, async { unreachable!() }) .await; @@ -3764,11 +3695,11 @@ mod tests { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `try_get_with_by_ref`. - Timer::after(Duration::from_millis(400)).await; + sleep(Duration::from_millis(400)).await; let v: MyResult<_> = cache3 .try_get_with_by_ref(KEY, async { // Wait for 300 ms and return an Ok(&str) value. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; Ok("task3") }) .await; @@ -3783,7 +3714,7 @@ mod tests { let cache4 = cache.clone(); async move { // Wait for 500 ms before calling `try_get_with_by_ref`. - Timer::after(Duration::from_millis(500)).await; + sleep(Duration::from_millis(500)).await; let v: MyResult<_> = cache4 .try_get_with_by_ref(KEY, async { unreachable!() }) .await; @@ -3800,7 +3731,7 @@ mod tests { let cache5 = cache.clone(); async move { // Wait for 800 ms before calling `try_get_with_by_ref`. 
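The `try_get_with` scenarios above encode the crate's documented behavior: concurrent callers for the same key share one init future, a failed init is reported to the callers as a shared `Arc<E>` and is not cached, and a later call is free to retry. A compact single-task sketch of those rules (the error type and values are made up for illustration):

```rust
use moka::future::Cache;
use std::sync::Arc;

#[derive(Debug)]
struct FetchError(String);

#[tokio::main]
async fn main() {
    let cache: Cache<u32, String> = Cache::new(100);

    // The init future fails, so nothing is cached and every waiter on this
    // call sees the same error behind an Arc.
    let res: Result<String, Arc<FetchError>> = cache
        .try_get_with(1, async { Err(FetchError("backend down".into())) })
        .await;
    assert!(res.is_err());
    assert_eq!(cache.get(&1).await, None);

    // A later call retries the init future; on success the value is cached.
    let res: Result<String, Arc<FetchError>> = cache
        .try_get_with(1, async { Ok("task3".to_string()) })
        .await;
    assert_eq!(res.unwrap(), "task3");
    assert_eq!(cache.get(&1).await, Some("task3".to_string()));
}
```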
- Timer::after(Duration::from_millis(800)).await; + sleep(Duration::from_millis(800)).await; let v: MyResult<_> = cache5 .try_get_with_by_ref(KEY, async { unreachable!() }) .await; @@ -3814,8 +3745,8 @@ mod tests { let cache6 = cache.clone(); async move { // Wait for 200 ms before calling `get`. - Timer::after(Duration::from_millis(200)).await; - let maybe_v = cache6.get(KEY); + sleep(Duration::from_millis(200)).await; + let maybe_v = cache6.get(KEY).await; assert!(maybe_v.is_none()); } }; @@ -3826,8 +3757,8 @@ mod tests { let cache7 = cache.clone(); async move { // Wait for 400 ms before calling `get`. - Timer::after(Duration::from_millis(400)).await; - let maybe_v = cache7.get(KEY); + sleep(Duration::from_millis(400)).await; + let maybe_v = cache7.get(KEY).await; assert!(maybe_v.is_none()); } }; @@ -3838,8 +3769,8 @@ mod tests { let cache8 = cache.clone(); async move { // Wait for 800 ms before calling `get`. - Timer::after(Duration::from_millis(800)).await; - let maybe_v = cache8.get(KEY); + sleep(Duration::from_millis(800)).await; + let maybe_v = cache8.get(KEY).await; assert_eq!(maybe_v, Some("task3")); } }; @@ -3864,7 +3795,7 @@ mod tests { let v = cache1 .optionally_get_with(KEY, async { // Wait for 300 ms and return an None. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; None }) .await; @@ -3880,7 +3811,7 @@ mod tests { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `optionally_get_with`. - Timer::after(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; let v = cache2 .optionally_get_with(KEY, async { unreachable!() }) .await; @@ -3897,11 +3828,11 @@ mod tests { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `optionally_get_with`. - Timer::after(Duration::from_millis(400)).await; + sleep(Duration::from_millis(400)).await; let v = cache3 .optionally_get_with(KEY, async { // Wait for 300 ms and return an Some(&str) value. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; Some("task3") }) .await; @@ -3916,7 +3847,7 @@ mod tests { let cache4 = cache.clone(); async move { // Wait for 500 ms before calling `try_get_with`. - Timer::after(Duration::from_millis(500)).await; + sleep(Duration::from_millis(500)).await; let v = cache4 .optionally_get_with(KEY, async { unreachable!() }) .await; @@ -3933,7 +3864,7 @@ mod tests { let cache5 = cache.clone(); async move { // Wait for 800 ms before calling `optionally_get_with`. - Timer::after(Duration::from_millis(800)).await; + sleep(Duration::from_millis(800)).await; let v = cache5 .optionally_get_with(KEY, async { unreachable!() }) .await; @@ -3947,8 +3878,8 @@ mod tests { let cache6 = cache.clone(); async move { // Wait for 200 ms before calling `get`. - Timer::after(Duration::from_millis(200)).await; - let maybe_v = cache6.get(&KEY); + sleep(Duration::from_millis(200)).await; + let maybe_v = cache6.get(&KEY).await; assert!(maybe_v.is_none()); } }; @@ -3959,8 +3890,8 @@ mod tests { let cache7 = cache.clone(); async move { // Wait for 400 ms before calling `get`. - Timer::after(Duration::from_millis(400)).await; - let maybe_v = cache7.get(&KEY); + sleep(Duration::from_millis(400)).await; + let maybe_v = cache7.get(&KEY).await; assert!(maybe_v.is_none()); } }; @@ -3971,8 +3902,8 @@ mod tests { let cache8 = cache.clone(); async move { // Wait for 800 ms before calling `get`. 
- Timer::after(Duration::from_millis(800)).await; - let maybe_v = cache8.get(&KEY); + sleep(Duration::from_millis(800)).await; + let maybe_v = cache8.get(&KEY).await; assert_eq!(maybe_v, Some("task3")); } }; @@ -3997,7 +3928,7 @@ mod tests { let v = cache1 .optionally_get_with_by_ref(KEY, async { // Wait for 300 ms and return an None. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; None }) .await; @@ -4013,7 +3944,7 @@ mod tests { let cache2 = cache.clone(); async move { // Wait for 100 ms before calling `optionally_get_with_by_ref`. - Timer::after(Duration::from_millis(100)).await; + sleep(Duration::from_millis(100)).await; let v = cache2 .optionally_get_with_by_ref(KEY, async { unreachable!() }) .await; @@ -4030,11 +3961,11 @@ mod tests { let cache3 = cache.clone(); async move { // Wait for 400 ms before calling `optionally_get_with_by_ref`. - Timer::after(Duration::from_millis(400)).await; + sleep(Duration::from_millis(400)).await; let v = cache3 .optionally_get_with_by_ref(KEY, async { // Wait for 300 ms and return an Some(&str) value. - Timer::after(Duration::from_millis(300)).await; + sleep(Duration::from_millis(300)).await; Some("task3") }) .await; @@ -4049,7 +3980,7 @@ mod tests { let cache4 = cache.clone(); async move { // Wait for 500 ms before calling `try_get_with`. - Timer::after(Duration::from_millis(500)).await; + sleep(Duration::from_millis(500)).await; let v = cache4 .optionally_get_with_by_ref(KEY, async { unreachable!() }) .await; @@ -4066,7 +3997,7 @@ mod tests { let cache5 = cache.clone(); async move { // Wait for 800 ms before calling `optionally_get_with_by_ref`. - Timer::after(Duration::from_millis(800)).await; + sleep(Duration::from_millis(800)).await; let v = cache5 .optionally_get_with_by_ref(KEY, async { unreachable!() }) .await; @@ -4080,8 +4011,8 @@ mod tests { let cache6 = cache.clone(); async move { // Wait for 200 ms before calling `get`. - Timer::after(Duration::from_millis(200)).await; - let maybe_v = cache6.get(KEY); + sleep(Duration::from_millis(200)).await; + let maybe_v = cache6.get(KEY).await; assert!(maybe_v.is_none()); } }; @@ -4092,8 +4023,8 @@ mod tests { let cache7 = cache.clone(); async move { // Wait for 400 ms before calling `get`. - Timer::after(Duration::from_millis(400)).await; - let maybe_v = cache7.get(KEY); + sleep(Duration::from_millis(400)).await; + let maybe_v = cache7.get(KEY).await; assert!(maybe_v.is_none()); } }; @@ -4104,8 +4035,8 @@ mod tests { let cache8 = cache.clone(); async move { // Wait for 800 ms before calling `get`. - Timer::after(Duration::from_millis(800)).await; - let maybe_v = cache8.get(KEY); + sleep(Duration::from_millis(800)).await; + let maybe_v = cache8.get(KEY).await; assert_eq!(maybe_v, Some("task3")); } }; @@ -4235,14 +4166,20 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .max_capacity(3) - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; // Make the cache exterior immutable. 
let cache = cache; @@ -4251,39 +4188,39 @@ mod tests { cache.invalidate(&'a').await; expected.push((Arc::new('a'), "alice", RemovalCause::Explicit)); - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 0); cache.insert('b', "bob").await; cache.insert('c', "cathy").await; cache.insert('d', "david").await; - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 3); // This will be rejected due to the size constraint. cache.insert('e', "emily").await; expected.push((Arc::new('e'), "emily", RemovalCause::Size)); - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 3); // Raise the popularity of 'e' so it will be accepted next time. - cache.get(&'e'); - cache.sync(); + cache.get(&'e').await; + cache.run_pending_tasks().await; // Retry. cache.insert('e', "eliza").await; // and the LRU entry will be evicted. expected.push((Arc::new('b'), "bob", RemovalCause::Size)); - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 3); // Replace an existing entry. cache.insert('d', "dennis").await; expected.push((Arc::new('d'), "david", RemovalCause::Replaced)); - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 3); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; } #[tokio::test] @@ -4294,29 +4231,35 @@ mod tests { // Create an eviction listener. let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| a1.lock().push((k, v, cause)); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + a2.lock().await.push((k, v, cause)); + } + .boxed() + }; // Create a cache with the eviction listener and also TTL and TTI. let mut cache = Cache::builder() - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .time_to_live(Duration::from_secs(7)) .time_to_idle(Duration::from_secs(5)) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; let (clock, mock) = Clock::mock(); - cache.set_expiration_clock(Some(clock)); + cache.set_expiration_clock(Some(clock)).await; // Make the cache exterior immutable. let cache = cache; cache.insert("alice", "a0").await; - cache.sync(); + cache.run_pending_tasks().await; // Now alice (a0) has been expired by the idle timeout (TTI). mock.increment(Duration::from_secs(6)); expected.push((Arc::new("alice"), "a0", RemovalCause::Expired)); - assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.get(&"alice").await, None); // We have not ran sync after the expiration of alice (a0), so it is // still in the cache. @@ -4327,16 +4270,16 @@ mod tests { // insert operation. We want to verify that the RemovalCause of a0 is // Expired, not Replaced. cache.insert("alice", "a1").await; - cache.sync(); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(4)); - assert_eq!(cache.get(&"alice"), Some("a1")); - cache.sync(); + assert_eq!(cache.get(&"alice").await, Some("a1")); + cache.run_pending_tasks().await; // Now alice has been expired by time-to-live (TTL). mock.increment(Duration::from_secs(4)); expected.push((Arc::new("alice"), "a1", RemovalCause::Expired)); - assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.get(&"alice").await, None); // But, again, it is still in the cache. 
assert_eq!(cache.entry_count(), 1); @@ -4344,38 +4287,38 @@ mod tests { // Re-insert alice with a different value and verify that the // RemovalCause of a1 is Expired (not Replaced). cache.insert("alice", "a2").await; - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 1); // Now alice (a2) has been expired by the idle timeout. mock.increment(Duration::from_secs(6)); expected.push((Arc::new("alice"), "a2", RemovalCause::Expired)); - assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.get(&"alice").await, None); assert_eq!(cache.entry_count(), 1); // This invalidate will internally remove alice (a2). cache.invalidate(&"alice").await; - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 0); // Re-insert, and this time, make it expired by the TTL. cache.insert("alice", "a3").await; - cache.sync(); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(4)); - assert_eq!(cache.get(&"alice"), Some("a3")); - cache.sync(); + assert_eq!(cache.get(&"alice").await, Some("a3")); + cache.run_pending_tasks().await; mock.increment(Duration::from_secs(4)); expected.push((Arc::new("alice"), "a3", RemovalCause::Expired)); - assert_eq!(cache.get(&"alice"), None); + assert_eq!(cache.get(&"alice").await, None); assert_eq!(cache.entry_count(), 1); // This invalidate will internally remove alice (a2). cache.invalidate(&"alice").await; - cache.sync(); + cache.run_pending_tasks().await; assert_eq!(cache.entry_count(), 0); - verify_notification_vec(&cache, actual, &expected); + verify_notification_vec(&cache, actual, &expected).await; } // NOTE: To enable the panic logging, run the following command: @@ -4395,43 +4338,127 @@ mod tests { // Create an eviction listener that panics when it see // a value "panic now!". let a1 = Arc::clone(&actual); - let listener = move |k, v, cause| { - if v == "panic now!" { - panic!("Panic now!"); + let listener = move |k, v, cause| -> ListenerFuture { + let a2 = Arc::clone(&a1); + async move { + if v == "panic now!" { + panic!("Panic now!"); + } + a2.lock().await.push((k, v, cause)); } - a1.lock().push((k, v, cause)) + .boxed() }; // Create a cache with the eviction listener. let mut cache = Cache::builder() .name("My Future Cache") - .eviction_listener_with_queued_delivery_mode(listener) + .async_eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; // Insert an okay value. cache.insert("alice", "a0").await; - cache.sync(); + cache.run_pending_tasks().await; // Insert a value that will cause the eviction listener to panic. cache.insert("alice", "panic now!").await; expected.push((Arc::new("alice"), "a0", RemovalCause::Replaced)); - cache.sync(); + cache.run_pending_tasks().await; // Insert an okay value. This will replace the previous // value "panic now!" so the eviction listener will panic. cache.insert("alice", "a2").await; - cache.sync(); + cache.run_pending_tasks().await; // No more removal notification should be sent. // Invalidate the okay value. 
cache.invalidate(&"alice").await; - cache.sync(); + cache.run_pending_tasks().await; + + verify_notification_vec(&cache, actual, &expected).await; + } + + #[tokio::test] + async fn cancel_future_while_running_pending_tasks() { + use crate::future::FutureExt; + use futures_util::future::poll_immediate; + use tokio::task::yield_now; + + let listener_initiation_count: Arc = Default::default(); + let listener_completion_count: Arc = Default::default(); + + let listener = { + // Variables to capture. + let init_count = Arc::clone(&listener_initiation_count); + let comp_count = Arc::clone(&listener_completion_count); + + // Our eviction listener closure. + move |_k, _v, _r| { + init_count.fetch_add(1, Ordering::AcqRel); + let comp_count1 = Arc::clone(&comp_count); + + async move { + yield_now().await; + comp_count1.fetch_add(1, Ordering::AcqRel); + } + .boxed() + } + }; + + let mut cache: Cache = Cache::builder() + .time_to_live(Duration::from_millis(10)) + .async_eviction_listener(listener) + .build(); + + cache.reconfigure_for_testing().await; + + let (clock, mock) = Clock::mock(); + cache.set_expiration_clock(Some(clock)).await; + + // Make the cache exterior immutable. + let cache = cache; - verify_notification_vec(&cache, actual, &expected); + cache.insert(1, 1).await; + assert_eq!(cache.run_pending_tasks_initiation_count(), 0); + assert_eq!(cache.run_pending_tasks_completion_count(), 0); + + // Key 1 is not yet expired. + mock.increment(Duration::from_millis(7)); + + cache.run_pending_tasks().await; + assert_eq!(cache.run_pending_tasks_initiation_count(), 1); + assert_eq!(cache.run_pending_tasks_completion_count(), 1); + assert_eq!(listener_initiation_count.load(Ordering::Acquire), 0); + assert_eq!(listener_completion_count.load(Ordering::Acquire), 0); + + // Now key 1 is expired, so the eviction listener should be called when we + // call run_pending_tasks() and poll the returned future. + mock.increment(Duration::from_millis(7)); + + let fut = cache.run_pending_tasks(); + // Poll the fut only once, and drop it. The fut should not be completed (so + // it is cancelled) because the eviction listener performed a yield_now(). + assert!(poll_immediate(fut).await.is_none()); + + // The task is initiated but not completed. + assert_eq!(cache.run_pending_tasks_initiation_count(), 2); + assert_eq!(cache.run_pending_tasks_completion_count(), 1); + // The listener is initiated but not completed. + assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); + assert_eq!(listener_completion_count.load(Ordering::Acquire), 0); + + // This will resume the task and the listener, and continue polling + // until complete. + cache.run_pending_tasks().await; + // Now the task is completed. + assert_eq!(cache.run_pending_tasks_initiation_count(), 2); + assert_eq!(cache.run_pending_tasks_completion_count(), 2); + // Now the listener is completed. 
+ assert_eq!(listener_initiation_count.load(Ordering::Acquire), 1); + assert_eq!(listener_completion_count.load(Ordering::Acquire), 1); } // This test ensures that the `contains_key`, `get` and `invalidate` can use @@ -4447,7 +4474,7 @@ mod tests { // key as &Vec let key_v: &Vec = &key; assert!(cache.contains_key(key_v)); - assert_eq!(cache.get(key_v), Some(())); + assert_eq!(cache.get(key_v).await, Some(())); cache.invalidate(key_v).await; cache.insert(key, ()).await; @@ -4455,7 +4482,7 @@ mod tests { // key as &[u8] let key_s: &[u8] = &[1_u8]; assert!(cache.contains_key(key_s)); - assert_eq!(cache.get(key_s), Some(())); + assert_eq!(cache.get(key_s).await, Some(())); cache.invalidate(key_s).await; } @@ -4477,9 +4504,9 @@ mod tests { let mut cache = Cache::builder() .max_capacity(MAX_CAPACITY as u64) - .eviction_listener_with_queued_delivery_mode(listener) + .eviction_listener(listener) .build(); - cache.reconfigure_for_testing(); + cache.reconfigure_for_testing().await; // Make the cache exterior immutable. let cache = cache; @@ -4488,7 +4515,7 @@ mod tests { let value = Arc::new(Value::new(vec![0u8; 1024], &counters)); cache.insert(key, value).await; counters.incl_inserted(); - cache.sync(); + cache.run_pending_tasks().await; } let eviction_count = KEYS - MAX_CAPACITY; @@ -4503,7 +4530,7 @@ mod tests { if counters.evicted() != eviction_count || counters.value_dropped() != eviction_count { if retries <= MAX_RETRIES { retries += 1; - cache.sync(); + cache.run_pending_tasks().await; continue; } else { assert_eq!(counters.evicted(), eviction_count, "Retries exhausted"); @@ -4526,7 +4553,7 @@ mod tests { for key in 0..KEYS { cache.invalidate(&key).await; - cache.sync(); + cache.run_pending_tasks().await; } let mut retries = 0; @@ -4537,7 +4564,7 @@ mod tests { if counters.invalidated() != MAX_CAPACITY || counters.value_dropped() != KEYS { if retries <= MAX_RETRIES { retries += 1; - cache.sync(); + cache.run_pending_tasks().await; continue; } else { assert_eq!(counters.invalidated(), MAX_CAPACITY, "Retries exhausted"); @@ -4575,7 +4602,7 @@ mod tests { type NotificationTuple = (Arc, V, RemovalCause); - fn verify_notification_vec( + async fn verify_notification_vec( cache: &Cache, actual: Arc>>>, expected: &[NotificationTuple], @@ -4591,11 +4618,11 @@ mod tests { // Ensure all scheduled notifications have been processed. 
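Among the tests above, the borrowed-key one (querying a `Vec<u8>` key with `&Vec<u8>` and `&[u8]`) pins down that `contains_key`, `get`, and `invalidate` accept any borrowed form of the key type. A sketch of the same idea with a more common pairing, `String` keys queried by `&str` (the runtime and capacity are arbitrary):

```rust
use moka::future::Cache;

#[tokio::main]
async fn main() {
    let cache: Cache<String, u32> = Cache::new(100);
    cache.insert("alice".to_string(), 1).await;

    // `String: Borrow<str>`, so a plain &str works for read and invalidate
    // operations without allocating a lookup key.
    assert!(cache.contains_key("alice"));
    assert_eq!(cache.get("alice").await, Some(1));
    cache.invalidate("alice").await;
    assert_eq!(cache.get("alice").await, None);
}
```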
std::thread::sleep(Duration::from_millis(500)); - let actual = &*actual.lock(); + let actual = &*actual.lock().await; if actual.len() != expected.len() { if retries <= MAX_RETRIES { retries += 1; - cache.sync(); + cache.run_pending_tasks().await; continue; } else { assert_eq!(actual.len(), expected.len(), "Retries exhausted"); diff --git a/src/future/entry_selector.rs b/src/future/entry_selector.rs index 9616e792..d4954b34 100644 --- a/src/future/entry_selector.rs +++ b/src/future/entry_selector.rs @@ -52,7 +52,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -94,7 +94,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -135,7 +135,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -195,7 +195,7 @@ where pub async fn or_insert_with_if( self, init: impl Future, - replace_if: impl FnMut(&V) -> bool, + replace_if: impl FnMut(&V) -> bool + Send, ) -> Entry { futures_util::pin_mut!(init); let key = Arc::new(self.owned_key); @@ -218,7 +218,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -293,7 +293,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -403,7 +403,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -444,7 +444,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -484,7 +484,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -543,7 +543,7 @@ where pub async fn or_insert_with_if( self, init: impl Future, - replace_if: impl FnMut(&V) -> bool, + replace_if: impl FnMut(&V) -> bool + Send, ) -> Entry { futures_util::pin_mut!(init); self.cache @@ -571,7 +571,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", 
features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; @@ -645,7 +645,7 @@ where /// // Cargo.toml /// // /// // [dependencies] - /// // moka = { version = "0.11", features = ["future"] } + /// // moka = { version = "0.12", features = ["future"] } /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] } /// /// use moka::future::Cache; diff --git a/src/future/housekeeper.rs b/src/future/housekeeper.rs new file mode 100644 index 00000000..06e6c340 --- /dev/null +++ b/src/future/housekeeper.rs @@ -0,0 +1,149 @@ +use crate::common::{ + concurrent::{ + atomic_time::AtomicInstant, + constants::{ + MAX_SYNC_REPEATS, PERIODICAL_SYNC_INITIAL_DELAY_MILLIS, READ_LOG_FLUSH_POINT, + WRITE_LOG_FLUSH_POINT, + }, + }, + time::{CheckedTimeOps, Instant}, +}; + +use std::{ + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, + time::Duration, +}; + +#[cfg(test)] +use std::sync::atomic::AtomicUsize; + +use async_lock::Mutex; +use async_trait::async_trait; +use futures_util::future::{BoxFuture, Shared}; + +#[async_trait] +pub(crate) trait InnerSync { + async fn run_pending_tasks(&self, max_sync_repeats: usize); + fn now(&self) -> Instant; +} + +pub(crate) struct Housekeeper { + /// A shared `Future` of the maintenance task that is currently being resolved. + current_task: Mutex>>>, + run_after: AtomicInstant, + auto_run_enabled: AtomicBool, + #[cfg(test)] + pub(crate) start_count: AtomicUsize, + #[cfg(test)] + pub(crate) complete_count: AtomicUsize, +} + +impl Default for Housekeeper { + fn default() -> Self { + Self { + current_task: Default::default(), + run_after: AtomicInstant::new(Self::sync_after(Instant::now())), + auto_run_enabled: AtomicBool::new(true), + #[cfg(test)] + start_count: Default::default(), + #[cfg(test)] + complete_count: Default::default(), + } + } +} + +impl Housekeeper { + pub(crate) fn should_apply_reads(&self, ch_len: usize, now: Instant) -> bool { + self.should_apply(ch_len, READ_LOG_FLUSH_POINT / 8, now) + } + + pub(crate) fn should_apply_writes(&self, ch_len: usize, now: Instant) -> bool { + self.should_apply(ch_len, WRITE_LOG_FLUSH_POINT / 8, now) + } + + #[inline] + fn should_apply(&self, ch_len: usize, ch_flush_point: usize, now: Instant) -> bool { + self.auto_run_enabled.load(Ordering::Relaxed) + && (ch_len >= ch_flush_point || now >= self.run_after.instant().unwrap()) + } + + pub(crate) async fn run_pending_tasks(&self, cache: Arc) + where + T: InnerSync + Send + Sync + 'static, + { + let mut current_task = self.current_task.lock().await; + self.do_run_pending_tasks(cache, &mut current_task).await; + } + + pub(crate) async fn try_run_pending_tasks(&self, cache: Arc) -> bool + where + T: InnerSync + Send + Sync + 'static, + { + if let Some(mut current_task) = self.current_task.try_lock() { + self.do_run_pending_tasks(cache, &mut current_task).await; + true + } else { + false + } + } + + async fn do_run_pending_tasks( + &self, + cache: Arc, + current_task: &mut Option>>, + ) where + T: InnerSync + Send + Sync + 'static, + { + use futures_util::FutureExt; + + let now = cache.now(); + + if let Some(task) = &*current_task { + // This task was being resolved, but did not complete. This means the + // previous run was canceled due to the enclosing Future was dropped. + // Resume the task now by awaiting. + task.clone().await; + } else { + // Create a new maintenance task and resolve it. 
+ let task = async move { cache.run_pending_tasks(MAX_SYNC_REPEATS).await } + .boxed() + .shared(); + *current_task = Some(task.clone()); + + #[cfg(test)] + self.start_count.fetch_add(1, Ordering::AcqRel); + + task.await; + } + + // If we are here, it means that the maintenance task has been resolved. + // We can remove it from the lock. + *current_task = None; + self.run_after.set_instant(Self::sync_after(now)); + + #[cfg(test)] + self.complete_count.fetch_add(1, Ordering::AcqRel); + } + + fn sync_after(now: Instant) -> Instant { + let dur = Duration::from_millis(PERIODICAL_SYNC_INITIAL_DELAY_MILLIS); + let ts = now.checked_add(dur); + // Assuming that `now` is current wall clock time, this should never fail at + // least next millions of years. + ts.expect("Timestamp overflow") + } +} + +#[cfg(test)] +impl Housekeeper { + pub(crate) fn disable_auto_run(&self) { + self.auto_run_enabled.store(false, Ordering::Relaxed); + } + + pub(crate) fn reset_run_after(&self, now: Instant) { + self.run_after.set_instant(Self::sync_after(now)); + } +} diff --git a/src/future/invalidator.rs b/src/future/invalidator.rs new file mode 100644 index 00000000..f3038194 --- /dev/null +++ b/src/future/invalidator.rs @@ -0,0 +1,392 @@ +use super::{PredicateId, PredicateIdStr}; +use crate::{ + common::{ + concurrent::{AccessTime, KvEntry, ValueEntry}, + time::Instant, + }, + PredicateError, +}; + +use async_lock::{Mutex, MutexGuard}; +use async_trait::async_trait; +use std::{ + hash::{BuildHasher, Hash}, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, +}; +use triomphe::Arc as TrioArc; +use uuid::Uuid; + +pub(crate) type PredicateFun = Arc bool + Send + Sync + 'static>; + +const PREDICATE_MAP_NUM_SEGMENTS: usize = 16; + +#[async_trait] +pub(crate) trait GetOrRemoveEntry { + fn get_value_entry(&self, key: &Arc, hash: u64) -> Option>>; + + async fn remove_key_value_if( + &self, + key: &Arc, + hash: u64, + condition: F, + ) -> Option>> + where + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + F: for<'a, 'b> FnMut(&'a Arc, &'b TrioArc>) -> bool + Send; +} + +pub(crate) struct KeyDateLite { + key: Arc, + hash: u64, + timestamp: Instant, +} + +impl Clone for KeyDateLite { + fn clone(&self) -> Self { + Self { + key: Arc::clone(&self.key), + hash: self.hash, + timestamp: self.timestamp, + } + } +} + +impl KeyDateLite { + pub(crate) fn new(key: &Arc, hash: u64, timestamp: Instant) -> Self { + Self { + key: Arc::clone(key), + hash, + timestamp, + } + } +} + +pub(crate) struct Invalidator { + predicates: crate::cht::SegmentedHashMap, S>, + is_empty: AtomicBool, + scan_context: Arc>, +} + +// +// Crate public methods. 
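The housekeeper above keeps the in-flight maintenance work behind a shared handle (a `Shared` wrapper around a boxed future) precisely so that a caller who drops (cancels) `run_pending_tasks` part-way does not lose the work: the next caller awaits the same shared future and it resumes from the last yield point instead of starting over. A standalone sketch of that mechanism, using only `futures_util` and Tokio; the counter and the `yield_now` stand in for the real maintenance steps and are not moka internals:

```rust
use futures_util::future::{poll_immediate, BoxFuture, FutureExt, Shared};
use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
};

#[tokio::main]
async fn main() {
    let steps = Arc::new(AtomicUsize::new(0));
    let s = Arc::clone(&steps);

    // A task with a yield point in the middle, mimicking an async eviction
    // listener being awaited inside the maintenance task.
    let task: Shared<BoxFuture<'static, ()>> = async move {
        s.fetch_add(1, Ordering::SeqCst);
        tokio::task::yield_now().await;
        s.fetch_add(1, Ordering::SeqCst);
    }
    .boxed()
    .shared();

    // The first caller polls once and then drops its handle, simulating a
    // caller dropping the `run_pending_tasks` future before completion.
    let first = task.clone();
    assert!(poll_immediate(first).await.is_none());
    assert_eq!(steps.load(Ordering::SeqCst), 1);

    // A later caller awaits another handle; the task resumes after the yield
    // point rather than restarting from the beginning.
    task.await;
    assert_eq!(steps.load(Ordering::SeqCst), 2);
}
```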
+// +impl Invalidator { + pub(crate) fn new(hasher: S) -> Self + where + S: BuildHasher, + { + const CAPACITY: usize = 0; + let predicates = crate::cht::SegmentedHashMap::with_num_segments_capacity_and_hasher( + PREDICATE_MAP_NUM_SEGMENTS, + CAPACITY, + hasher, + ); + Self { + predicates, + is_empty: AtomicBool::new(true), + scan_context: Arc::new(ScanContext::default()), + } + } + + pub(crate) fn is_empty(&self) -> bool { + self.is_empty.load(Ordering::Acquire) + } + + pub(crate) async fn remove_predicates_registered_before(&self, ts: Instant) + where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher, + { + let pred_map = &self.predicates; + + let removing_ids = pred_map + .iter() + .filter(|(_, pred)| pred.registered_at <= ts) + .map(|(id, _)| id) + .collect::>(); + + for id in removing_ids { + let hash = pred_map.hash(&id); + pred_map.remove(hash, |k| k == &id); + } + + if pred_map.is_empty() { + self.is_empty.store(true, Ordering::Release); + } + } + + pub(crate) fn register_predicate( + &self, + predicate: PredicateFun, + registered_at: Instant, + ) -> Result + where + K: Hash + Eq, + S: BuildHasher, + { + const MAX_RETRY: usize = 1_000; + let mut tries = 0; + let preds = &self.predicates; + + while tries < MAX_RETRY { + let id = Uuid::new_v4().as_hyphenated().to_string(); + + let hash = preds.hash(&id); + if preds.contains_key(hash, |k| k == &id) { + tries += 1; + + continue; // Retry + } + let pred = Predicate::new(&id, predicate, registered_at); + preds.insert_entry_and(id.clone(), hash, pred, |_, _| ()); + self.is_empty.store(false, Ordering::Release); + + return Ok(id); + } + + // Since we are using 128-bit UUID for the ID and we do retries for MAX_RETRY + // times, this panic should extremely unlikely occur (unless there is a bug in + // UUID generation). + panic!("Cannot assign a new PredicateId to a predicate"); + } + + // This method will be called by the get method of Cache. + #[inline] + pub(crate) fn apply_predicates(&self, key: &Arc, entry: &TrioArc>) -> bool + where + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher, + { + if self.is_empty() { + false + } else if let Some(ts) = entry.last_modified() { + Self::do_apply_predicates( + self.predicates.iter().map(|(_, v)| v), + key, + &entry.value, + ts, + ) + } else { + false + } + } + + pub(crate) async fn scan_and_invalidate( + &self, + cache: &C, + candidates: Vec>, + is_truncated: bool, + ) -> (Vec>, bool) + where + C: GetOrRemoveEntry, + K: Hash + Eq + Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + S: BuildHasher, + { + let mut predicates = self.scan_context.predicates.lock().await; + if predicates.is_empty() { + *predicates = self.predicates.iter().map(|(_k, v)| v).collect(); + } + + let mut invalidated = Vec::default(); + let mut newest_timestamp = None; + + for candidate in &candidates { + let key = &candidate.key; + let hash = candidate.hash; + let ts = candidate.timestamp; + if self.apply(&predicates, cache, key, hash, ts) { + if let Some(entry) = Self::invalidate(cache, key, hash, ts).await { + invalidated.push(KvEntry { + key: Arc::clone(key), + entry, + }) + } + } + newest_timestamp = Some(ts); + } + + self.remove_finished_predicates(predicates, is_truncated, newest_timestamp) + .await; + + (invalidated, self.predicates.is_empty()) + } +} + +// +// Private methods. 
+// +impl Invalidator { + #[inline] + fn do_apply_predicates(predicates: I, key: &K, value: &V, ts: Instant) -> bool + where + I: Iterator>, + { + for predicate in predicates { + if predicate.is_applicable(ts) && predicate.apply(key, value) { + return true; + } + } + false + } + + async fn remove_finished_predicates( + &self, + mut predicates: MutexGuard<'_, Vec>>, + is_truncated: bool, + newest_timestamp: Option, + ) where + K: Hash + Eq, + S: BuildHasher, + { + let predicates = &mut *predicates; + if is_truncated { + if let Some(ts) = newest_timestamp { + let (active, finished): (Vec<_>, Vec<_>) = + predicates.drain(..).partition(|p| p.is_applicable(ts)); + + // Remove finished predicates from the predicate registry. + self.remove_predicates(&finished).await; + // Set the active predicates to the scan context. + *predicates = active; + } else { + unreachable!(); + } + } else { + // Remove all the predicates from the predicate registry and scan context. + self.remove_predicates(predicates).await; + predicates.clear(); + } + } + + async fn remove_predicates(&self, predicates: &[Predicate]) + where + K: Hash + Eq, + S: BuildHasher, + { + let pred_map = &self.predicates; + predicates.iter().for_each(|p| { + let hash = pred_map.hash(p.id()); + pred_map.remove(hash, |k| k == p.id()); + }); + + if pred_map.is_empty() { + self.is_empty.store(true, Ordering::Release); + } + } + + fn apply( + &self, + predicates: &[Predicate], + cache: &C, + key: &Arc, + hash: u64, + ts: Instant, + ) -> bool + where + C: GetOrRemoveEntry, + { + if let Some(entry) = cache.get_value_entry(key, hash) { + if let Some(lm) = entry.last_modified() { + if lm == ts { + return Self::do_apply_predicates( + predicates.iter().cloned(), + key, + &entry.value, + lm, + ); + } + } + } + + false + } + + async fn invalidate( + cache: &C, + key: &Arc, + hash: u64, + ts: Instant, + ) -> Option>> + where + C: GetOrRemoveEntry, + K: Send + Sync + 'static, + V: Clone + Send + Sync + 'static, + { + cache + .remove_key_value_if(key, hash, |_, v| { + if let Some(lm) = v.last_modified() { + lm == ts + } else { + false + } + }) + .await + } +} + +// +// for testing +// +#[cfg(test)] +impl Invalidator { + pub(crate) fn predicate_count(&self) -> usize { + self.predicates.len() + } +} + +struct ScanContext { + predicates: Mutex>>, +} + +impl Default for ScanContext { + fn default() -> Self { + Self { + predicates: Mutex::new(Vec::default()), + } + } +} + +struct Predicate { + id: PredicateId, + f: PredicateFun, + registered_at: Instant, +} + +impl Clone for Predicate { + fn clone(&self) -> Self { + Self { + id: self.id.clone(), + f: Arc::clone(&self.f), + registered_at: self.registered_at, + } + } +} + +impl Predicate { + fn new(id: PredicateIdStr<'_>, f: PredicateFun, registered_at: Instant) -> Self { + Self { + id: id.to_string(), + f, + registered_at, + } + } + + fn id(&self) -> PredicateIdStr<'_> { + &self.id + } + + fn is_applicable(&self, last_modified: Instant) -> bool { + last_modified <= self.registered_at + } + + fn apply(&self, key: &K, value: &V) -> bool { + (self.f)(key, value) + } +} diff --git a/src/future/key_lock.rs b/src/future/key_lock.rs new file mode 100644 index 00000000..fc06ff2f --- /dev/null +++ b/src/future/key_lock.rs @@ -0,0 +1,95 @@ +use std::{ + hash::{BuildHasher, Hash}, + sync::Arc, +}; + +use crate::cht::SegmentedHashMap; + +use async_lock::{Mutex, MutexGuard}; +use triomphe::Arc as TrioArc; + +const LOCK_MAP_NUM_SEGMENTS: usize = 64; + +type LockMap = SegmentedHashMap, TrioArc>, S>; + +// We need the `where` 
clause here because of the Drop impl. +pub(crate) struct KeyLock<'a, K, S> +where + K: Eq + Hash, + S: BuildHasher, +{ + map: &'a LockMap, + key: Arc, + hash: u64, + lock: TrioArc>, +} + +impl<'a, K, S> Drop for KeyLock<'a, K, S> +where + K: Eq + Hash, + S: BuildHasher, +{ + fn drop(&mut self) { + if TrioArc::count(&self.lock) <= 2 { + self.map.remove_if( + self.hash, + |k| k == &self.key, + |_k, v| TrioArc::count(v) <= 2, + ); + } + } +} + +impl<'a, K, S> KeyLock<'a, K, S> +where + K: Eq + Hash, + S: BuildHasher, +{ + fn new(map: &'a LockMap, key: &Arc, hash: u64, lock: TrioArc>) -> Self { + Self { + map, + key: Arc::clone(key), + hash, + lock, + } + } + + pub(crate) async fn lock(&self) -> MutexGuard<'_, ()> { + self.lock.lock().await + } +} + +pub(crate) struct KeyLockMap { + locks: LockMap, +} + +impl KeyLockMap +where + K: Eq + Hash, + S: BuildHasher, +{ + pub(crate) fn with_hasher(hasher: S) -> Self { + Self { + locks: SegmentedHashMap::with_num_segments_and_hasher(LOCK_MAP_NUM_SEGMENTS, hasher), + } + } + + pub(crate) fn key_lock(&self, key: &Arc) -> KeyLock<'_, K, S> { + let hash = self.locks.hash(key); + let kl = TrioArc::new(Mutex::new(())); + match self + .locks + .insert_if_not_present(Arc::clone(key), hash, kl.clone()) + { + None => KeyLock::new(&self.locks, key, hash, kl), + Some(existing_kl) => KeyLock::new(&self.locks, key, hash, existing_kl), + } + } +} + +#[cfg(test)] +impl KeyLockMap { + pub(crate) fn is_empty(&self) -> bool { + self.locks.len() == 0 + } +} diff --git a/src/future/notifier.rs b/src/future/notifier.rs new file mode 100644 index 00000000..356865ed --- /dev/null +++ b/src/future/notifier.rs @@ -0,0 +1,82 @@ +use std::sync::{ + atomic::{AtomicBool, Ordering}, + Arc, +}; + +use futures_util::FutureExt; + +use crate::notification::{AsyncEvictionListener, RemovalCause}; + +pub(crate) struct RemovalNotifier { + listener: AsyncEvictionListener, + is_enabled: AtomicBool, + #[cfg(feature = "logging")] + cache_name: Option, +} + +impl RemovalNotifier { + pub(crate) fn new(listener: AsyncEvictionListener, _cache_name: Option) -> Self { + Self { + listener, + is_enabled: AtomicBool::new(true), + #[cfg(feature = "logging")] + cache_name: _cache_name, + } + } + + pub(crate) async fn notify(&self, key: Arc, value: V, cause: RemovalCause) { + use std::panic::{catch_unwind, AssertUnwindSafe}; + + if !self.is_enabled.load(Ordering::Acquire) { + return; + } + + // This macro unwraps the result of the catch_unwind call if it is Ok. And + // disable the notifier and do early return if the listener panicked. + macro_rules! try_or_disable { + ($match_expr:expr) => { + match $match_expr { + Ok(v) => v, + Err(_payload) => { + self.is_enabled.store(false, Ordering::Release); + #[cfg(feature = "logging")] + log_panic(&*_payload, self.cache_name.as_deref()); + return; + } + } + }; + } + + let listener_clo = || (self.listener)(key, value, cause); + + // Safety: It is safe to assert unwind safety here because we will not + // call the listener again if it has been panicked. + let fut = try_or_disable!(catch_unwind(AssertUnwindSafe(listener_clo))); + try_or_disable!(AssertUnwindSafe(fut).catch_unwind().await); + } +} + +#[cfg(feature = "logging")] +fn log_panic(payload: &(dyn std::any::Any + Send + 'static), cache_name: Option<&str>) { + // Try to downcast the payload into &str or String. + // + // NOTE: Clippy will complain if we use `if let Some(_)` here. 
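`notify` above guards the user-supplied async listener in two stages: `catch_unwind(AssertUnwindSafe(listener_clo))` catches a panic raised while the closure builds its future, and `AssertUnwindSafe(fut).catch_unwind().await` catches one raised while that future is polled; in either case the notifier disables itself instead of letting the panic escape into cache maintenance. A standalone sketch of the same two-stage guard (the listener, values, and messages are illustrative, not moka internals):

```rust
use futures_util::future::BoxFuture;
use futures_util::FutureExt;
use std::panic::{catch_unwind, AssertUnwindSafe};

#[tokio::main]
async fn main() {
    // An async "listener": a closure that returns a boxed future.
    let listener = |value: &str| -> BoxFuture<'static, ()> {
        let v = value.to_owned();
        async move {
            if v == "panic now!" {
                panic!("panic inside the listener future");
            }
            println!("notified: {v}");
        }
        .boxed()
    };

    for v in ["ok", "panic now!"] {
        // Stage 1: the closure itself could panic before returning a future.
        let fut = match catch_unwind(AssertUnwindSafe(|| listener(v))) {
            Ok(fut) => fut,
            Err(_) => continue, // a real notifier would disable itself here
        };
        // Stage 2: the returned future could panic while being awaited.
        if AssertUnwindSafe(fut).catch_unwind().await.is_err() {
            eprintln!("listener panicked while handling {v:?}; it would be disabled");
        }
    }
}
```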
+ // https://rust-lang.github.io/rust-clippy/master/index.html#manual_map + let message: Option> = + (payload.downcast_ref::<&str>().map(|s| (*s).into())) + .or_else(|| payload.downcast_ref::().map(Into::into)); + + let cn = cache_name + .map(|name| format!("[{}] ", name)) + .unwrap_or_default(); + + if let Some(m) = message { + log::error!( + "{}Disabled the eviction listener because it panicked at '{}'", + cn, + m + ); + } else { + log::error!("{}Disabled the eviction listener because it panicked", cn); + } +} diff --git a/src/future/value_initializer.rs b/src/future/value_initializer.rs index a479a9c1..a6ed101b 100644 --- a/src/future/value_initializer.rs +++ b/src/future/value_initializer.rs @@ -1,5 +1,6 @@ -use async_lock::{RwLock, RwLockWriteGuard}; -use futures_util::{future::BoxFuture, FutureExt}; +use async_lock::{Mutex, RwLock, RwLockWriteGuard}; +use async_trait::async_trait; +use futures_util::FutureExt; use std::{ any::{Any, TypeId}, future::Future, @@ -13,6 +14,21 @@ use super::OptionallyNone; const WAITER_MAP_NUM_SEGMENTS: usize = 64; +#[async_trait] +pub(crate) trait GetOrInsert { + async fn get_without_recording( + &self, + key: &Arc, + hash: u64, + replace_if: Option<&mut I>, + ) -> Option + where + V: 'static, + I: for<'i> FnMut(&'i V) -> bool + Send; + + async fn insert(&self, key: Arc, hash: u64, value: V); +} + type ErrorObject = Arc; pub(crate) enum InitResult { @@ -128,21 +144,23 @@ where /// # Panics /// Panics if the `init` future has been panicked. - pub(crate) async fn try_init_or_read<'a, O, E>( + #[allow(clippy::too_many_arguments)] + pub(crate) async fn try_init_or_read<'a, C, I, O, E>( &'a self, key: &Arc, + hash: u64, type_id: TypeId, - // Closure to get an existing value from cache. - mut get: impl FnMut() -> Option, + cache: &C, + ignore_if: Arc>>, // Future to initialize a new value. init: Pin<&mut impl Future>, - // Closure that returns a future to insert a new value into cache. - mut insert: impl FnMut(V) -> BoxFuture<'a, ()> + Send + 'a, // Function to convert a value O, returned from the init future, into // Result. post_init: fn(O) -> Result, ) -> InitResult where + C: GetOrInsert + Send + 'a, + I: FnMut(&V) -> bool + Send, E: Send + Sync + 'static, { use std::panic::{resume_unwind, AssertUnwindSafe}; @@ -151,7 +169,7 @@ where const MAX_RETRIES: usize = 200; let mut retries = 0; - let (cht_key, hash) = cht_key_hash(&self.waiters, key, type_id); + let cht_key = (Arc::clone(key), type_id); loop { let waiter = TrioArc::new(RwLock::new(WaiterValue::Computing)); @@ -172,7 +190,10 @@ where ); // Check if the value has already been inserted by other thread. - if let Some(value) = get() { + if let Some(value) = cache + .get_without_recording(key, hash, ignore_if.lock().await.as_mut()) + .await + { // Yes. Set the waiter value, remove our waiter, and return // the existing value. 
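The `GetOrInsert` trait introduced in `value_initializer.rs` above relies on the `async-trait` crate because, at this crate's MSRV (Rust 1.65), `async fn` cannot appear directly in a trait definition. A minimal sketch of the pattern with a hypothetical `KvStore` trait and `MemoryStore` impl that are not part of Moka:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

use async_trait::async_trait;

// `#[async_trait]` rewrites each `async fn` into a regular method returning a
// boxed future, which is why the attribute must appear on both the trait and
// every impl of it.
#[async_trait]
trait KvStore {
    async fn get(&self, key: &str) -> Option<String>;
    async fn insert(&self, key: String, value: String);
}

struct MemoryStore {
    map: Mutex<HashMap<String, String>>,
}

#[async_trait]
impl KvStore for MemoryStore {
    async fn get(&self, key: &str) -> Option<String> {
        self.map.lock().unwrap().get(key).cloned()
    }

    async fn insert(&self, key: String, value: String) {
        self.map.lock().unwrap().insert(key, value);
    }
}
```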
waiter_guard.set_waiter_value(WaiterValue::Ready(Ok(value.clone()))); @@ -188,7 +209,7 @@ where Ok(value) => { let (waiter_val, init_res) = match post_init(value) { Ok(value) => { - insert(value.clone()).await; + cache.insert(Arc::clone(key), hash, value.clone()).await; ( WaiterValue::Ready(Ok(value.clone())), InitResult::Initialized(value), @@ -313,21 +334,6 @@ where waiter_map.insert_if_not_present(cht_key, hash, waiter) } -#[inline] -fn cht_key_hash( - waiter_map: &WaiterMap, - key: &Arc, - type_id: TypeId, -) -> ((Arc, TypeId), u64) -where - (Arc, TypeId): Eq + Hash, - S: BuildHasher, -{ - let cht_key = (Arc::clone(key), type_id); - let hash = waiter_map.hash(&cht_key); - (cht_key, hash) -} - fn panic_if_retry_exhausted_for_panicking(retries: usize, max: usize) { if retries >= max { panic!( diff --git a/src/lib.rs b/src/lib.rs index 160ec20f..7bdfd1af 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,17 +22,17 @@ //! //! - Thread-safe, highly concurrent in-memory cache implementations: //! - Synchronous caches that can be shared across OS threads. -//! - An asynchronous (futures aware) cache that can be accessed inside and -//! outside of asynchronous contexts. +//! - An asynchronous (futures aware) cache. //! - A cache can be bounded by one of the followings: //! - The maximum number of entries. //! - The total weighted size of entries. (Size aware eviction) -//! - Maintains good hit rate by using entry replacement algorithms inspired by -//! [Caffeine][caffeine-git]: +//! - Maintains near optimal hit ratio by using an entry replacement algorithms +//! inspired by Caffeine: //! - Admission to a cache is controlled by the Least Frequently Used (LFU) //! policy. //! - Eviction from a cache is controlled by the Least Recently Used (LRU) //! policy. +//! - [More details and some benchmark results are available here][tiny-lfu]. //! - Supports expiration policies: //! - Time to live. //! - Time to idle. @@ -40,6 +40,8 @@ //! - Supports eviction listener, a callback function that will be called when an //! entry is removed from the cache. //! +//! [tiny-lfu]: https://github.com/moka-rs/moka/wiki#admission-and-eviction-policies +//! //! # Examples //! //! See the following document: @@ -59,7 +61,7 @@ //! //! - Non concurrent cache for single threaded applications: //! - `moka::unsync::Cache` → [`mini_moka::unsync::Cache`][unsync-cache-struct] -//! - Experimental, thread-safe, synchronous cache: +//! - A simple, thread-safe, synchronous cache: //! - `moka::dash::Cache` → [`mini_moka::sync::Cache`][dash-cache-struct] //! //! [mini-moka-crate]: https://crates.io/crates/mini-moka @@ -99,6 +101,9 @@ //! - The numbers of read/write recordings reach to the configured amounts. //! - Or, the certain time past from the last draining. //! +//! **TODO (v0.12.0 release)**: Update the following section as we do not use the +//! worker threads anymore. +//! //! In a `Cache`, this draining and batch application is handled by a single worker //! thread. So under heavy concurrent operations from clients, draining may not be //! able to catch up and the bounded channels can become full. @@ -155,8 +160,7 @@ //! //! - The time-to-live policy uses a write-order queue. //! - The time-to-idle policy uses an access-order queue. -//! - The variable expiration will use a [hierarchical timer wheel][timer-wheel] -//! (*1). +//! - The variable expiration uses a [hierarchical timer wheel][timer-wheel] (*1). //! //! *1: If you get 404 page not found when you click on the link to the hierarchical //! 
timer wheel paper, try to change the URL from `https:` to `http:`. diff --git a/src/notification.rs b/src/notification.rs index 672eb7f8..cad55a7e 100644 --- a/src/notification.rs +++ b/src/notification.rs @@ -1,15 +1,30 @@ //! Common data types for notifications. +#[cfg(feature = "sync")] pub(crate) mod notifier; -use std::sync::Arc; +use std::{future::Future, pin::Pin, sync::Arc}; +/// A future returned by an eviction listener. +/// +/// You can use the [`boxed` method][boxed-method] of `FutureExt` trait to convert a +/// regular `Future` object into `ListenerFuture`. +/// +/// [boxed-method]: ../future/trait.FutureExt.html#method.boxed +pub type ListenerFuture = Pin + Send>>; + +#[cfg(feature = "sync")] pub(crate) type EvictionListener = Arc, V, RemovalCause) + Send + Sync + 'static>; +#[cfg(feature = "sync")] pub(crate) type EvictionListenerRef<'a, K, V> = &'a Arc, V, RemovalCause) + Send + Sync + 'static>; +#[cfg(feature = "future")] +pub(crate) type AsyncEvictionListener = + Box, V, RemovalCause) -> ListenerFuture + Send + Sync + 'static>; + // NOTE: Currently, dropping the cache will drop all entries without sending // notifications. Calling `invalidate_all` method of the cache will trigger // the notifications, but currently there is no way to know when all entries @@ -20,11 +35,13 @@ pub(crate) type EvictionListenerRef<'a, K, V> = /// Currently only setting the [`DeliveryMode`][delivery-mode] is supported. /// /// [delivery-mode]: ./enum.DeliveryMode.html +#[cfg(feature = "sync")] #[derive(Clone, Debug, Default)] pub struct Configuration { mode: DeliveryMode, } +#[cfg(feature = "sync")] impl Configuration { pub fn builder() -> ConfigurationBuilder { ConfigurationBuilder::default() @@ -41,11 +58,13 @@ impl Configuration { /// /// [conf]: ./struct.Configuration.html /// [delivery-mode]: ./enum.DeliveryMode.html +#[cfg(feature = "sync")] #[derive(Default)] pub struct ConfigurationBuilder { mode: DeliveryMode, } +#[cfg(feature = "sync")] impl ConfigurationBuilder { pub fn build(self) -> Configuration { Configuration { mode: self.mode } @@ -62,6 +81,7 @@ impl ConfigurationBuilder { /// For more details, see [the document][delivery-mode-doc] of `sync::Cache`. /// /// [delivery-mode-doc]: ../sync/struct.Cache.html#delivery-modes-for-eviction-listener +#[cfg(feature = "sync")] #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum DeliveryMode { /// With this mode, a notification should be delivered to the listener @@ -87,6 +107,7 @@ pub enum DeliveryMode { Queued, } +#[cfg(feature = "sync")] impl Default for DeliveryMode { fn default() -> Self { Self::Immediate diff --git a/src/sync_base.rs b/src/sync_base.rs index 8ca085f9..567ef320 100644 --- a/src/sync_base.rs +++ b/src/sync_base.rs @@ -1,6 +1,12 @@ +pub(crate) mod iter; + +#[cfg(feature = "sync")] pub(crate) mod base_cache; + +#[cfg(feature = "sync")] mod invalidator; -pub(crate) mod iter; + +#[cfg(feature = "sync")] mod key_lock; /// The type of the unique ID to identify a predicate used by @@ -9,6 +15,8 @@ mod key_lock; /// A `PredicateId` is a `String` of UUID (version 4). 
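As the new doc comment on `ListenerFuture` says, an async eviction listener returns a pinned, boxed future, and `FutureExt::boxed` is the easiest way to produce one from an `async` block. A sketch of wiring a listener into the v0.12 builder; the `async_eviction_listener` method name follows the migration guide, and the rest is illustrative:

```rust
use moka::future::{Cache, FutureExt};
use moka::notification::ListenerFuture;

fn build_cache() -> Cache<String, String> {
    Cache::builder()
        .max_capacity(1_000)
        .async_eviction_listener(|key, value, cause| -> ListenerFuture {
            // The listener body is an ordinary `async` block; `boxed()` turns
            // it into the `ListenerFuture` alias defined in `notification.rs`.
            async move {
                eprintln!("evicted {key}: {value} ({cause:?})");
            }
            .boxed()
        })
        .build()
}
```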
/// /// [invalidate-if]: ./struct.Cache.html#method.invalidate_entries_if +#[cfg(feature = "sync")] pub type PredicateId = String; +#[cfg(feature = "sync")] pub(crate) type PredicateIdStr<'a> = &'a str; diff --git a/src/sync_base/base_cache.rs b/src/sync_base/base_cache.rs index 7dd68220..c19b4267 100644 --- a/src/sync_base/base_cache.rs +++ b/src/sync_base/base_cache.rs @@ -34,9 +34,6 @@ use crate::{ Entry, Expiry, Policy, PredicateError, }; -#[cfg(feature = "unstable-debug-counters")] -use common::concurrent::debug_counters::CacheDebugStats; - use crossbeam_channel::{Receiver, Sender, TrySendError}; use crossbeam_utils::atomic::AtomicCell; use parking_lot::{Mutex, RwLock}; @@ -45,7 +42,6 @@ use std::{ borrow::Borrow, collections::hash_map::RandomState, hash::{BuildHasher, Hash, Hasher}, - ptr::NonNull, rc::Rc, sync::{ atomic::{AtomicBool, AtomicU8, Ordering}, @@ -130,11 +126,6 @@ impl BaseCache { { self.inner.notify_invalidate(key, entry); } - - #[cfg(feature = "unstable-debug-counters")] - pub fn debug_stats(&self) -> CacheDebugStats { - self.inner.debug_stats() - } } impl BaseCache @@ -904,17 +895,11 @@ impl EntrySizeAndFrequency { } } -// Access-Order Queue Node -type AoqNode = NonNull>>; - enum AdmissionResult { Admitted { - victim_nodes: SmallVec<[AoqNode; 8]>, - skipped_nodes: SmallVec<[AoqNode; 4]>, - }, - Rejected { - skipped_nodes: SmallVec<[AoqNode; 4]>, + victim_keys: SmallVec<[KeyHash; 8]>, }, + Rejected, } type CacheStore = crate::cht::SegmentedHashMap, TrioArc>, S>; @@ -1028,19 +1013,6 @@ impl Inner { self.key_locks.as_ref().map(|kls| kls.key_lock(key)) } - #[cfg(feature = "unstable-debug-counters")] - pub fn debug_stats(&self) -> CacheDebugStats { - let ec = self.entry_count.load(); - let ws = self.weighted_size.load(); - - CacheDebugStats::new( - ec, - ws, - (self.cache.capacity() * 2) as u64, - self.frequency_sketch.read().table_size(), - ) - } - #[inline] fn current_time_from_expiration_clock(&self) -> Instant { if self.clocks.has_expiration_clock.load(Ordering::Relaxed) { @@ -1614,27 +1586,23 @@ where } } - let skipped_nodes; let mut candidate = EntrySizeAndFrequency::new(new_weight); candidate.add_frequency(freq, kh.hash); // Try to admit the candidate. match Self::admit(&candidate, &self.cache, deqs, freq) { - AdmissionResult::Admitted { - victim_nodes, - skipped_nodes: mut skipped, - } => { - // Try to remove the victims from the cache (hash map). - for victim in victim_nodes { - let element = unsafe { &victim.as_ref().element }; + AdmissionResult::Admitted { victim_keys } => { + // Try to remove the victims from the hash map. + for victim in victim_keys { + let vic_key = victim.key; + let vic_hash = victim.hash; // Lock the key for removal if blocking removal notification is enabled. - let kl = self.maybe_key_lock(element.key()); + let kl = self.maybe_key_lock(&vic_key); let _klg = &kl.as_ref().map(|kl| kl.lock()); - if let Some((vic_key, vic_entry)) = self - .cache - .remove_entry(element.hash(), |k| k == element.key()) + if let Some((vic_key, vic_entry)) = + self.cache.remove_entry(vic_hash, |k| k == &vic_key) { if eviction_state.is_notifier_enabled() { eviction_state.add_removed_entry( @@ -1651,13 +1619,15 @@ where &mut eviction_state.counters, ); } else { - // Could not remove the victim from the cache. Skip this - // victim node as its ValueEntry might have been - // invalidated. Add it to the skipped nodes. - skipped.push(victim); + // Could not remove the victim from the cache. Skip it as its + // ValueEntry might have been invalidated. 
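A quick note on the `SmallVec<[KeyHash; 8]>` now used for `victim_keys`: up to eight victim keys are stored inline, and the collection only spills to the heap when an admission pass aggregates more victims than that. A stand-alone illustration with plain integers (`smallvec` is already a dependency of this crate):

```rust
use smallvec::SmallVec;

// Collect the squares of 0..n into a SmallVec with inline capacity 8.
fn squares(n: u64) -> SmallVec<[u64; 8]> {
    (0..n).map(|i| i * i).collect()
}

fn main() {
    assert!(!squares(4).spilled()); // 4 elements fit inline, no heap allocation
    assert!(squares(20).spilled()); // 20 elements spill to the heap
}
```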
+ if let Some(node) = deqs.probation.peek_front() { + if node.element.key() == &vic_key && node.element.hash() == vic_hash { + deqs.probation.move_front_to_back(); + } + } } } - skipped_nodes = skipped; // Add the candidate to the deques. self.handle_admit( @@ -1668,9 +1638,7 @@ where &mut eviction_state.counters, ); } - AdmissionResult::Rejected { skipped_nodes: s } => { - skipped_nodes = s; - + AdmissionResult::Rejected => { // Lock the key for removal if blocking removal notification is enabled. let kl = self.maybe_key_lock(&kh.key); let _klg = &kl.as_ref().map(|kl| kl.lock()); @@ -1683,12 +1651,6 @@ where } } }; - - // Move the skipped nodes to the back of the deque. We do not unlink (drop) - // them because ValueEntries in the write op queue should be pointing them. - for node in skipped_nodes { - unsafe { deqs.probation.move_to_back(node) }; - } } /// Performs size-aware admission explained in the paper: @@ -1712,18 +1674,19 @@ where fn admit( candidate: &EntrySizeAndFrequency, cache: &CacheStore, - deqs: &Deques, + deqs: &mut Deques, freq: &FrequencySketch, ) -> AdmissionResult { const MAX_CONSECUTIVE_RETRIES: usize = 5; let mut retries = 0; let mut victims = EntrySizeAndFrequency::default(); - let mut victim_nodes = SmallVec::default(); - let mut skipped_nodes = SmallVec::default(); + let mut victim_keys = SmallVec::default(); + + let deq = &mut deqs.probation; // Get first potential victim at the LRU position. - let mut next_victim = deqs.probation.peek_front_ptr(); + let mut next_victim = deq.peek_front_ptr(); // Aggregate potential victims. while victims.policy_weight < candidate.policy_weight { @@ -1732,27 +1695,30 @@ where } if let Some(victim) = next_victim.take() { next_victim = DeqNode::next_node_ptr(victim); + let vic_elem = &unsafe { victim.as_ref() }.element; + let key = vic_elem.key(); + let hash = vic_elem.hash(); - if let Some(vic_entry) = cache.get(vic_elem.hash(), |k| k == vic_elem.key()) { + if let Some(vic_entry) = cache.get(hash, |k| k == key) { victims.add_policy_weight(vic_entry.policy_weight()); - victims.add_frequency(freq, vic_elem.hash()); - victim_nodes.push(victim); + victims.add_frequency(freq, hash); + victim_keys.push(KeyHash::new(Arc::clone(key), hash)); retries = 0; } else { // Could not get the victim from the cache (hash map). Skip this node // as its ValueEntry might have been invalidated. - skipped_nodes.push(victim); - + unsafe { deq.move_to_back(victim) }; retries += 1; - if retries > MAX_CONSECUTIVE_RETRIES { - break; - } } } else { // No more potential victims. break; } + + if retries > MAX_CONSECUTIVE_RETRIES { + break; + } } // Admit or reject the candidate. @@ -1761,12 +1727,9 @@ where // See Caffeine's implementation. if victims.policy_weight >= candidate.policy_weight && candidate.freq > victims.freq { - AdmissionResult::Admitted { - victim_nodes, - skipped_nodes, - } + AdmissionResult::Admitted { victim_keys } } else { - AdmissionResult::Rejected { skipped_nodes } + AdmissionResult::Rejected } } @@ -2137,8 +2100,7 @@ where } Self::handle_remove(deqs, timer_wheel, entry, &mut eviction_state.counters); } else if let Some(entry) = self.cache.get(hash, |k| k == key) { - // TODO: CHECKME: Should we check `entry.is_dirty()` instead? 
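The candidate-versus-victims comparison in `admit` above is the core of the size-aware admission: the candidate wins only if evicting the aggregated victims frees at least as much policy weight as it needs and it is estimated to be used more frequently than those victims combined. Stripped of the deque bookkeeping, the rule reduces to a single predicate (simplified stand-ins, not the internal types):

```rust
// Aggregated policy weight and estimated frequency for a candidate entry or
// for a group of potential victims.
struct SizeAndFreq {
    policy_weight: u64,
    freq: u32,
}

fn admit(candidate: &SizeAndFreq, victims: &SizeAndFreq) -> bool {
    victims.policy_weight >= candidate.policy_weight && candidate.freq > victims.freq
}

fn main() {
    let candidate = SizeAndFreq { policy_weight: 2, freq: 5 };
    let victims = SizeAndFreq { policy_weight: 3, freq: 4 };
    assert!(admit(&candidate, &victims));
}
```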
- if entry.last_modified().is_none() { + if entry.is_dirty() { deqs.move_to_back_ao(&entry); deqs.move_to_back_wo(&entry); } else { diff --git a/src/sync_base/iter.rs b/src/sync_base/iter.rs index c52d72d4..7e518036 100644 --- a/src/sync_base/iter.rs +++ b/src/sync_base/iter.rs @@ -16,6 +16,9 @@ pub(crate) trait ScanningGet { fn keys(&self, cht_segment: usize) -> Option>>; } +/// Iterator visiting all key-value pairs in a cache in arbitrary order. +/// +/// Call [`Cache::iter`](./struct.Cache.html#method.iter) method to obtain an `Iter`. pub struct Iter<'i, K, V> { keys: Option>>, cache_segments: Box<[&'i dyn ScanningGet]>, diff --git a/tests/entry_api_actix_rt2.rs b/tests/entry_api_actix_rt2.rs index e72a4187..1254864e 100644 --- a/tests/entry_api_actix_rt2.rs +++ b/tests/entry_api_actix_rt2.rs @@ -73,7 +73,7 @@ fn test_get_with() -> Result<(), Box> { }; assert_eq!(value.len(), TEN_MIB); - assert!(my_cache.get(key.as_str()).is_some()); + assert!(my_cache.get(key.as_str()).await.is_some()); println!("Task {} got the value. (len: {})", task_id, value.len()); }) diff --git a/tests/entry_api_async_std.rs b/tests/entry_api_async_std.rs index 79919c37..4b3ab926 100644 --- a/tests/entry_api_async_std.rs +++ b/tests/entry_api_async_std.rs @@ -70,7 +70,7 @@ async fn test_get_with() { }; assert_eq!(value.len(), TEN_MIB); - assert!(my_cache.get(key.as_str()).is_some()); + assert!(my_cache.get(key.as_str()).await.is_some()); println!("Task {} got the value. (len: {})", task_id, value.len()); }) diff --git a/tests/entry_api_tokio.rs b/tests/entry_api_tokio.rs index e9a85fa6..aa6a0338 100644 --- a/tests/entry_api_tokio.rs +++ b/tests/entry_api_tokio.rs @@ -71,7 +71,7 @@ async fn test_get_with() { }; assert_eq!(value.len(), TEN_MIB); - assert!(my_cache.get(key.as_str()).is_some()); + assert!(my_cache.get(key.as_str()).await.is_some()); println!("Task {} got the value. (len: {})", task_id, value.len()); }) @@ -137,7 +137,7 @@ async fn test_optionally_get_with() { }; assert!(value.is_some()); - assert!(my_cache.get(key.as_str()).is_some()); + assert!(my_cache.get(key.as_str()).await.is_some()); println!( "Task {} got the value. (len: {})", @@ -208,7 +208,7 @@ async fn test_try_get_with() { }; assert!(value.is_ok()); - assert!(my_cache.get(key.as_str()).is_some()); + assert!(my_cache.get(key.as_str()).await.is_some()); println!( "Task {} got the value. (len: {})", diff --git a/tests/runtime_actix_rt2.rs b/tests/runtime_actix_rt2.rs index ac21030e..2a487694 100644 --- a/tests/runtime_actix_rt2.rs +++ b/tests/runtime_actix_rt2.rs @@ -1,11 +1,15 @@ #![cfg(all(test, feature = "future"))] -use actix_rt::Runtime; +use std::sync::Arc; + +use actix_rt::System; use moka::future::Cache; +use tokio::sync::Barrier; -#[test] -fn main() -> Result<(), Box> { - const NUM_TASKS: usize = 16; +#[actix_rt::test] +async fn main() -> Result<(), Box> { + const NUM_TASKS: usize = 12; + const NUM_THREADS: usize = 4; const NUM_KEYS_PER_TASK: usize = 64; fn value(n: usize) -> String { @@ -15,55 +19,83 @@ fn main() -> Result<(), Box> { // Create a cache that can store up to 10,000 entries. let cache = Cache::new(10_000); - // Create Actix Runtime - let rt = Runtime::new()?; + let barrier = Arc::new(Barrier::new(NUM_THREADS + NUM_TASKS)); // Spawn async tasks and write to and read from the cache. + // NOTE: Actix Runtime is single threaded. let tasks: Vec<_> = (0..NUM_TASKS) .map(|i| { - // To share the same cache across the async tasks, clone it. - // This is a cheap operation. 
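These test updates, here and in the runtime tests that follow, stem from the first migration-guide item: `future::Cache::get` is now an `async fn`, so every call site gains an `.await`. A minimal before/after sketch, assuming a Tokio runtime:

```rust
use moka::future::Cache;

#[tokio::main]
async fn main() {
    let cache: Cache<u32, String> = Cache::new(16);
    cache.insert(0, String::from("zero")).await;

    // v0.11: `cache.get(&0)` returned `Option<String>` directly.
    // v0.12: `get` returns a future, so the call site must await it.
    assert_eq!(cache.get(&0).await, Some(String::from("zero")));
}
```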
+ // To share the same cache across the async tasks and OS threads, clone + // it. This is a cheap operation. + let my_cache = cache.clone(); + let my_barrier = Arc::clone(&barrier); + let start = i * NUM_KEYS_PER_TASK; + let end = (i + 1) * NUM_KEYS_PER_TASK; + + actix_rt::spawn(async move { + // Wait for the all async tasks and threads to be spawned. + my_barrier.wait().await; + + // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) + for key in start..end { + my_cache.insert(key, value(key)).await; + assert_eq!(my_cache.get(&key).await, Some(value(key))); + } + + // Invalidate every 4 element of the inserted entries. + for key in (start..end).step_by(4) { + my_cache.invalidate(&key).await; + } + }) + }) + .collect(); + + // Spawn OS threads and write to and read from the cache. + let threads: Vec<_> = (0..NUM_THREADS) + .map(|i| i + NUM_TASKS) + .map(|i| { let my_cache = cache.clone(); + let my_barrier = Arc::clone(&barrier); let start = i * NUM_KEYS_PER_TASK; let end = (i + 1) * NUM_KEYS_PER_TASK; - // NOTE: Actix Runtime is single threaded. - rt.spawn(async move { + std::thread::spawn(move || { + // It seems there is no way to get a SystemRunner from the current + // System (`System::current()`). So, create a new System. + let runner = System::new(); // Returns a SystemRunner. + + // Wait for the all async tasks and threads to be spawned. + runner.block_on(my_barrier.wait()); + // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) for key in start..end { - if key % 8 == 0 { - my_cache.blocking().insert(key, value(key)); - } else { - // insert() is an async method, so await it - my_cache.insert(key, value(key)).await; - } - // get() returns Option, a clone of the stored value. - assert_eq!(my_cache.get(&key), Some(value(key))); + runner.block_on(my_cache.insert(key, value(key))); + assert_eq!(runner.block_on(my_cache.get(&key)), Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { - if key % 8 == 0 { - my_cache.blocking().invalidate(&key); - } else { - // invalidate() is an async method, so await it - my_cache.invalidate(&key).await; - } + runner.block_on(my_cache.invalidate(&key)); } }) }) .collect(); - rt.block_on(futures_util::future::join_all(tasks)); + futures_util::future::join_all(tasks).await; + for t in threads { + t.join().unwrap(); + } // Verify the result. for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { if key % 4 == 0 { - assert_eq!(cache.get(&key), None); + assert_eq!(cache.get(&key).await, None); } else { - assert_eq!(cache.get(&key), Some(value(key))); + assert_eq!(cache.get(&key).await, Some(value(key))); } } + System::current().stop(); + Ok(()) } diff --git a/tests/runtime_async_std.rs b/tests/runtime_async_std.rs index 4f0ecebd..8d762d32 100644 --- a/tests/runtime_async_std.rs +++ b/tests/runtime_async_std.rs @@ -1,10 +1,16 @@ #![cfg(all(test, feature = "future"))] +use std::sync::Arc; + +// Use async_lock's Barrier instead of async_std's Barrier as the latter requires +// `unstable` feature (v1.12.0). +use async_lock::Barrier; use moka::future::Cache; #[async_std::test] async fn main() { - const NUM_TASKS: usize = 16; + const NUM_TASKS: usize = 12; + const NUM_THREADS: usize = 4; const NUM_KEYS_PER_TASK: usize = 64; fn value(n: usize) -> String { @@ -14,36 +20,60 @@ async fn main() { // Create a cache that can store up to 10,000 entries. let cache = Cache::new(10_000); + let barrier = Arc::new(Barrier::new(NUM_THREADS + NUM_TASKS)); + // Spawn async tasks and write to and read from the cache. 
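All three rewritten runtime tests use the same handshake: a `Barrier` sized to the total number of async tasks plus OS threads, so none of them touches the cache until everything has been spawned. The handshake in isolation, shown here with Tokio's `Barrier` (the async-std test uses `async_lock::Barrier` for the same purpose):

```rust
use std::sync::Arc;

use tokio::sync::Barrier;

#[tokio::main]
async fn main() {
    const NUM_TASKS: usize = 4;
    let barrier = Arc::new(Barrier::new(NUM_TASKS));

    let tasks: Vec<_> = (0..NUM_TASKS)
        .map(|i| {
            let my_barrier = Arc::clone(&barrier);
            tokio::spawn(async move {
                // No task proceeds past this point until every task has
                // reached the barrier.
                my_barrier.wait().await;
                println!("task {i} started");
            })
        })
        .collect();

    for t in tasks {
        t.await.unwrap();
    }
}
```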
let tasks: Vec<_> = (0..NUM_TASKS) .map(|i| { - // To share the same cache across the async tasks, clone it. - // This is a cheap operation. + // To share the same cache across the async tasks and OS threads, clone + // it. This is a cheap operation. let my_cache = cache.clone(); + let my_barrier = Arc::clone(&barrier); let start = i * NUM_KEYS_PER_TASK; let end = (i + 1) * NUM_KEYS_PER_TASK; async_std::task::spawn(async move { + // Wait for the all async tasks and threads to be spawned. + my_barrier.wait().await; + + // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) + for key in start..end { + my_cache.insert(key, value(key)).await; + assert_eq!(my_cache.get(&key).await, Some(value(key))); + } + + // Invalidate every 4 element of the inserted entries. + for key in (start..end).step_by(4) { + my_cache.invalidate(&key).await; + } + }) + }) + .collect(); + + // Spawn threads and write to and read from the cache. + let threads: Vec<_> = (0..NUM_THREADS) + .map(|i| i + NUM_TASKS) + .map(|i| { + let my_cache = cache.clone(); + let my_barrier = Arc::clone(&barrier); + let start = i * NUM_KEYS_PER_TASK; + let end = (i + 1) * NUM_KEYS_PER_TASK; + + std::thread::spawn(move || { + use async_std::task::block_on; + + // Wait for the all async tasks and threads to be spawned. + block_on(my_barrier.wait()); + // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) for key in start..end { - if key % 8 == 0 { - my_cache.blocking().insert(key, value(key)); - } else { - // insert() is an async method, so await it - my_cache.insert(key, value(key)).await; - } - // get() returns Option, a clone of the stored value. - assert_eq!(my_cache.get(&key), Some(value(key))); + block_on(my_cache.insert(key, value(key))); + assert_eq!(block_on(my_cache.get(&key)), Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { - if key % 8 == 0 { - my_cache.blocking().invalidate(&key); - } else { - // invalidate() is an async method, so await it - my_cache.invalidate(&key).await; - } + block_on(my_cache.invalidate(&key)); } }) }) @@ -51,13 +81,16 @@ async fn main() { // Wait for all tasks to complete. futures_util::future::join_all(tasks).await; + for t in threads { + t.join().unwrap(); + } // Verify the result. for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { if key % 4 == 0 { - assert_eq!(cache.get(&key), None); + assert_eq!(cache.get(&key).await, None); } else { - assert_eq!(cache.get(&key), Some(value(key))); + assert_eq!(cache.get(&key).await, Some(value(key))); } } } diff --git a/tests/runtime_tokio.rs b/tests/runtime_tokio.rs index e03ef69b..9b4d4c74 100644 --- a/tests/runtime_tokio.rs +++ b/tests/runtime_tokio.rs @@ -1,10 +1,14 @@ #![cfg(all(test, feature = "future"))] +use std::sync::Arc; + use moka::future::Cache; +use tokio::sync::Barrier; #[tokio::test] async fn main() { - const NUM_TASKS: usize = 16; + const NUM_TASKS: usize = 12; + const NUM_THREADS: usize = 4; const NUM_KEYS_PER_TASK: usize = 64; fn value(n: usize) -> String { @@ -14,50 +18,76 @@ async fn main() { // Create a cache that can store up to 10,000 entries. let cache = Cache::new(10_000); + let barrier = Arc::new(Barrier::new(NUM_THREADS + NUM_TASKS)); + // Spawn async tasks and write to and read from the cache. let tasks: Vec<_> = (0..NUM_TASKS) .map(|i| { - // To share the same cache across the async tasks, clone it. - // This is a cheap operation. + // To share the same cache across the async tasks and OS threads, clone + // it. This is a cheap operation. 
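The "cheap operation" comment holds because cloning a `Cache` clones a handle rather than the stored entries; every clone shares the same underlying storage, which is what lets the tasks and threads in these tests observe each other's writes. A small demonstration:

```rust
use moka::future::Cache;

#[tokio::main]
async fn main() {
    let cache: Cache<u32, u32> = Cache::new(8);
    let clone = cache.clone(); // cheap: both handles share the same store

    clone.insert(7, 49).await;
    assert_eq!(cache.get(&7).await, Some(49));
}
```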
let my_cache = cache.clone(); + let my_barrier = Arc::clone(&barrier); let start = i * NUM_KEYS_PER_TASK; let end = (i + 1) * NUM_KEYS_PER_TASK; tokio::spawn(async move { + // Wait for the all async tasks and threads to be spawned. + my_barrier.wait().await; + // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) for key in start..end { - if key % 8 == 0 { - my_cache.blocking().insert(key, value(key)); - } else { - // insert() is an async method, so await it - my_cache.insert(key, value(key)).await; - } - // get() returns Option, a clone of the stored value. - assert_eq!(my_cache.get(&key), Some(value(key))); + my_cache.insert(key, value(key)).await; + assert_eq!(my_cache.get(&key).await, Some(value(key))); } // Invalidate every 4 element of the inserted entries. for key in (start..end).step_by(4) { - if key % 8 == 0 { - my_cache.blocking().invalidate(&key); - } else { - // invalidate() is an async method, so await it - my_cache.invalidate(&key).await; - } + my_cache.invalidate(&key).await; } }) }) .collect(); - // Wait for all tasks to complete. + // Spawn OS threads and write to and read from the cache. + let threads: Vec<_> = (0..NUM_THREADS) + .map(|i| i + NUM_TASKS) + .map(|i| { + let my_cache = cache.clone(); + let my_barrier = Arc::clone(&barrier); + let start = i * NUM_KEYS_PER_TASK; + let end = (i + 1) * NUM_KEYS_PER_TASK; + let rt = tokio::runtime::Handle::current(); + + std::thread::spawn(move || { + // Wait for the all async tasks and threads to be spawned. + rt.block_on(my_barrier.wait()); + + // Insert 64 entries. (NUM_KEYS_PER_TASK = 64) + for key in start..end { + rt.block_on(my_cache.insert(key, value(key))); + assert_eq!(rt.block_on(my_cache.get(&key)), Some(value(key))); + } + + // Invalidate every 4 element of the inserted entries. + for key in (start..end).step_by(4) { + rt.block_on(my_cache.invalidate(&key)); + } + }) + }) + .collect(); + + // Wait for all tasks and threads to complete. futures_util::future::join_all(tasks).await; + for t in threads { + t.join().unwrap(); + } // Verify the result. for key in 0..(NUM_TASKS * NUM_KEYS_PER_TASK) { if key % 4 == 0 { - assert_eq!(cache.get(&key), None); + assert_eq!(cache.get(&key).await, None); } else { - assert_eq!(cache.get(&key), Some(value(key))); + assert_eq!(cache.get(&key).await, Some(value(key))); } } }
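With `Cache::blocking()` removed, the OS-thread halves of these tests show the replacement pattern: obtain a handle to the async runtime and `block_on` each cache call from the plain thread. Reduced to its essentials (a sketch assuming a multi-threaded Tokio runtime):

```rust
use moka::future::Cache;

#[tokio::main]
async fn main() {
    let cache: Cache<u32, String> = Cache::new(100);

    // A plain OS thread drives the async cache operations through the runtime
    // handle, just like the spawned threads in the tests above.
    let handle = tokio::runtime::Handle::current();
    let my_cache = cache.clone();
    let worker = std::thread::spawn(move || {
        handle.block_on(my_cache.insert(1, String::from("one")));
        assert_eq!(
            handle.block_on(my_cache.get(&1)),
            Some(String::from("one"))
        );
    });
    worker.join().unwrap();

    assert_eq!(cache.get(&1).await, Some(String::from("one")));
}
```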