moka/future/entry_selector.rs

1use crate::{ops::compute, Entry};
2
3use super::Cache;
4
5use std::{
6    borrow::Borrow,
7    future::Future,
8    hash::{BuildHasher, Hash},
9    sync::Arc,
10};
11
12/// Provides advanced methods to select or insert an entry of the cache.
13///
14/// Many methods here return an [`Entry`], a snapshot of a single key-value pair in
15/// the cache, carrying additional information like `is_fresh`.
16///
17/// `OwnedKeyEntrySelector` is constructed from the [`entry`][entry-method] method on
18/// the cache.
19///
20/// [`Entry`]: ../struct.Entry.html
21/// [entry-method]: ./struct.Cache.html#method.entry
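///
/// # Example
///
/// A minimal sketch, for illustration, using only the
/// [`or_insert`](#method.or_insert) method documented below:
///
/// ```rust
/// // Cargo.toml
/// //
/// // [dependencies]
/// // moka = { version = "0.12", features = ["future"] }
/// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
///
/// use moka::future::Cache;
///
/// #[tokio::main]
/// async fn main() {
///     let cache: Cache<String, u32> = Cache::new(100);
///
///     // `Cache::entry` returns an `OwnedKeyEntrySelector` for the given key.
///     let entry = cache.entry("key1".to_string()).or_insert(7).await;
///     assert!(entry.is_fresh());
///     assert_eq!(entry.into_value(), 7);
/// }
/// ```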
22pub struct OwnedKeyEntrySelector<'a, K, V, S> {
23    owned_key: K,
24    hash: u64,
25    cache: &'a Cache<K, V, S>,
26}
27
28impl<'a, K, V, S> OwnedKeyEntrySelector<'a, K, V, S>
29where
30    K: Hash + Eq + Send + Sync + 'static,
31    V: Clone + Send + Sync + 'static,
32    S: BuildHasher + Clone + Send + Sync + 'static,
33{
34    pub(crate) fn new(owned_key: K, hash: u64, cache: &'a Cache<K, V, S>) -> Self {
35        Self {
36            owned_key,
37            hash,
38            cache,
39        }
40    }
41
42    /// Performs a compute operation on a cached entry by using the given closure
43    /// `f`. A compute operation is either put, remove or no-operation (nop).
44    ///
45    /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
46    /// the key, and return a `Future` that resolves to an `ops::compute::Op<V>`
47    /// enum.
48    ///
    /// This method works as follows:
50    ///
51    /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
52    /// 2. Resolve the `Future`, and get an `ops::compute::Op<V>`.
53    /// 3. Execute the op on the cache:
54    ///    - `Op::Put(V)`: Put the new value `V` to the cache.
55    ///    - `Op::Remove`: Remove the current cached entry.
56    ///    - `Op::Nop`: Do nothing.
    /// 4. Return an `ops::compute::CompResult<K, V>` as follows:
58    ///
59    /// | [`Op<V>`] | [`Entry<K, V>`] already exists? | [`CompResult<K, V>`] | Notes |
60    /// |:--------- |:--- |:--------------------------- |:------------------------------- |
61    /// | `Put(V)`  | no  | `Inserted(Entry<K, V>)`     | The new entry is returned.      |
62    /// | `Put(V)`  | yes | `ReplacedWith(Entry<K, V>)` | The new entry is returned.      |
63    /// | `Remove`  | no  | `StillNone(Arc<K>)`         |                                 |
64    /// | `Remove`  | yes | `Removed(Entry<K, V>)`      | The removed entry is returned.  |
65    /// | `Nop`     | no  | `StillNone(Arc<K>)`         |                                 |
66    /// | `Nop`     | yes | `Unchanged(Entry<K, V>)`    | The existing entry is returned. |
67    ///
68    /// # See Also
69    ///
    /// - If you want the `Future` to resolve to `Result<Op<V>>` instead of `Op<V>`,
    ///   and to modify the entry only when it resolves to `Ok(V)`, use the
    ///   [`and_try_compute_with`] method.
73    /// - If you only want to update or insert, use the [`and_upsert_with`] method.
74    ///
75    /// [`Entry<K, V>`]: ../struct.Entry.html
76    /// [`Op<V>`]: ../ops/compute/enum.Op.html
77    /// [`CompResult<K, V>`]: ../ops/compute/enum.CompResult.html
78    /// [`and_upsert_with`]: #method.and_upsert_with
79    /// [`and_try_compute_with`]: #method.and_try_compute_with
80    ///
81    /// # Example
82    ///
83    /// ```rust
84    /// // Cargo.toml
85    /// //
86    /// // [dependencies]
87    /// // moka = { version = "0.12.8", features = ["future"] }
88    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
89    ///
90    /// use moka::{
91    ///     future::Cache,
92    ///     ops::compute::{CompResult, Op},
93    /// };
94    ///
95    /// #[tokio::main]
96    /// async fn main() {
97    ///     let cache: Cache<String, u64> = Cache::new(100);
98    ///     let key = "key1".to_string();
99    ///
100    ///     /// Increment a cached `u64` counter. If the counter is greater than or
101    ///     /// equal to 2, remove it.
    ///     async fn increment_or_remove_counter(
103    ///         cache: &Cache<String, u64>,
104    ///         key: &str,
105    ///     ) -> CompResult<String, u64> {
106    ///         cache
107    ///             .entry(key.to_string())
108    ///             .and_compute_with(|maybe_entry| {
109    ///                 let op = if let Some(entry) = maybe_entry {
110    ///                     let counter = entry.into_value();
111    ///                     if counter < 2 {
112    ///                         Op::Put(counter.saturating_add(1)) // Update
113    ///                     } else {
114    ///                         Op::Remove
115    ///                     }
116    ///                 } else {
    ///                     Op::Put(1) // Insert
118    ///                 };
119    ///                 // Return a Future that is resolved to `op` immediately.
120    ///                 std::future::ready(op)
121    ///             })
122    ///             .await
123    ///     }
124    ///
    ///     // This should insert a new counter value of 1 into the cache, and
    ///     // return the value with the kind of the operation performed.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
128    ///     let CompResult::Inserted(entry) = result else {
129    ///         panic!("`Inserted` should be returned: {result:?}");
130    ///     };
131    ///     assert_eq!(entry.into_value(), 1);
132    ///
133    ///     // This should increment the cached counter value by 1.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
135    ///     let CompResult::ReplacedWith(entry) = result else {
136    ///         panic!("`ReplacedWith` should be returned: {result:?}");
137    ///     };
138    ///     assert_eq!(entry.into_value(), 2);
139    ///
    ///     // This should remove the cached counter from the cache, and return the
    ///     // _removed_ value.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
143    ///     let CompResult::Removed(entry) = result else {
144    ///         panic!("`Removed` should be returned: {result:?}");
145    ///     };
146    ///     assert_eq!(entry.into_value(), 2);
147    ///
148    ///     // The key should not exist.
149    ///     assert!(!cache.contains_key(&key));
150    ///
    ///     // This should start over; insert a new counter value of 1 into the cache.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
153    ///     let CompResult::Inserted(entry) = result else {
154    ///         panic!("`Inserted` should be returned: {result:?}");
155    ///     };
156    ///     assert_eq!(entry.into_value(), 1);
157    /// }
158    /// ```
159    ///
160    /// # Concurrent calls on the same key
161    ///
162    /// This method guarantees that concurrent calls on the same key are executed
163    /// serially. That is, `and_compute_with` calls on the same key never run
164    /// concurrently. The calls are serialized by the order of their invocation. It
165    /// uses a key-level lock to achieve this.
166    pub async fn and_compute_with<F, Fut>(self, f: F) -> compute::CompResult<K, V>
167    where
168        F: FnOnce(Option<Entry<K, V>>) -> Fut,
169        Fut: Future<Output = compute::Op<V>>,
170    {
171        let key = Arc::new(self.owned_key);
172        self.cache
173            .compute_with_hash_and_fun(key, self.hash, f)
174            .await
175    }
176
177    /// Performs a compute operation on a cached entry by using the given closure
178    /// `f`. A compute operation is either put, remove or no-operation (nop).
179    ///
180    /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
181    /// the key, and return a `Future` that resolves to a
182    /// `Result<ops::compute::Op<V>, E>`.
183    ///
    /// This method works as follows:
185    ///
186    /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
187    /// 2. Resolve the `Future`, and get a `Result<ops::compute::Op<V>, E>`.
188    /// 3. If resolved to `Err(E)`, return it.
189    /// 4. Else, execute the op on the cache:
190    ///    - `Ok(Op::Put(V))`: Put the new value `V` to the cache.
191    ///    - `Ok(Op::Remove)`: Remove the current cached entry.
192    ///    - `Ok(Op::Nop)`: Do nothing.
    /// 5. Return an `Ok(ops::compute::CompResult<K, V>)` as follows:
194    ///
195    /// | [`Op<V>`] | [`Entry<K, V>`] already exists? | [`CompResult<K, V>`] | Notes |
196    /// |:--------- |:--- |:--------------------------- |:------------------------------- |
197    /// | `Put(V)`  | no  | `Inserted(Entry<K, V>)`     | The new entry is returned.      |
198    /// | `Put(V)`  | yes | `ReplacedWith(Entry<K, V>)` | The new entry is returned.      |
199    /// | `Remove`  | no  | `StillNone(Arc<K>)`         |                                 |
200    /// | `Remove`  | yes | `Removed(Entry<K, V>)`      | The removed entry is returned.  |
201    /// | `Nop`     | no  | `StillNone(Arc<K>)`         |                                 |
202    /// | `Nop`     | yes | `Unchanged(Entry<K, V>)`    | The existing entry is returned. |
203    ///
204    /// # See Also
205    ///
    /// - If you want the `Future` to resolve to `Op<V>` instead of `Result<Op<V>>`,
    ///   use the [`and_compute_with`] method.
208    /// - If you only want to update or insert, use the [`and_upsert_with`] method.
209    ///
210    /// [`Entry<K, V>`]: ../struct.Entry.html
211    /// [`Op<V>`]: ../ops/compute/enum.Op.html
212    /// [`CompResult<K, V>`]: ../ops/compute/enum.CompResult.html
213    /// [`and_upsert_with`]: #method.and_upsert_with
214    /// [`and_compute_with`]: #method.and_compute_with
215    ///
216    /// # Example
217    ///
218    /// See [`try_append_value_async.rs`] in the `examples` directory.
219    ///
220    /// [`try_append_value_async.rs`]:
221    ///     https://github.com/moka-rs/moka/tree/main/examples/try_append_value_async.rs
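    ///
    /// Alternatively, here is a minimal inline sketch (separate from the linked
    /// example): increment a cached counter, but return an error once the counter
    /// has reached a limit, in which case the cache is left unchanged.
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12.8", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::{
    ///     future::Cache,
    ///     ops::compute::{CompResult, Op},
    /// };
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u64> = Cache::new(100);
    ///
    ///     let result = cache
    ///         .entry("key1".to_string())
    ///         .and_try_compute_with(|maybe_entry| async move {
    ///             let counter = maybe_entry.map(|entry| entry.into_value()).unwrap_or(0);
    ///             if counter >= 5 {
    ///                 // Resolving to `Err` leaves the cache unchanged.
    ///                 Err("counter limit reached".to_string())
    ///             } else {
    ///                 Ok(Op::Put(counter.saturating_add(1)))
    ///             }
    ///         })
    ///         .await;
    ///
    ///     // The key did not exist, so a counter value of 1 was inserted.
    ///     let CompResult::Inserted(entry) = result.unwrap() else {
    ///         panic!("`Inserted` should be returned");
    ///     };
    ///     assert_eq!(entry.into_value(), 1);
    /// }
    /// ```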
222    ///
223    /// # Concurrent calls on the same key
224    ///
225    /// This method guarantees that concurrent calls on the same key are executed
226    /// serially. That is, `and_try_compute_with` calls on the same key never run
227    /// concurrently. The calls are serialized by the order of their invocation. It
228    /// uses a key-level lock to achieve this.
229    pub async fn and_try_compute_with<F, Fut, E>(self, f: F) -> Result<compute::CompResult<K, V>, E>
230    where
231        F: FnOnce(Option<Entry<K, V>>) -> Fut,
232        Fut: Future<Output = Result<compute::Op<V>, E>>,
233        E: Send + Sync + 'static,
234    {
235        let key = Arc::new(self.owned_key);
236        self.cache
237            .try_compute_with_hash_and_fun(key, self.hash, f)
238            .await
239    }
240
241    pub async fn and_try_compute_if_nobody_else<F, Fut, E>(
242        self,
243        f: F,
244    ) -> Result<compute::CompResult<K, V>, E>
245    where
246        F: FnOnce(Option<Entry<K, V>>) -> Fut,
247        Fut: Future<Output = Result<compute::Op<V>, E>>,
248        E: Send + Sync + 'static,
249    {
250        let key = Arc::new(self.owned_key);
251        self.cache
252            .try_compute_if_nobody_else_with_hash_and_fun(key, self.hash, f)
253            .await
254    }
255
256    /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word
257    /// "upsert" here means "update" or "insert".
258    ///
259    /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
260    /// the key, and return a `Future` that resolves to a new value `V`.
261    ///
    /// This method works as follows:
263    ///
264    /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
265    /// 2. Resolve the `Future`, and get a new value `V`.
266    /// 3. Upsert the new value to the cache.
267    /// 4. Return the `Entry` having the upserted value.
268    ///
269    /// # See Also
270    ///
    /// - If you want to upsert only when certain conditions are met, use the
    ///   [`and_compute_with`] method.
    /// - If you want to try to upsert, that is, to make the `Future` resolve to
    ///   `Result<V>` instead of `V` and upsert only when it resolves to `Ok(V)`,
    ///   use the [`and_try_compute_with`] method.
276    ///
277    /// [`Entry`]: ../struct.Entry.html
278    /// [`and_compute_with`]: #method.and_compute_with
279    /// [`and_try_compute_with`]: #method.and_try_compute_with
280    ///
281    /// # Example
282    ///
283    /// ```rust
284    /// // Cargo.toml
285    /// //
286    /// // [dependencies]
287    /// // moka = { version = "0.12.8", features = ["future"] }
288    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
289    ///
290    /// use moka::future::Cache;
291    ///
292    /// #[tokio::main]
293    /// async fn main() {
294    ///     let cache: Cache<String, u64> = Cache::new(100);
295    ///     let key = "key1".to_string();
296    ///
297    ///     let entry = cache
298    ///         .entry(key.clone())
299    ///         .and_upsert_with(|maybe_entry| {
300    ///             let counter = if let Some(entry) = maybe_entry {
301    ///                 entry.into_value().saturating_add(1) // Update
302    ///             } else {
303    ///                 1 // Insert
304    ///             };
305    ///             // Return a Future that is resolved to `counter` immediately.
306    ///             std::future::ready(counter)
307    ///         })
308    ///         .await;
309    ///     // It was not an update.
310    ///     assert!(!entry.is_old_value_replaced());
311    ///     assert_eq!(entry.key(), &key);
312    ///     assert_eq!(entry.into_value(), 1);
313    ///
314    ///     let entry = cache
315    ///         .entry(key.clone())
316    ///         .and_upsert_with(|maybe_entry| {
317    ///             let counter = if let Some(entry) = maybe_entry {
318    ///                 entry.into_value().saturating_add(1)
319    ///             } else {
320    ///                 1
321    ///             };
322    ///             std::future::ready(counter)
323    ///         })
324    ///         .await;
325    ///     // It was an update.
326    ///     assert!(entry.is_old_value_replaced());
327    ///     assert_eq!(entry.key(), &key);
328    ///     assert_eq!(entry.into_value(), 2);
329    /// }
330    /// ```
331    ///
332    /// # Concurrent calls on the same key
333    ///
334    /// This method guarantees that concurrent calls on the same key are executed
335    /// serially. That is, `and_upsert_with` calls on the same key never run
336    /// concurrently. The calls are serialized by the order of their invocation. It
337    /// uses a key-level lock to achieve this.
338    pub async fn and_upsert_with<F, Fut>(self, f: F) -> Entry<K, V>
339    where
340        F: FnOnce(Option<Entry<K, V>>) -> Fut,
341        Fut: Future<Output = V>,
342    {
343        let key = Arc::new(self.owned_key);
344        self.cache.upsert_with_hash_and_fun(key, self.hash, f).await
345    }
346
347    /// Returns the corresponding [`Entry`] for the key given when this entry
348    /// selector was constructed. If the entry does not exist, inserts one by calling
349    /// the [`default`][std-default-function] function of the value type `V`.
350    ///
351    /// [`Entry`]: ../struct.Entry.html
352    /// [std-default-function]: https://doc.rust-lang.org/stable/std/default/trait.Default.html#tymethod.default
353    ///
354    /// # Example
355    ///
356    /// ```rust
357    /// // Cargo.toml
358    /// //
359    /// // [dependencies]
360    /// // moka = { version = "0.12", features = ["future"] }
361    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
362    ///
363    /// use moka::future::Cache;
364    ///
365    /// #[tokio::main]
366    /// async fn main() {
367    ///     let cache: Cache<String, Option<u32>> = Cache::new(100);
368    ///     let key = "key1".to_string();
369    ///
370    ///     let entry = cache.entry(key.clone()).or_default().await;
371    ///     assert!(entry.is_fresh());
372    ///     assert_eq!(entry.key(), &key);
373    ///     assert_eq!(entry.into_value(), None);
374    ///
375    ///     let entry = cache.entry(key).or_default().await;
376    ///     // Not fresh because the value was already in the cache.
377    ///     assert!(!entry.is_fresh());
378    /// }
379    /// ```
380    pub async fn or_default(self) -> Entry<K, V>
381    where
382        V: Default,
383    {
384        let key = Arc::new(self.owned_key);
385        self.cache
386            .get_or_insert_with_hash(key, self.hash, Default::default)
387            .await
388    }
389
390    /// Returns the corresponding [`Entry`] for the key given when this entry
391    /// selector was constructed. If the entry does not exist, inserts one by using
    /// the given `default` value for `V`.
393    ///
394    /// [`Entry`]: ../struct.Entry.html
395    ///
396    /// # Example
397    ///
398    /// ```rust
399    /// // Cargo.toml
400    /// //
401    /// // [dependencies]
402    /// // moka = { version = "0.12", features = ["future"] }
403    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
404    ///
405    /// use moka::future::Cache;
406    ///
407    /// #[tokio::main]
408    /// async fn main() {
409    ///     let cache: Cache<String, u32> = Cache::new(100);
410    ///     let key = "key1".to_string();
411    ///
412    ///     let entry = cache.entry(key.clone()).or_insert(3).await;
413    ///     assert!(entry.is_fresh());
414    ///     assert_eq!(entry.key(), &key);
415    ///     assert_eq!(entry.into_value(), 3);
416    ///
417    ///     let entry = cache.entry(key).or_insert(6).await;
418    ///     // Not fresh because the value was already in the cache.
419    ///     assert!(!entry.is_fresh());
420    ///     assert_eq!(entry.into_value(), 3);
421    /// }
422    /// ```
423    pub async fn or_insert(self, default: V) -> Entry<K, V> {
424        let key = Arc::new(self.owned_key);
425        let init = || default;
426        self.cache
427            .get_or_insert_with_hash(key, self.hash, init)
428            .await
429    }
430
431    /// Returns the corresponding [`Entry`] for the key given when this entry
432    /// selector was constructed. If the entry does not exist, resolves the `init`
433    /// future and inserts the output.
434    ///
435    /// [`Entry`]: ../struct.Entry.html
436    ///
437    /// # Example
438    ///
439    /// ```rust
440    /// // Cargo.toml
441    /// //
442    /// // [dependencies]
443    /// // moka = { version = "0.12", features = ["future"] }
444    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
445    ///
446    /// use moka::future::Cache;
447    ///
448    /// #[tokio::main]
449    /// async fn main() {
450    ///     let cache: Cache<String, String> = Cache::new(100);
451    ///     let key = "key1".to_string();
452    ///
453    ///     let entry = cache
454    ///         .entry(key.clone())
455    ///         .or_insert_with(async { "value1".to_string() })
456    ///         .await;
457    ///     assert!(entry.is_fresh());
458    ///     assert_eq!(entry.key(), &key);
459    ///     assert_eq!(entry.into_value(), "value1");
460    ///
461    ///     let entry = cache
462    ///         .entry(key)
463    ///         .or_insert_with(async { "value2".to_string() })
464    ///         .await;
465    ///     // Not fresh because the value was already in the cache.
466    ///     assert!(!entry.is_fresh());
467    ///     assert_eq!(entry.into_value(), "value1");
468    /// }
469    /// ```
470    ///
471    /// # Concurrent calls on the same key
472    ///
    /// This method guarantees that concurrent calls on the same non-existent entry
    /// are coalesced into one evaluation of the `init` future. Only one of the calls
    /// evaluates its future (thus the returned entry's `is_fresh` method returns
    /// `true`), and the other calls wait for that future to resolve (and their
    /// `is_fresh` returns `false`).
478    ///
479    /// For more detail about the coalescing behavior, see
480    /// [`Cache::get_with`][get-with-method].
481    ///
482    /// [get-with-method]: ./struct.Cache.html#method.get_with
483    pub async fn or_insert_with(self, init: impl Future<Output = V>) -> Entry<K, V> {
484        futures_util::pin_mut!(init);
485        let key = Arc::new(self.owned_key);
486        let replace_if = None as Option<fn(&V) -> bool>;
487        self.cache
488            .get_or_insert_with_hash_and_fun(key, self.hash, init, replace_if, true)
489            .await
490    }
491
492    /// Works like [`or_insert_with`](#method.or_insert_with), but takes an additional
493    /// `replace_if` closure.
494    ///
    /// This method resolves the `init` future and inserts its output into the
    /// cache when:
    ///
    /// - The key does not exist.
    /// - Or, the `replace_if` closure returns `true` for the current cached value.
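    ///
    /// # Example
    ///
    /// A minimal sketch of the rules above: the `init` future is resolved only when
    /// the key is missing or `replace_if` returns `true` for the cached value.
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::future::Cache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u32> = Cache::new(100);
    ///     let key = "key1".to_string();
    ///
    ///     // The key does not exist, so `init` is resolved and 1 is inserted.
    ///     let entry = cache
    ///         .entry(key.clone())
    ///         .or_insert_with_if(async { 1 }, |_| false)
    ///         .await;
    ///     assert!(entry.is_fresh());
    ///     assert_eq!(entry.into_value(), 1);
    ///
    ///     // The key exists and `replace_if` returns `false`, so the cached value
    ///     // is kept and `init` is not resolved.
    ///     let entry = cache
    ///         .entry(key.clone())
    ///         .or_insert_with_if(async { 2 }, |v| *v != 1)
    ///         .await;
    ///     assert!(!entry.is_fresh());
    ///     assert_eq!(entry.into_value(), 1);
    ///
    ///     // The key exists and `replace_if` returns `true`, so `init` is resolved
    ///     // and the cached value is replaced with 3.
    ///     let entry = cache
    ///         .entry(key)
    ///         .or_insert_with_if(async { 3 }, |v| *v == 1)
    ///         .await;
    ///     assert_eq!(entry.into_value(), 3);
    /// }
    /// ```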
500    pub async fn or_insert_with_if(
501        self,
502        init: impl Future<Output = V>,
503        replace_if: impl FnMut(&V) -> bool + Send,
504    ) -> Entry<K, V> {
505        futures_util::pin_mut!(init);
506        let key = Arc::new(self.owned_key);
507        self.cache
508            .get_or_insert_with_hash_and_fun(key, self.hash, init, Some(replace_if), true)
509            .await
510    }
511
512    /// Returns the corresponding [`Entry`] for the key given when this entry
513    /// selector was constructed. If the entry does not exist, resolves the `init`
514    /// future, and inserts an entry if `Some(value)` was returned. If `None` was
515    /// returned from the future, this method does not insert an entry and returns
516    /// `None`.
517    ///
518    /// [`Entry`]: ../struct.Entry.html
519    ///
520    /// # Example
521    ///
522    /// ```rust
523    /// // Cargo.toml
524    /// //
525    /// // [dependencies]
526    /// // moka = { version = "0.12", features = ["future"] }
527    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
528    ///
529    /// use moka::future::Cache;
530    ///
531    /// #[tokio::main]
532    /// async fn main() {
533    ///     let cache: Cache<String, u32> = Cache::new(100);
534    ///     let key = "key1".to_string();
535    ///
536    ///     let none_entry = cache
537    ///         .entry(key.clone())
538    ///         .or_optionally_insert_with(async { None })
539    ///         .await;
540    ///     assert!(none_entry.is_none());
541    ///
542    ///     let some_entry = cache
543    ///         .entry(key.clone())
544    ///         .or_optionally_insert_with(async { Some(3) })
545    ///         .await;
546    ///     assert!(some_entry.is_some());
547    ///     let entry = some_entry.unwrap();
548    ///     assert!(entry.is_fresh());
549    ///     assert_eq!(entry.key(), &key);
550    ///     assert_eq!(entry.into_value(), 3);
551    ///
552    ///     let some_entry = cache
553    ///         .entry(key)
554    ///         .or_optionally_insert_with(async { Some(6) })
555    ///         .await;
556    ///     let entry = some_entry.unwrap();
557    ///     // Not fresh because the value was already in the cache.
558    ///     assert!(!entry.is_fresh());
559    ///     assert_eq!(entry.into_value(), 3);
560    /// }
561    /// ```
562    ///
563    /// # Concurrent calls on the same key
564    ///
    /// This method guarantees that concurrent calls on the same non-existent entry
    /// are coalesced into one evaluation of the `init` future. Only one of the calls
    /// evaluates its future (thus the returned entry's `is_fresh` method returns
    /// `true`), and the other calls wait for that future to resolve (and their
    /// `is_fresh` returns `false`).
570    ///
571    /// For more detail about the coalescing behavior, see
572    /// [`Cache::optionally_get_with`][opt-get-with-method].
573    ///
574    /// [opt-get-with-method]: ./struct.Cache.html#method.optionally_get_with
575    pub async fn or_optionally_insert_with(
576        self,
577        init: impl Future<Output = Option<V>>,
578    ) -> Option<Entry<K, V>> {
579        futures_util::pin_mut!(init);
580        let key = Arc::new(self.owned_key);
581        self.cache
582            .get_or_optionally_insert_with_hash_and_fun(key, self.hash, init, true)
583            .await
584    }
585
586    /// Returns the corresponding [`Entry`] for the key given when this entry
587    /// selector was constructed. If the entry does not exist, resolves the `init`
588    /// future, and inserts an entry if `Ok(value)` was returned. If `Err(_)` was
589    /// returned from the future, this method does not insert an entry and returns
590    /// the `Err` wrapped by [`std::sync::Arc`][std-arc].
591    ///
592    /// [`Entry`]: ../struct.Entry.html
593    /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
594    ///
595    /// # Example
596    ///
597    /// ```rust
598    /// // Cargo.toml
599    /// //
600    /// // [dependencies]
601    /// // moka = { version = "0.12", features = ["future"] }
602    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
603    ///
604    /// use moka::future::Cache;
605    ///
606    /// #[tokio::main]
607    /// async fn main() {
608    ///     let cache: Cache<String, u32> = Cache::new(100);
609    ///     let key = "key1".to_string();
610    ///
611    ///     let error_entry = cache
612    ///         .entry(key.clone())
613    ///         .or_try_insert_with(async { Err("error") })
614    ///         .await;
615    ///     assert!(error_entry.is_err());
616    ///
617    ///     let ok_entry = cache
618    ///         .entry(key.clone())
619    ///         .or_try_insert_with(async { Ok::<u32, &str>(3) })
620    ///         .await;
621    ///     assert!(ok_entry.is_ok());
622    ///     let entry = ok_entry.unwrap();
623    ///     assert!(entry.is_fresh());
624    ///     assert_eq!(entry.key(), &key);
625    ///     assert_eq!(entry.into_value(), 3);
626    ///
627    ///     let ok_entry = cache
628    ///         .entry(key)
629    ///         .or_try_insert_with(async { Ok::<u32, &str>(6) })
630    ///         .await;
631    ///     let entry = ok_entry.unwrap();
632    ///     // Not fresh because the value was already in the cache.
633    ///     assert!(!entry.is_fresh());
634    ///     assert_eq!(entry.into_value(), 3);
635    /// }
636    /// ```
637    ///
638    /// # Concurrent calls on the same key
639    ///
    /// This method guarantees that concurrent calls on the same non-existent entry
    /// are coalesced into one evaluation of the `init` future (as long as these
    /// futures return the same error type). Only one of the calls evaluates its
    /// future (thus the returned entry's `is_fresh` method returns `true`), and the
    /// other calls wait for that future to resolve (and their `is_fresh` returns
    /// `false`).
645    ///
646    /// For more detail about the coalescing behavior, see
647    /// [`Cache::try_get_with`][try-get-with-method].
648    ///
649    /// [try-get-with-method]: ./struct.Cache.html#method.try_get_with
650    pub async fn or_try_insert_with<F, E>(self, init: F) -> Result<Entry<K, V>, Arc<E>>
651    where
652        F: Future<Output = Result<V, E>>,
653        E: Send + Sync + 'static,
654    {
655        futures_util::pin_mut!(init);
656        let key = Arc::new(self.owned_key);
657        self.cache
658            .get_or_try_insert_with_hash_and_fun(key, self.hash, init, true)
659            .await
660    }
661}
662
663/// Provides advanced methods to select or insert an entry of the cache.
664///
665/// Many methods here return an [`Entry`], a snapshot of a single key-value pair in
666/// the cache, carrying additional information like `is_fresh`.
667///
668/// `RefKeyEntrySelector` is constructed from the
669/// [`entry_by_ref`][entry-by-ref-method] method on the cache.
670///
671/// [`Entry`]: ../struct.Entry.html
672/// [entry-by-ref-method]: ./struct.Cache.html#method.entry_by_ref
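///
/// # Example
///
/// A minimal sketch, for illustration, using only the
/// [`or_insert`](#method.or_insert) method documented below:
///
/// ```rust
/// // Cargo.toml
/// //
/// // [dependencies]
/// // moka = { version = "0.12", features = ["future"] }
/// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
///
/// use moka::future::Cache;
///
/// #[tokio::main]
/// async fn main() {
///     let cache: Cache<String, u32> = Cache::new(100);
///
///     // `Cache::entry_by_ref` borrows the key, so a `&str` can be used to select
///     // an entry for a `String` key; the key is cloned when a new entry needs to
///     // be inserted (see `or_insert` below).
///     let entry = cache.entry_by_ref("key1").or_insert(7).await;
///     assert!(entry.is_fresh());
///     assert_eq!(entry.key(), "key1");
///     assert_eq!(entry.into_value(), 7);
/// }
/// ```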
673pub struct RefKeyEntrySelector<'a, K, Q, V, S>
674where
675    Q: ?Sized,
676{
677    ref_key: &'a Q,
678    hash: u64,
679    cache: &'a Cache<K, V, S>,
680}
681
682impl<'a, K, Q, V, S> RefKeyEntrySelector<'a, K, Q, V, S>
683where
684    K: Borrow<Q> + Hash + Eq + Send + Sync + 'static,
685    Q: ToOwned<Owned = K> + Hash + Eq + ?Sized,
686    V: Clone + Send + Sync + 'static,
687    S: BuildHasher + Clone + Send + Sync + 'static,
688{
689    pub(crate) fn new(ref_key: &'a Q, hash: u64, cache: &'a Cache<K, V, S>) -> Self {
690        Self {
691            ref_key,
692            hash,
693            cache,
694        }
695    }
696
697    /// Performs a compute operation on a cached entry by using the given closure
698    /// `f`. A compute operation is either put, remove or no-operation (nop).
699    ///
700    /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
701    /// the key, and return a `Future` that resolves to an `ops::compute::Op<V>`
702    /// enum.
703    ///
    /// This method works as follows:
705    ///
706    /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
707    /// 2. Resolve the `Future`, and get an `ops::compute::Op<V>`.
708    /// 3. Execute the op on the cache:
709    ///    - `Op::Put(V)`: Put the new value `V` to the cache.
710    ///    - `Op::Remove`: Remove the current cached entry.
711    ///    - `Op::Nop`: Do nothing.
    /// 4. Return an `ops::compute::CompResult<K, V>` as follows:
713    ///
714    /// | [`Op<V>`] | [`Entry<K, V>`] already exists? | [`CompResult<K, V>`] | Notes |
715    /// |:--------- |:--- |:--------------------------- |:------------------------------- |
716    /// | `Put(V)`  | no  | `Inserted(Entry<K, V>)`     | The new entry is returned.      |
717    /// | `Put(V)`  | yes | `ReplacedWith(Entry<K, V>)` | The new entry is returned.      |
718    /// | `Remove`  | no  | `StillNone(Arc<K>)`         |                                 |
719    /// | `Remove`  | yes | `Removed(Entry<K, V>)`      | The removed entry is returned.  |
720    /// | `Nop`     | no  | `StillNone(Arc<K>)`         |                                 |
721    /// | `Nop`     | yes | `Unchanged(Entry<K, V>)`    | The existing entry is returned. |
722    ///
723    /// # See Also
724    ///
    /// - If you want the `Future` to resolve to `Result<Op<V>>` instead of `Op<V>`,
    ///   and to modify the entry only when it resolves to `Ok(V)`, use the
    ///   [`and_try_compute_with`] method.
728    /// - If you only want to update or insert, use the [`and_upsert_with`] method.
729    ///
730    /// [`Entry<K, V>`]: ../struct.Entry.html
731    /// [`Op<V>`]: ../ops/compute/enum.Op.html
732    /// [`CompResult<K, V>`]: ../ops/compute/enum.CompResult.html
733    /// [`and_upsert_with`]: #method.and_upsert_with
734    /// [`and_try_compute_with`]: #method.and_try_compute_with
735    ///
736    /// # Example
737    ///
738    /// ```rust
739    /// // Cargo.toml
740    /// //
741    /// // [dependencies]
742    /// // moka = { version = "0.12.8", features = ["future"] }
743    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
744    ///
745    /// use moka::{
746    ///     future::Cache,
747    ///     ops::compute::{CompResult, Op},
748    /// };
749    ///
750    /// #[tokio::main]
751    /// async fn main() {
752    ///     let cache: Cache<String, u64> = Cache::new(100);
753    ///     let key = "key1";
754    ///
755    ///     /// Increment a cached `u64` counter. If the counter is greater than or
756    ///     /// equal to 2, remove it.
    ///     async fn increment_or_remove_counter(
758    ///         cache: &Cache<String, u64>,
759    ///         key: &str,
760    ///     ) -> CompResult<String, u64> {
761    ///         cache
762    ///             .entry_by_ref(key)
763    ///             .and_compute_with(|maybe_entry| {
764    ///                 let op = if let Some(entry) = maybe_entry {
765    ///                     let counter = entry.into_value();
766    ///                     if counter < 2 {
767    ///                         Op::Put(counter.saturating_add(1)) // Update
768    ///                     } else {
769    ///                         Op::Remove
770    ///                     }
771    ///                 } else {
    ///                     Op::Put(1) // Insert
773    ///                 };
774    ///                 // Return a Future that is resolved to `op` immediately.
775    ///                 std::future::ready(op)
776    ///             })
777    ///             .await
778    ///     }
779    ///
    ///     // This should insert a new counter value of 1 into the cache, and
    ///     // return the value with the kind of the operation performed.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
783    ///     let CompResult::Inserted(entry) = result else {
784    ///         panic!("`Inserted` should be returned: {result:?}");
785    ///     };
786    ///     assert_eq!(entry.into_value(), 1);
787    ///
788    ///     // This should increment the cached counter value by 1.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
790    ///     let CompResult::ReplacedWith(entry) = result else {
791    ///         panic!("`ReplacedWith` should be returned: {result:?}");
792    ///     };
793    ///     assert_eq!(entry.into_value(), 2);
794    ///
    ///     // This should remove the cached counter from the cache, and return the
    ///     // _removed_ value.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
798    ///     let CompResult::Removed(entry) = result else {
799    ///         panic!("`Removed` should be returned: {result:?}");
800    ///     };
801    ///     assert_eq!(entry.into_value(), 2);
802    ///
803    ///     // The key should no longer exist.
804    ///     assert!(!cache.contains_key(key));
805    ///
    ///     // This should start over; insert a new counter value of 1 into the cache.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
808    ///     let CompResult::Inserted(entry) = result else {
809    ///         panic!("`Inserted` should be returned: {result:?}");
810    ///     };
811    ///     assert_eq!(entry.into_value(), 1);
812    /// }
813    /// ```
814    ///
815    /// # Concurrent calls on the same key
816    ///
817    /// This method guarantees that concurrent calls on the same key are executed
818    /// serially. That is, `and_compute_with` calls on the same key never run
819    /// concurrently. The calls are serialized by the order of their invocation. It
820    /// uses a key-level lock to achieve this.
821    pub async fn and_compute_with<F, Fut>(self, f: F) -> compute::CompResult<K, V>
822    where
823        F: FnOnce(Option<Entry<K, V>>) -> Fut,
824        Fut: Future<Output = compute::Op<V>>,
825    {
826        let key = Arc::new(self.ref_key.to_owned());
827        self.cache
828            .compute_with_hash_and_fun(key, self.hash, f)
829            .await
830    }
831
832    /// Performs a compute operation on a cached entry by using the given closure
833    /// `f`. A compute operation is either put, remove or no-operation (nop).
834    ///
835    /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
836    /// the key, and return a `Future` that resolves to a
837    /// `Result<ops::compute::Op<V>, E>`.
838    ///
    /// This method works as follows:
840    ///
841    /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
842    /// 2. Resolve the `Future`, and get a `Result<ops::compute::Op<V>, E>`.
843    /// 3. If resolved to `Err(E)`, return it.
844    /// 4. Else, execute the op on the cache:
845    ///    - `Ok(Op::Put(V))`: Put the new value `V` to the cache.
846    ///    - `Ok(Op::Remove)`: Remove the current cached entry.
847    ///    - `Ok(Op::Nop)`: Do nothing.
    /// 5. Return an `Ok(ops::compute::CompResult<K, V>)` as follows:
849    ///
850    /// | [`Op<V>`] | [`Entry<K, V>`] already exists? | [`CompResult<K, V>`] | Notes |
851    /// |:--------- |:--- |:--------------------------- |:------------------------------- |
852    /// | `Put(V)`  | no  | `Inserted(Entry<K, V>)`     | The new entry is returned.      |
853    /// | `Put(V)`  | yes | `ReplacedWith(Entry<K, V>)` | The new entry is returned.      |
854    /// | `Remove`  | no  | `StillNone(Arc<K>)`         |                                 |
855    /// | `Remove`  | yes | `Removed(Entry<K, V>)`      | The removed entry is returned.  |
856    /// | `Nop`     | no  | `StillNone(Arc<K>)`         |                                 |
857    /// | `Nop`     | yes | `Unchanged(Entry<K, V>)`    | The existing entry is returned. |
858    ///
859    /// # See Also
860    ///
    /// - If you want the `Future` to resolve to `Op<V>` instead of `Result<Op<V>>`,
    ///   use the [`and_compute_with`] method.
863    /// - If you only want to update or insert, use the [`and_upsert_with`] method.
864    ///
865    /// [`Entry<K, V>`]: ../struct.Entry.html
866    /// [`Op<V>`]: ../ops/compute/enum.Op.html
867    /// [`CompResult<K, V>`]: ../ops/compute/enum.CompResult.html
868    /// [`and_upsert_with`]: #method.and_upsert_with
869    /// [`and_compute_with`]: #method.and_compute_with
870    ///
871    /// # Example
872    ///
873    /// See [`try_append_value_async.rs`] in the `examples` directory.
874    ///
875    /// [`try_append_value_async.rs`]:
876    ///     https://github.com/moka-rs/moka/tree/main/examples/try_append_value_async.rs
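    ///
    /// Alternatively, here is a minimal inline sketch (separate from the linked
    /// example): increment a cached counter, but return an error once the counter
    /// has reached a limit, in which case the cache is left unchanged.
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12.8", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::{
    ///     future::Cache,
    ///     ops::compute::{CompResult, Op},
    /// };
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u64> = Cache::new(100);
    ///
    ///     let result = cache
    ///         .entry_by_ref("key1")
    ///         .and_try_compute_with(|maybe_entry| async move {
    ///             let counter = maybe_entry.map(|entry| entry.into_value()).unwrap_or(0);
    ///             if counter >= 5 {
    ///                 // Resolving to `Err` leaves the cache unchanged.
    ///                 Err("counter limit reached".to_string())
    ///             } else {
    ///                 Ok(Op::Put(counter.saturating_add(1)))
    ///             }
    ///         })
    ///         .await;
    ///
    ///     // The key did not exist, so a counter value of 1 was inserted.
    ///     let CompResult::Inserted(entry) = result.unwrap() else {
    ///         panic!("`Inserted` should be returned");
    ///     };
    ///     assert_eq!(entry.into_value(), 1);
    /// }
    /// ```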
877    ///
878    /// # Concurrent calls on the same key
879    ///
880    /// This method guarantees that concurrent calls on the same key are executed
881    /// serially. That is, `and_try_compute_with` calls on the same key never run
882    /// concurrently. The calls are serialized by the order of their invocation. It
883    /// uses a key-level lock to achieve this.
884    pub async fn and_try_compute_with<F, Fut, E>(self, f: F) -> Result<compute::CompResult<K, V>, E>
885    where
886        F: FnOnce(Option<Entry<K, V>>) -> Fut,
887        Fut: Future<Output = Result<compute::Op<V>, E>>,
888        E: Send + Sync + 'static,
889    {
890        let key = Arc::new(self.ref_key.to_owned());
891        self.cache
892            .try_compute_with_hash_and_fun(key, self.hash, f)
893            .await
894    }
895
896    pub async fn and_try_compute_if_nobody_else<F, Fut, E>(
897        self,
898        f: F,
899    ) -> Result<compute::CompResult<K, V>, E>
900    where
901        F: FnOnce(Option<Entry<K, V>>) -> Fut,
902        Fut: Future<Output = Result<compute::Op<V>, E>>,
903        E: Send + Sync + 'static,
904    {
905        let key = Arc::new(self.ref_key.to_owned());
906        self.cache
907            .try_compute_if_nobody_else_with_hash_and_fun(key, self.hash, f)
908            .await
909    }
910
911    /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word
912    /// "upsert" here means "update" or "insert".
913    ///
914    /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
915    /// the key, and return a `Future` that resolves to a new value `V`.
916    ///
    /// This method works as follows:
918    ///
919    /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
920    /// 2. Resolve the `Future`, and get a new value `V`.
921    /// 3. Upsert the new value to the cache.
922    /// 4. Return the `Entry` having the upserted value.
923    ///
924    /// # See Also
925    ///
    /// - If you want to upsert only when certain conditions are met, use the
    ///   [`and_compute_with`] method.
    /// - If you want to try to upsert, that is, to make the `Future` resolve to
    ///   `Result<V>` instead of `V` and upsert only when it resolves to `Ok(V)`,
    ///   use the [`and_try_compute_with`] method.
931    ///
932    /// [`Entry`]: ../struct.Entry.html
933    /// [`and_compute_with`]: #method.and_compute_with
934    /// [`and_try_compute_with`]: #method.and_try_compute_with
935    ///
936    /// # Example
937    ///
938    /// ```rust
939    /// // Cargo.toml
940    /// //
941    /// // [dependencies]
942    /// // moka = { version = "0.12.8", features = ["future"] }
943    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
944    ///
945    /// use moka::future::Cache;
946    ///
947    /// #[tokio::main]
948    /// async fn main() {
949    ///     let cache: Cache<String, u64> = Cache::new(100);
950    ///     let key = "key1";
951    ///
952    ///     let entry = cache
953    ///         .entry_by_ref(key)
954    ///         .and_upsert_with(|maybe_entry| {
955    ///             let counter = if let Some(entry) = maybe_entry {
956    ///                 entry.into_value().saturating_add(1) // Update
957    ///             } else {
958    ///                 1 // Insert
959    ///             };
960    ///             // Return a Future that is resolved to `counter` immediately.
961    ///             std::future::ready(counter)
962    ///         })
963    ///         .await;
964    ///     // It was not an update.
965    ///     assert!(!entry.is_old_value_replaced());
966    ///     assert_eq!(entry.key(), &key);
967    ///     assert_eq!(entry.into_value(), 1);
968    ///
969    ///     let entry = cache
970    ///         .entry_by_ref(key)
971    ///         .and_upsert_with(|maybe_entry| {
972    ///             let counter = if let Some(entry) = maybe_entry {
973    ///                 entry.into_value().saturating_add(1)
974    ///             } else {
975    ///                 1
976    ///             };
977    ///             std::future::ready(counter)
978    ///         })
979    ///         .await;
980    ///     // It was an update.
981    ///     assert!(entry.is_old_value_replaced());
982    ///     assert_eq!(entry.key(), &key);
983    ///     assert_eq!(entry.into_value(), 2);
984    /// }
985    /// ```
986    ///
987    /// # Concurrent calls on the same key
988    ///
989    /// This method guarantees that concurrent calls on the same key are executed
990    /// serially. That is, `and_upsert_with` calls on the same key never run
991    /// concurrently. The calls are serialized by the order of their invocation. It
992    /// uses a key-level lock to achieve this.
993    pub async fn and_upsert_with<F, Fut>(self, f: F) -> Entry<K, V>
994    where
995        F: FnOnce(Option<Entry<K, V>>) -> Fut,
996        Fut: Future<Output = V>,
997    {
998        let key = Arc::new(self.ref_key.to_owned());
999        self.cache.upsert_with_hash_and_fun(key, self.hash, f).await
1000    }
1001
1002    /// Returns the corresponding [`Entry`] for the reference of the key given when
1003    /// this entry selector was constructed. If the entry does not exist, inserts one
1004    /// by cloning the key and calling the [`default`][std-default-function] function
1005    /// of the value type `V`.
1006    ///
1007    /// [`Entry`]: ../struct.Entry.html
1008    /// [std-default-function]: https://doc.rust-lang.org/stable/std/default/trait.Default.html#tymethod.default
1009    ///
1010    /// # Example
1011    ///
1012    /// ```rust
1013    /// // Cargo.toml
1014    /// //
1015    /// // [dependencies]
1016    /// // moka = { version = "0.12", features = ["future"] }
1017    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1018    ///
1019    /// use moka::future::Cache;
1020    ///
1021    /// #[tokio::main]
1022    /// async fn main() {
1023    ///     let cache: Cache<String, Option<u32>> = Cache::new(100);
1024    ///     let key = "key1".to_string();
1025    ///
1026    ///     let entry = cache.entry_by_ref(&key).or_default().await;
1027    ///     assert!(entry.is_fresh());
1028    ///     assert_eq!(entry.key(), &key);
1029    ///     assert_eq!(entry.into_value(), None);
1030    ///
1031    ///     let entry = cache.entry_by_ref(&key).or_default().await;
1032    ///     // Not fresh because the value was already in the cache.
1033    ///     assert!(!entry.is_fresh());
1034    /// }
1035    /// ```
1036    pub async fn or_default(self) -> Entry<K, V>
1037    where
1038        V: Default,
1039    {
1040        self.cache
1041            .get_or_insert_with_hash_by_ref(self.ref_key, self.hash, Default::default)
1042            .await
1043    }
1044
1045    /// Returns the corresponding [`Entry`] for the reference of the key given when
1046    /// this entry selector was constructed. If the entry does not exist, inserts one
1047    /// by cloning the key and using the given `default` value for `V`.
1048    ///
1049    /// [`Entry`]: ../struct.Entry.html
1050    ///
1051    /// # Example
1052    ///
1053    /// ```rust
1054    /// // Cargo.toml
1055    /// //
1056    /// // [dependencies]
1057    /// // moka = { version = "0.12", features = ["future"] }
1058    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1059    ///
1060    /// use moka::future::Cache;
1061    ///
1062    /// #[tokio::main]
1063    /// async fn main() {
1064    ///     let cache: Cache<String, u32> = Cache::new(100);
1065    ///     let key = "key1".to_string();
1066    ///
1067    ///     let entry = cache.entry_by_ref(&key).or_insert(3).await;
1068    ///     assert!(entry.is_fresh());
1069    ///     assert_eq!(entry.key(), &key);
1070    ///     assert_eq!(entry.into_value(), 3);
1071    ///
1072    ///     let entry = cache.entry_by_ref(&key).or_insert(6).await;
1073    ///     // Not fresh because the value was already in the cache.
1074    ///     assert!(!entry.is_fresh());
1075    ///     assert_eq!(entry.into_value(), 3);
1076    /// }
1077    /// ```
1078    pub async fn or_insert(self, default: V) -> Entry<K, V> {
1079        let init = || default;
1080        self.cache
1081            .get_or_insert_with_hash_by_ref(self.ref_key, self.hash, init)
1082            .await
1083    }
1084
1085    /// Returns the corresponding [`Entry`] for the reference of the key given when
1086    /// this entry selector was constructed. If the entry does not exist, inserts one
1087    /// by cloning the key and resolving the `init` future for the value.
1088    ///
1089    /// [`Entry`]: ../struct.Entry.html
1090    ///
1091    /// # Example
1092    ///
1093    /// ```rust
1094    /// // Cargo.toml
1095    /// //
1096    /// // [dependencies]
1097    /// // moka = { version = "0.12", features = ["future"] }
1098    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1099    ///
1100    /// use moka::future::Cache;
1101    ///
1102    /// #[tokio::main]
1103    /// async fn main() {
1104    ///     let cache: Cache<String, String> = Cache::new(100);
1105    ///     let key = "key1".to_string();
1106    ///
1107    ///     let entry = cache
1108    ///         .entry_by_ref(&key)
1109    ///         .or_insert_with(async { "value1".to_string() })
1110    ///         .await;
1111    ///     assert!(entry.is_fresh());
1112    ///     assert_eq!(entry.key(), &key);
1113    ///     assert_eq!(entry.into_value(), "value1");
1114    ///
1115    ///     let entry = cache
1116    ///         .entry_by_ref(&key)
1117    ///         .or_insert_with(async { "value2".to_string() })
1118    ///         .await;
1119    ///     // Not fresh because the value was already in the cache.
1120    ///     assert!(!entry.is_fresh());
1121    ///     assert_eq!(entry.into_value(), "value1");
1122    /// }
1123    /// ```
1124    ///
1125    /// # Concurrent calls on the same key
1126    ///
    /// This method guarantees that concurrent calls on the same non-existent entry
    /// are coalesced into one evaluation of the `init` future. Only one of the calls
    /// evaluates its future (thus the returned entry's `is_fresh` method returns
    /// `true`), and the other calls wait for that future to resolve (and their
    /// `is_fresh` returns `false`).
1132    ///
1133    /// For more detail about the coalescing behavior, see
1134    /// [`Cache::get_with`][get-with-method].
1135    ///
1136    /// [get-with-method]: ./struct.Cache.html#method.get_with
1137    pub async fn or_insert_with(self, init: impl Future<Output = V>) -> Entry<K, V> {
1138        futures_util::pin_mut!(init);
1139        let replace_if = None as Option<fn(&V) -> bool>;
1140        self.cache
1141            .get_or_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, replace_if, true)
1142            .await
1143    }
1144
1145    /// Works like [`or_insert_with`](#method.or_insert_with), but takes an additional
1146    /// `replace_if` closure.
1147    ///
    /// This method resolves the `init` future and inserts its output into the
    /// cache when:
    ///
    /// - The key does not exist.
    /// - Or, the `replace_if` closure returns `true` for the current cached value.
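    ///
    /// # Example
    ///
    /// A minimal sketch of the rules above: the `init` future is resolved only when
    /// the key is missing or `replace_if` returns `true` for the cached value.
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::future::Cache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u32> = Cache::new(100);
    ///     let key = "key1".to_string();
    ///
    ///     // The key does not exist, so `init` is resolved and 1 is inserted.
    ///     let entry = cache
    ///         .entry_by_ref(&key)
    ///         .or_insert_with_if(async { 1 }, |_| false)
    ///         .await;
    ///     assert!(entry.is_fresh());
    ///     assert_eq!(entry.into_value(), 1);
    ///
    ///     // The key exists and `replace_if` returns `false`, so the cached value
    ///     // is kept and `init` is not resolved.
    ///     let entry = cache
    ///         .entry_by_ref(&key)
    ///         .or_insert_with_if(async { 2 }, |v| *v != 1)
    ///         .await;
    ///     assert!(!entry.is_fresh());
    ///     assert_eq!(entry.into_value(), 1);
    ///
    ///     // The key exists and `replace_if` returns `true`, so `init` is resolved
    ///     // and the cached value is replaced with 3.
    ///     let entry = cache
    ///         .entry_by_ref(&key)
    ///         .or_insert_with_if(async { 3 }, |v| *v == 1)
    ///         .await;
    ///     assert_eq!(entry.into_value(), 3);
    /// }
    /// ```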
1153    pub async fn or_insert_with_if(
1154        self,
1155        init: impl Future<Output = V>,
1156        replace_if: impl FnMut(&V) -> bool + Send,
1157    ) -> Entry<K, V> {
1158        futures_util::pin_mut!(init);
1159        self.cache
1160            .get_or_insert_with_hash_by_ref_and_fun(
1161                self.ref_key,
1162                self.hash,
1163                init,
1164                Some(replace_if),
1165                true,
1166            )
1167            .await
1168    }
1169
1170    /// Returns the corresponding [`Entry`] for the reference of the key given when
1171    /// this entry selector was constructed. If the entry does not exist, clones the
1172    /// key and resolves the `init` future. If `Some(value)` was returned by the
    /// future, inserts an entry with the value. If `None` was returned, this method
1174    /// does not insert an entry and returns `None`.
1175    ///
1176    /// [`Entry`]: ../struct.Entry.html
1177    ///
1178    /// # Example
1179    ///
1180    /// ```rust
1181    /// // Cargo.toml
1182    /// //
1183    /// // [dependencies]
1184    /// // moka = { version = "0.12", features = ["future"] }
1185    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1186    ///
1187    /// use moka::future::Cache;
1188    ///
1189    /// #[tokio::main]
1190    /// async fn main() {
1191    ///     let cache: Cache<String, u32> = Cache::new(100);
1192    ///     let key = "key1".to_string();
1193    ///
1194    ///     let none_entry = cache
1195    ///         .entry_by_ref(&key)
1196    ///         .or_optionally_insert_with(async { None })
1197    ///         .await;
1198    ///     assert!(none_entry.is_none());
1199    ///
1200    ///     let some_entry = cache
1201    ///         .entry_by_ref(&key)
1202    ///         .or_optionally_insert_with(async { Some(3) })
1203    ///         .await;
1204    ///     assert!(some_entry.is_some());
1205    ///     let entry = some_entry.unwrap();
1206    ///     assert!(entry.is_fresh());
1207    ///     assert_eq!(entry.key(), &key);
1208    ///     assert_eq!(entry.into_value(), 3);
1209    ///
1210    ///     let some_entry = cache
1211    ///         .entry_by_ref(&key)
1212    ///         .or_optionally_insert_with(async { Some(6) })
1213    ///         .await;
1214    ///     let entry = some_entry.unwrap();
1215    ///     // Not fresh because the value was already in the cache.
1216    ///     assert!(!entry.is_fresh());
1217    ///     assert_eq!(entry.into_value(), 3);
1218    /// }
1219    /// ```
1220    ///
    /// # Concurrent calls on the same key
    ///
    /// This method guarantees that concurrent calls on the same non-existent entry
    /// are coalesced into one evaluation of the `init` future. Only one of the calls
    /// evaluates its future (thus the returned entry's `is_fresh` method returns
    /// `true`), and the other calls wait for that future to resolve (and their
    /// `is_fresh` returns `false`).
1227    ///
1228    /// For more detail about the coalescing behavior, see
1229    /// [`Cache::optionally_get_with`][opt-get-with-method].
1230    ///
1231    /// [opt-get-with-method]: ./struct.Cache.html#method.optionally_get_with
1232    pub async fn or_optionally_insert_with(
1233        self,
1234        init: impl Future<Output = Option<V>>,
1235    ) -> Option<Entry<K, V>> {
1236        futures_util::pin_mut!(init);
1237        self.cache
1238            .get_or_optionally_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true)
1239            .await
1240    }
1241
1242    /// Returns the corresponding [`Entry`] for the reference of the key given when
1243    /// this entry selector was constructed. If the entry does not exist, clones the
1244    /// key and resolves the `init` future. If `Ok(value)` was returned from the
1245    /// future, inserts an entry with the value. If `Err(_)` was returned, this
1246    /// method does not insert an entry and returns the `Err` wrapped by
1247    /// [`std::sync::Arc`][std-arc].
1248    ///
1249    /// [`Entry`]: ../struct.Entry.html
1250    /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
1251    ///
1252    /// # Example
1253    ///
1254    /// ```rust
1255    /// // Cargo.toml
1256    /// //
1257    /// // [dependencies]
1258    /// // moka = { version = "0.12", features = ["future"] }
1259    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1260    ///
1261    /// use moka::future::Cache;
1262    ///
1263    /// #[tokio::main]
1264    /// async fn main() {
1265    ///     let cache: Cache<String, u32> = Cache::new(100);
1266    ///     let key = "key1".to_string();
1267    ///
1268    ///     let error_entry = cache
1269    ///         .entry_by_ref(&key)
1270    ///         .or_try_insert_with(async { Err("error") })
1271    ///         .await;
1272    ///     assert!(error_entry.is_err());
1273    ///
1274    ///     let ok_entry = cache
1275    ///         .entry_by_ref(&key)
1276    ///         .or_try_insert_with(async { Ok::<u32, &str>(3) })
1277    ///         .await;
1278    ///     assert!(ok_entry.is_ok());
1279    ///     let entry = ok_entry.unwrap();
1280    ///     assert!(entry.is_fresh());
1281    ///     assert_eq!(entry.key(), &key);
1282    ///     assert_eq!(entry.into_value(), 3);
1283    ///
1284    ///     let ok_entry = cache
1285    ///         .entry_by_ref(&key)
1286    ///         .or_try_insert_with(async { Ok::<u32, &str>(6) })
1287    ///         .await;
1288    ///     let entry = ok_entry.unwrap();
1289    ///     // Not fresh because the value was already in the cache.
1290    ///     assert!(!entry.is_fresh());
1291    ///     assert_eq!(entry.into_value(), 3);
1292    /// }
1293    /// ```
1294    ///
1295    /// # Concurrent calls on the same key
1296    ///
    /// This method guarantees that concurrent calls on the same non-existent entry
    /// are coalesced into one evaluation of the `init` future (as long as these
    /// futures return the same error type). Only one of the calls evaluates its
    /// future (thus the returned entry's `is_fresh` method returns `true`), and the
    /// other calls wait for that future to resolve (and their `is_fresh` returns
    /// `false`).
1302    ///
1303    /// For more detail about the coalescing behavior, see
1304    /// [`Cache::try_get_with`][try-get-with-method].
1305    ///
1306    /// [try-get-with-method]: ./struct.Cache.html#method.try_get_with
1307    pub async fn or_try_insert_with<F, E>(self, init: F) -> Result<Entry<K, V>, Arc<E>>
1308    where
1309        F: Future<Output = Result<V, E>>,
1310        E: Send + Sync + 'static,
1311    {
1312        futures_util::pin_mut!(init);
1313        self.cache
1314            .get_or_try_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true)
1315            .await
1316    }
1317}