moka/future/entry_selector.rs
1use equivalent::Equivalent;
2
3use crate::{ops::compute, Entry};
4
5use super::Cache;
6
7use std::{
8 future::Future,
9 hash::{BuildHasher, Hash},
10 sync::Arc,
11};
12
13/// Provides advanced methods to select or insert an entry of the cache.
14///
15/// Many methods here return an [`Entry`], a snapshot of a single key-value pair in
16/// the cache, carrying additional information like `is_fresh`.
17///
18/// `OwnedKeyEntrySelector` is constructed from the [`entry`][entry-method] method on
19/// the cache.
20///
21/// [`Entry`]: ../struct.Entry.html
22/// [entry-method]: ./struct.Cache.html#method.entry
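///
/// # Example
///
/// A small usage sketch; any of the methods below can be called on the selector
/// returned by `entry` (the key and value here are only for illustration):
///
/// ```rust
/// // Cargo.toml
/// //
/// // [dependencies]
/// // moka = { version = "0.12", features = ["future"] }
/// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
///
/// use moka::future::Cache;
///
/// #[tokio::main]
/// async fn main() {
///     let cache: Cache<String, u32> = Cache::new(100);
///
///     // `Cache::entry` returns an `OwnedKeyEntrySelector`.
///     let entry = cache.entry("key1".to_string()).or_insert(1).await;
///     assert!(entry.is_fresh());
///     assert_eq!(entry.into_value(), 1);
/// }
/// ```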
23pub struct OwnedKeyEntrySelector<'a, K, V, S> {
24 owned_key: K,
25 hash: u64,
26 cache: &'a Cache<K, V, S>,
27}
28
29impl<'a, K, V, S> OwnedKeyEntrySelector<'a, K, V, S>
30where
31 K: Hash + Eq + Send + Sync + 'static,
32 V: Clone + Send + Sync + 'static,
33 S: BuildHasher + Clone + Send + Sync + 'static,
34{
35 pub(crate) fn new(owned_key: K, hash: u64, cache: &'a Cache<K, V, S>) -> Self {
36 Self {
37 owned_key,
38 hash,
39 cache,
40 }
41 }
42
43 /// Performs a compute operation on a cached entry by using the given closure
44 /// `f`. A compute operation is either put, remove or no-operation (nop).
45 ///
46 /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
47 /// the key, and return a `Future` that resolves to an `ops::compute::Op<V>`
48 /// enum.
49 ///
    /// This method works as follows:
51 ///
52 /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
53 /// 2. Resolve the `Future`, and get an `ops::compute::Op<V>`.
54 /// 3. Execute the op on the cache:
55 /// - `Op::Put(V)`: Put the new value `V` to the cache.
56 /// - `Op::Remove`: Remove the current cached entry.
57 /// - `Op::Nop`: Do nothing.
    /// 4. Return an `ops::compute::CompResult<K, V>` as follows:
59 ///
60 /// | [`Op<V>`] | [`Entry<K, V>`] already exists? | [`CompResult<K, V>`] | Notes |
61 /// |:--------- |:--- |:--------------------------- |:------------------------------- |
62 /// | `Put(V)` | no | `Inserted(Entry<K, V>)` | The new entry is returned. |
63 /// | `Put(V)` | yes | `ReplacedWith(Entry<K, V>)` | The new entry is returned. |
64 /// | `Remove` | no | `StillNone(Arc<K>)` | |
65 /// | `Remove` | yes | `Removed(Entry<K, V>)` | The removed entry is returned. |
66 /// | `Nop` | no | `StillNone(Arc<K>)` | |
67 /// | `Nop` | yes | `Unchanged(Entry<K, V>)` | The existing entry is returned. |
68 ///
69 /// # See Also
70 ///
    /// - If you want the `Future` to resolve to `Result<Op<V>>` instead of `Op<V>`,
    ///   and to modify the entry only when it resolves to `Ok(_)`, use the
73 /// [`and_try_compute_with`] method.
74 /// - If you only want to update or insert, use the [`and_upsert_with`] method.
75 ///
76 /// [`Entry<K, V>`]: ../struct.Entry.html
77 /// [`Op<V>`]: ../ops/compute/enum.Op.html
78 /// [`CompResult<K, V>`]: ../ops/compute/enum.CompResult.html
79 /// [`and_upsert_with`]: #method.and_upsert_with
80 /// [`and_try_compute_with`]: #method.and_try_compute_with
81 ///
82 /// # Example
83 ///
84 /// ```rust
85 /// // Cargo.toml
86 /// //
87 /// // [dependencies]
88 /// // moka = { version = "0.12.8", features = ["future"] }
89 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
90 ///
91 /// use moka::{
92 /// future::Cache,
93 /// ops::compute::{CompResult, Op},
94 /// };
95 ///
96 /// #[tokio::main]
97 /// async fn main() {
98 /// let cache: Cache<String, u64> = Cache::new(100);
99 /// let key = "key1".to_string();
100 ///
101 /// /// Increment a cached `u64` counter. If the counter is greater than or
102 /// /// equal to 2, remove it.
    ///     async fn increment_or_remove_counter(
104 /// cache: &Cache<String, u64>,
105 /// key: &str,
106 /// ) -> CompResult<String, u64> {
107 /// cache
108 /// .entry(key.to_string())
109 /// .and_compute_with(|maybe_entry| {
110 /// let op = if let Some(entry) = maybe_entry {
111 /// let counter = entry.into_value();
112 /// if counter < 2 {
113 /// Op::Put(counter.saturating_add(1)) // Update
114 /// } else {
115 /// Op::Remove
116 /// }
117 /// } else {
118 /// Op::Put(1) // Insert
119 /// };
120 /// // Return a Future that is resolved to `op` immediately.
121 /// std::future::ready(op)
122 /// })
123 /// .await
124 /// }
125 ///
126 /// // This should insert a new counter value 1 to the cache, and return the
127 /// // value with the kind of the operation performed.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
129 /// let CompResult::Inserted(entry) = result else {
130 /// panic!("`Inserted` should be returned: {result:?}");
131 /// };
132 /// assert_eq!(entry.into_value(), 1);
133 ///
134 /// // This should increment the cached counter value by 1.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
136 /// let CompResult::ReplacedWith(entry) = result else {
137 /// panic!("`ReplacedWith` should be returned: {result:?}");
138 /// };
139 /// assert_eq!(entry.into_value(), 2);
140 ///
    ///     // This should remove the cached counter from the cache, and return the
    ///     // _removed_ value.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
144 /// let CompResult::Removed(entry) = result else {
145 /// panic!("`Removed` should be returned: {result:?}");
146 /// };
147 /// assert_eq!(entry.into_value(), 2);
148 ///
149 /// // The key should not exist.
150 /// assert!(!cache.contains_key(&key));
151 ///
152 /// // This should start over; insert a new counter value 1 to the cache.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
154 /// let CompResult::Inserted(entry) = result else {
155 /// panic!("`Inserted` should be returned: {result:?}");
156 /// };
157 /// assert_eq!(entry.into_value(), 1);
158 /// }
159 /// ```
160 ///
161 /// # Concurrent calls on the same key
162 ///
163 /// This method guarantees that concurrent calls on the same key are executed
164 /// serially. That is, `and_compute_with` calls on the same key never run
165 /// concurrently. The calls are serialized by the order of their invocation. It
166 /// uses a key-level lock to achieve this.
167 pub async fn and_compute_with<F, Fut>(self, f: F) -> compute::CompResult<K, V>
168 where
169 F: FnOnce(Option<Entry<K, V>>) -> Fut,
170 Fut: Future<Output = compute::Op<V>>,
171 {
172 let key = Arc::new(self.owned_key);
173 self.cache
174 .compute_with_hash_and_fun(key, self.hash, f)
175 .await
176 }
177
178 /// Performs a compute operation on a cached entry by using the given closure
179 /// `f`. A compute operation is either put, remove or no-operation (nop).
180 ///
181 /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
182 /// the key, and return a `Future` that resolves to a
183 /// `Result<ops::compute::Op<V>, E>`.
184 ///
    /// This method works as follows:
186 ///
187 /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
188 /// 2. Resolve the `Future`, and get a `Result<ops::compute::Op<V>, E>`.
189 /// 3. If resolved to `Err(E)`, return it.
190 /// 4. Else, execute the op on the cache:
191 /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache.
192 /// - `Ok(Op::Remove)`: Remove the current cached entry.
193 /// - `Ok(Op::Nop)`: Do nothing.
    /// 5. Return an `Ok(ops::compute::CompResult<K, V>)` as follows:
195 ///
196 /// | [`Op<V>`] | [`Entry<K, V>`] already exists? | [`CompResult<K, V>`] | Notes |
197 /// |:--------- |:--- |:--------------------------- |:------------------------------- |
198 /// | `Put(V)` | no | `Inserted(Entry<K, V>)` | The new entry is returned. |
199 /// | `Put(V)` | yes | `ReplacedWith(Entry<K, V>)` | The new entry is returned. |
200 /// | `Remove` | no | `StillNone(Arc<K>)` | |
201 /// | `Remove` | yes | `Removed(Entry<K, V>)` | The removed entry is returned. |
202 /// | `Nop` | no | `StillNone(Arc<K>)` | |
203 /// | `Nop` | yes | `Unchanged(Entry<K, V>)` | The existing entry is returned. |
204 ///
205 /// # See Also
206 ///
    /// - If you want the `Future` to resolve to `Op<V>` instead of `Result<Op<V>>`, use
208 /// the [`and_compute_with`] method.
209 /// - If you only want to update or insert, use the [`and_upsert_with`] method.
210 ///
211 /// [`Entry<K, V>`]: ../struct.Entry.html
212 /// [`Op<V>`]: ../ops/compute/enum.Op.html
213 /// [`CompResult<K, V>`]: ../ops/compute/enum.CompResult.html
214 /// [`and_upsert_with`]: #method.and_upsert_with
215 /// [`and_compute_with`]: #method.and_compute_with
216 ///
217 /// # Example
218 ///
219 /// See [`try_append_value_async.rs`] in the `examples` directory.
220 ///
221 /// [`try_append_value_async.rs`]:
222 /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_async.rs
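    ///
    /// The following is a minimal, self-contained sketch of the `Result`-based
    /// compute flow; the parsing logic and the error type below are only for
    /// illustration:
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12.8", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::{
    ///     future::Cache,
    ///     ops::compute::{CompResult, Op},
    /// };
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u64> = Cache::new(100);
    ///     let key = "key1".to_string();
    ///
    ///     // Parse a string and add the parsed value to the cached counter.
    ///     let result = cache
    ///         .entry(key.clone())
    ///         .and_try_compute_with(|maybe_entry| {
    ///             let current = maybe_entry.map(|e| e.into_value()).unwrap_or(0);
    ///             let parsed = "42".parse::<u64>();
    ///             // Resolve to `Ok(Op::Put(_))` on success, or to `Err(_)` on a
    ///             // parse failure.
    ///             std::future::ready(parsed.map(|v| Op::Put(current + v)))
    ///         })
    ///         .await;
    ///     let Ok(CompResult::Inserted(entry)) = result else {
    ///         panic!("`Ok(Inserted)` should be returned");
    ///     };
    ///     assert_eq!(entry.into_value(), 42);
    ///
    ///     // A parse failure propagates the error and leaves the cache untouched.
    ///     let result = cache
    ///         .entry(key.clone())
    ///         .and_try_compute_with(|_maybe_entry| {
    ///             let parsed = "forty-two".parse::<u64>();
    ///             std::future::ready(parsed.map(Op::Put))
    ///         })
    ///         .await;
    ///     assert!(result.is_err());
    ///     assert_eq!(cache.get(&key).await, Some(42));
    /// }
    /// ```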
223 ///
224 /// # Concurrent calls on the same key
225 ///
226 /// This method guarantees that concurrent calls on the same key are executed
227 /// serially. That is, `and_try_compute_with` calls on the same key never run
228 /// concurrently. The calls are serialized by the order of their invocation. It
229 /// uses a key-level lock to achieve this.
230 pub async fn and_try_compute_with<F, Fut, E>(self, f: F) -> Result<compute::CompResult<K, V>, E>
231 where
232 F: FnOnce(Option<Entry<K, V>>) -> Fut,
233 Fut: Future<Output = Result<compute::Op<V>, E>>,
234 E: Send + Sync + 'static,
235 {
236 let key = Arc::new(self.owned_key);
237 self.cache
238 .try_compute_with_hash_and_fun(key, self.hash, f)
239 .await
240 }
241
242 pub async fn and_try_compute_if_nobody_else<F, Fut, E>(
243 self,
244 f: F,
245 ) -> Result<compute::CompResult<K, V>, E>
246 where
247 F: FnOnce(Option<Entry<K, V>>) -> Fut,
248 Fut: Future<Output = Result<compute::Op<V>, E>>,
249 E: Send + Sync + 'static,
250 {
251 let key = Arc::new(self.owned_key);
252 self.cache
253 .try_compute_if_nobody_else_with_hash_and_fun(key, self.hash, f)
254 .await
255 }
256
257 /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word
258 /// "upsert" here means "update" or "insert".
259 ///
260 /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
261 /// the key, and return a `Future` that resolves to a new value `V`.
262 ///
    /// This method works as follows:
264 ///
265 /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
266 /// 2. Resolve the `Future`, and get a new value `V`.
267 /// 3. Upsert the new value to the cache.
268 /// 4. Return the `Entry` having the upserted value.
269 ///
270 /// # See Also
271 ///
    /// - If you want to upsert only when certain conditions are met, use the
    ///   [`and_compute_with`] method.
    /// - If you want to try to upsert, that is, to have the `Future` resolve to
    ///   `Result<V>` instead of `V` and upsert only when it resolves to `Ok(V)`,
    ///   use the [`and_try_compute_with`] method.
277 ///
278 /// [`Entry`]: ../struct.Entry.html
279 /// [`and_compute_with`]: #method.and_compute_with
280 /// [`and_try_compute_with`]: #method.and_try_compute_with
281 ///
282 /// # Example
283 ///
284 /// ```rust
285 /// // Cargo.toml
286 /// //
287 /// // [dependencies]
288 /// // moka = { version = "0.12.8", features = ["future"] }
289 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
290 ///
291 /// use moka::future::Cache;
292 ///
293 /// #[tokio::main]
294 /// async fn main() {
295 /// let cache: Cache<String, u64> = Cache::new(100);
296 /// let key = "key1".to_string();
297 ///
298 /// let entry = cache
299 /// .entry(key.clone())
300 /// .and_upsert_with(|maybe_entry| {
301 /// let counter = if let Some(entry) = maybe_entry {
302 /// entry.into_value().saturating_add(1) // Update
303 /// } else {
304 /// 1 // Insert
305 /// };
306 /// // Return a Future that is resolved to `counter` immediately.
307 /// std::future::ready(counter)
308 /// })
309 /// .await;
310 /// // It was not an update.
311 /// assert!(!entry.is_old_value_replaced());
312 /// assert_eq!(entry.key(), &key);
313 /// assert_eq!(entry.into_value(), 1);
314 ///
315 /// let entry = cache
316 /// .entry(key.clone())
317 /// .and_upsert_with(|maybe_entry| {
318 /// let counter = if let Some(entry) = maybe_entry {
319 /// entry.into_value().saturating_add(1)
320 /// } else {
321 /// 1
322 /// };
323 /// std::future::ready(counter)
324 /// })
325 /// .await;
326 /// // It was an update.
327 /// assert!(entry.is_old_value_replaced());
328 /// assert_eq!(entry.key(), &key);
329 /// assert_eq!(entry.into_value(), 2);
330 /// }
331 /// ```
332 ///
333 /// # Concurrent calls on the same key
334 ///
335 /// This method guarantees that concurrent calls on the same key are executed
336 /// serially. That is, `and_upsert_with` calls on the same key never run
337 /// concurrently. The calls are serialized by the order of their invocation. It
338 /// uses a key-level lock to achieve this.
339 pub async fn and_upsert_with<F, Fut>(self, f: F) -> Entry<K, V>
340 where
341 F: FnOnce(Option<Entry<K, V>>) -> Fut,
342 Fut: Future<Output = V>,
343 {
344 let key = Arc::new(self.owned_key);
345 self.cache.upsert_with_hash_and_fun(key, self.hash, f).await
346 }
347
348 /// Returns the corresponding [`Entry`] for the key given when this entry
349 /// selector was constructed. If the entry does not exist, inserts one by calling
350 /// the [`default`][std-default-function] function of the value type `V`.
351 ///
352 /// [`Entry`]: ../struct.Entry.html
353 /// [std-default-function]: https://doc.rust-lang.org/stable/std/default/trait.Default.html#tymethod.default
354 ///
355 /// # Example
356 ///
357 /// ```rust
358 /// // Cargo.toml
359 /// //
360 /// // [dependencies]
361 /// // moka = { version = "0.12", features = ["future"] }
362 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
363 ///
364 /// use moka::future::Cache;
365 ///
366 /// #[tokio::main]
367 /// async fn main() {
368 /// let cache: Cache<String, Option<u32>> = Cache::new(100);
369 /// let key = "key1".to_string();
370 ///
371 /// let entry = cache.entry(key.clone()).or_default().await;
372 /// assert!(entry.is_fresh());
373 /// assert_eq!(entry.key(), &key);
374 /// assert_eq!(entry.into_value(), None);
375 ///
376 /// let entry = cache.entry(key).or_default().await;
377 /// // Not fresh because the value was already in the cache.
378 /// assert!(!entry.is_fresh());
379 /// }
380 /// ```
381 pub async fn or_default(self) -> Entry<K, V>
382 where
383 V: Default,
384 {
385 let key = Arc::new(self.owned_key);
386 self.cache
387 .get_or_insert_with_hash(key, self.hash, Default::default)
388 .await
389 }
390
391 /// Returns the corresponding [`Entry`] for the key given when this entry
392 /// selector was constructed. If the entry does not exist, inserts one by using
    /// the given `default` value for `V`.
394 ///
395 /// [`Entry`]: ../struct.Entry.html
396 ///
397 /// # Example
398 ///
399 /// ```rust
400 /// // Cargo.toml
401 /// //
402 /// // [dependencies]
403 /// // moka = { version = "0.12", features = ["future"] }
404 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
405 ///
406 /// use moka::future::Cache;
407 ///
408 /// #[tokio::main]
409 /// async fn main() {
410 /// let cache: Cache<String, u32> = Cache::new(100);
411 /// let key = "key1".to_string();
412 ///
413 /// let entry = cache.entry(key.clone()).or_insert(3).await;
414 /// assert!(entry.is_fresh());
415 /// assert_eq!(entry.key(), &key);
416 /// assert_eq!(entry.into_value(), 3);
417 ///
418 /// let entry = cache.entry(key).or_insert(6).await;
419 /// // Not fresh because the value was already in the cache.
420 /// assert!(!entry.is_fresh());
421 /// assert_eq!(entry.into_value(), 3);
422 /// }
423 /// ```
424 pub async fn or_insert(self, default: V) -> Entry<K, V> {
425 let key = Arc::new(self.owned_key);
426 let init = || default;
427 self.cache
428 .get_or_insert_with_hash(key, self.hash, init)
429 .await
430 }
431
432 /// Returns the corresponding [`Entry`] for the key given when this entry
433 /// selector was constructed. If the entry does not exist, resolves the `init`
434 /// future and inserts the output.
435 ///
436 /// [`Entry`]: ../struct.Entry.html
437 ///
438 /// # Example
439 ///
440 /// ```rust
441 /// // Cargo.toml
442 /// //
443 /// // [dependencies]
444 /// // moka = { version = "0.12", features = ["future"] }
445 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
446 ///
447 /// use moka::future::Cache;
448 ///
449 /// #[tokio::main]
450 /// async fn main() {
451 /// let cache: Cache<String, String> = Cache::new(100);
452 /// let key = "key1".to_string();
453 ///
454 /// let entry = cache
455 /// .entry(key.clone())
456 /// .or_insert_with(async { "value1".to_string() })
457 /// .await;
458 /// assert!(entry.is_fresh());
459 /// assert_eq!(entry.key(), &key);
460 /// assert_eq!(entry.into_value(), "value1");
461 ///
462 /// let entry = cache
463 /// .entry(key)
464 /// .or_insert_with(async { "value2".to_string() })
465 /// .await;
466 /// // Not fresh because the value was already in the cache.
467 /// assert!(!entry.is_fresh());
468 /// assert_eq!(entry.into_value(), "value1");
469 /// }
470 /// ```
471 ///
472 /// # Concurrent calls on the same key
473 ///
    /// This method guarantees that concurrent calls on the same non-existent entry
475 /// are coalesced into one evaluation of the `init` future. Only one of the calls
476 /// evaluates its future (thus returned entry's `is_fresh` method returns
477 /// `true`), and other calls wait for that future to resolve (and their
478 /// `is_fresh` return `false`).
479 ///
480 /// For more detail about the coalescing behavior, see
481 /// [`Cache::get_with`][get-with-method].
482 ///
483 /// [get-with-method]: ./struct.Cache.html#method.get_with
484 pub async fn or_insert_with(self, init: impl Future<Output = V>) -> Entry<K, V> {
485 futures_util::pin_mut!(init);
486 let key = Arc::new(self.owned_key);
487 let replace_if = None as Option<fn(&V) -> bool>;
488 self.cache
489 .get_or_insert_with_hash_and_fun(key, self.hash, init, replace_if, true)
490 .await
491 }
492
493 /// Works like [`or_insert_with`](#method.or_insert_with), but takes an additional
494 /// `replace_if` closure.
495 ///
496 /// This method will resolve the `init` future and insert the output to the
497 /// cache when:
498 ///
499 /// - The key does not exist.
500 /// - Or, `replace_if` closure returns `true`.
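    ///
    /// # Example
    ///
    /// A minimal sketch of when the `init` future is (and is not) resolved; the
    /// values and the `replace_if` closures below are only for illustration:
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::future::Cache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u32> = Cache::new(100);
    ///     let key = "key1".to_string();
    ///
    ///     // The key does not exist, so the `init` future is resolved and its
    ///     // output is inserted.
    ///     let entry = cache
    ///         .entry(key.clone())
    ///         .or_insert_with_if(async { 1 }, |_| false)
    ///         .await;
    ///     assert!(entry.is_fresh());
    ///     assert_eq!(entry.into_value(), 1);
    ///
    ///     // The key exists and `replace_if` returns `false`, so the cached
    ///     // value is kept and `init` is not resolved.
    ///     let entry = cache
    ///         .entry(key.clone())
    ///         .or_insert_with_if(async { 2 }, |v| *v > 1)
    ///         .await;
    ///     assert!(!entry.is_fresh());
    ///     assert_eq!(entry.into_value(), 1);
    ///
    ///     // The key exists and `replace_if` returns `true`, so the `init`
    ///     // future is resolved and its output replaces the cached value.
    ///     let entry = cache
    ///         .entry(key)
    ///         .or_insert_with_if(async { 2 }, |v| *v == 1)
    ///         .await;
    ///     assert_eq!(entry.into_value(), 2);
    /// }
    /// ```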
501 pub async fn or_insert_with_if(
502 self,
503 init: impl Future<Output = V>,
504 replace_if: impl FnMut(&V) -> bool + Send,
505 ) -> Entry<K, V> {
506 futures_util::pin_mut!(init);
507 let key = Arc::new(self.owned_key);
508 self.cache
509 .get_or_insert_with_hash_and_fun(key, self.hash, init, Some(replace_if), true)
510 .await
511 }
512
513 /// Returns the corresponding [`Entry`] for the key given when this entry
514 /// selector was constructed. If the entry does not exist, resolves the `init`
515 /// future, and inserts an entry if `Some(value)` was returned. If `None` was
516 /// returned from the future, this method does not insert an entry and returns
517 /// `None`.
518 ///
519 /// [`Entry`]: ../struct.Entry.html
520 ///
521 /// # Example
522 ///
523 /// ```rust
524 /// // Cargo.toml
525 /// //
526 /// // [dependencies]
527 /// // moka = { version = "0.12", features = ["future"] }
528 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
529 ///
530 /// use moka::future::Cache;
531 ///
532 /// #[tokio::main]
533 /// async fn main() {
534 /// let cache: Cache<String, u32> = Cache::new(100);
535 /// let key = "key1".to_string();
536 ///
537 /// let none_entry = cache
538 /// .entry(key.clone())
539 /// .or_optionally_insert_with(async { None })
540 /// .await;
541 /// assert!(none_entry.is_none());
542 ///
543 /// let some_entry = cache
544 /// .entry(key.clone())
545 /// .or_optionally_insert_with(async { Some(3) })
546 /// .await;
547 /// assert!(some_entry.is_some());
548 /// let entry = some_entry.unwrap();
549 /// assert!(entry.is_fresh());
550 /// assert_eq!(entry.key(), &key);
551 /// assert_eq!(entry.into_value(), 3);
552 ///
553 /// let some_entry = cache
554 /// .entry(key)
555 /// .or_optionally_insert_with(async { Some(6) })
556 /// .await;
557 /// let entry = some_entry.unwrap();
558 /// // Not fresh because the value was already in the cache.
559 /// assert!(!entry.is_fresh());
560 /// assert_eq!(entry.into_value(), 3);
561 /// }
562 /// ```
563 ///
564 /// # Concurrent calls on the same key
565 ///
    /// This method guarantees that concurrent calls on the same non-existent entry
567 /// are coalesced into one evaluation of the `init` future. Only one of the calls
568 /// evaluates its future (thus returned entry's `is_fresh` method returns
569 /// `true`), and other calls wait for that future to resolve (and their
570 /// `is_fresh` return `false`).
571 ///
572 /// For more detail about the coalescing behavior, see
573 /// [`Cache::optionally_get_with`][opt-get-with-method].
574 ///
575 /// [opt-get-with-method]: ./struct.Cache.html#method.optionally_get_with
576 pub async fn or_optionally_insert_with(
577 self,
578 init: impl Future<Output = Option<V>>,
579 ) -> Option<Entry<K, V>> {
580 futures_util::pin_mut!(init);
581 let key = Arc::new(self.owned_key);
582 self.cache
583 .get_or_optionally_insert_with_hash_and_fun(key, self.hash, init, true)
584 .await
585 }
586
587 /// Returns the corresponding [`Entry`] for the key given when this entry
588 /// selector was constructed. If the entry does not exist, resolves the `init`
589 /// future, and inserts an entry if `Ok(value)` was returned. If `Err(_)` was
590 /// returned from the future, this method does not insert an entry and returns
591 /// the `Err` wrapped by [`std::sync::Arc`][std-arc].
592 ///
593 /// [`Entry`]: ../struct.Entry.html
594 /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
595 ///
596 /// # Example
597 ///
598 /// ```rust
599 /// // Cargo.toml
600 /// //
601 /// // [dependencies]
602 /// // moka = { version = "0.12", features = ["future"] }
603 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
604 ///
605 /// use moka::future::Cache;
606 ///
607 /// #[tokio::main]
608 /// async fn main() {
609 /// let cache: Cache<String, u32> = Cache::new(100);
610 /// let key = "key1".to_string();
611 ///
612 /// let error_entry = cache
613 /// .entry(key.clone())
614 /// .or_try_insert_with(async { Err("error") })
615 /// .await;
616 /// assert!(error_entry.is_err());
617 ///
618 /// let ok_entry = cache
619 /// .entry(key.clone())
620 /// .or_try_insert_with(async { Ok::<u32, &str>(3) })
621 /// .await;
622 /// assert!(ok_entry.is_ok());
623 /// let entry = ok_entry.unwrap();
624 /// assert!(entry.is_fresh());
625 /// assert_eq!(entry.key(), &key);
626 /// assert_eq!(entry.into_value(), 3);
627 ///
628 /// let ok_entry = cache
629 /// .entry(key)
630 /// .or_try_insert_with(async { Ok::<u32, &str>(6) })
631 /// .await;
632 /// let entry = ok_entry.unwrap();
633 /// // Not fresh because the value was already in the cache.
634 /// assert!(!entry.is_fresh());
635 /// assert_eq!(entry.into_value(), 3);
636 /// }
637 /// ```
638 ///
639 /// # Concurrent calls on the same key
640 ///
    /// This method guarantees that concurrent calls on the same non-existent entry
642 /// are coalesced into one evaluation of the `init` future (as long as these
643 /// futures return the same error type). Only one of the calls evaluates its
644 /// future (thus returned entry's `is_fresh` method returns `true`), and other
645 /// calls wait for that future to resolve (and their `is_fresh` return `false`).
646 ///
647 /// For more detail about the coalescing behavior, see
648 /// [`Cache::try_get_with`][try-get-with-method].
649 ///
650 /// [try-get-with-method]: ./struct.Cache.html#method.try_get_with
651 pub async fn or_try_insert_with<F, E>(self, init: F) -> Result<Entry<K, V>, Arc<E>>
652 where
653 F: Future<Output = Result<V, E>>,
654 E: Send + Sync + 'static,
655 {
656 futures_util::pin_mut!(init);
657 let key = Arc::new(self.owned_key);
658 self.cache
659 .get_or_try_insert_with_hash_and_fun(key, self.hash, init, true)
660 .await
661 }
662}
663
664/// Provides advanced methods to select or insert an entry of the cache.
665///
666/// Many methods here return an [`Entry`], a snapshot of a single key-value pair in
667/// the cache, carrying additional information like `is_fresh`.
668///
669/// `RefKeyEntrySelector` is constructed from the
670/// [`entry_by_ref`][entry-by-ref-method] method on the cache.
671///
672/// [`Entry`]: ../struct.Entry.html
673/// [entry-by-ref-method]: ./struct.Cache.html#method.entry_by_ref
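///
/// # Example
///
/// A small usage sketch; any of the methods below can be called on the selector
/// returned by `entry_by_ref` (the key and value here are only for illustration):
///
/// ```rust
/// // Cargo.toml
/// //
/// // [dependencies]
/// // moka = { version = "0.12", features = ["future"] }
/// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
///
/// use moka::future::Cache;
///
/// #[tokio::main]
/// async fn main() {
///     let cache: Cache<String, u32> = Cache::new(100);
///     let key = "key1".to_string();
///
///     // `Cache::entry_by_ref` returns a `RefKeyEntrySelector`. If the entry does
///     // not exist, the key is cloned and a new entry is inserted.
///     let entry = cache.entry_by_ref(&key).or_insert(1).await;
///     assert!(entry.is_fresh());
///     assert_eq!(entry.into_value(), 1);
/// }
/// ```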
674pub struct RefKeyEntrySelector<'a, K, Q, V, S>
675where
676 Q: ?Sized,
677{
678 ref_key: &'a Q,
679 hash: u64,
680 cache: &'a Cache<K, V, S>,
681}
682
683impl<'a, K, Q, V, S> RefKeyEntrySelector<'a, K, Q, V, S>
684where
685 K: Hash + Eq + Send + Sync + 'static,
686 Q: Equivalent<K> + ToOwned<Owned = K> + Hash + ?Sized,
687 V: Clone + Send + Sync + 'static,
688 S: BuildHasher + Clone + Send + Sync + 'static,
689{
690 pub(crate) fn new(ref_key: &'a Q, hash: u64, cache: &'a Cache<K, V, S>) -> Self {
691 Self {
692 ref_key,
693 hash,
694 cache,
695 }
696 }
697
698 /// Performs a compute operation on a cached entry by using the given closure
699 /// `f`. A compute operation is either put, remove or no-operation (nop).
700 ///
701 /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
702 /// the key, and return a `Future` that resolves to an `ops::compute::Op<V>`
703 /// enum.
704 ///
    /// This method works as follows:
706 ///
707 /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
708 /// 2. Resolve the `Future`, and get an `ops::compute::Op<V>`.
709 /// 3. Execute the op on the cache:
710 /// - `Op::Put(V)`: Put the new value `V` to the cache.
711 /// - `Op::Remove`: Remove the current cached entry.
712 /// - `Op::Nop`: Do nothing.
    /// 4. Return an `ops::compute::CompResult<K, V>` as follows:
714 ///
715 /// | [`Op<V>`] | [`Entry<K, V>`] already exists? | [`CompResult<K, V>`] | Notes |
716 /// |:--------- |:--- |:--------------------------- |:------------------------------- |
717 /// | `Put(V)` | no | `Inserted(Entry<K, V>)` | The new entry is returned. |
718 /// | `Put(V)` | yes | `ReplacedWith(Entry<K, V>)` | The new entry is returned. |
719 /// | `Remove` | no | `StillNone(Arc<K>)` | |
720 /// | `Remove` | yes | `Removed(Entry<K, V>)` | The removed entry is returned. |
721 /// | `Nop` | no | `StillNone(Arc<K>)` | |
722 /// | `Nop` | yes | `Unchanged(Entry<K, V>)` | The existing entry is returned. |
723 ///
724 /// # See Also
725 ///
    /// - If you want the `Future` to resolve to `Result<Op<V>>` instead of `Op<V>`,
    ///   and to modify the entry only when it resolves to `Ok(_)`, use the
728 /// [`and_try_compute_with`] method.
729 /// - If you only want to update or insert, use the [`and_upsert_with`] method.
730 ///
731 /// [`Entry<K, V>`]: ../struct.Entry.html
732 /// [`Op<V>`]: ../ops/compute/enum.Op.html
733 /// [`CompResult<K, V>`]: ../ops/compute/enum.CompResult.html
734 /// [`and_upsert_with`]: #method.and_upsert_with
735 /// [`and_try_compute_with`]: #method.and_try_compute_with
736 ///
737 /// # Example
738 ///
739 /// ```rust
740 /// // Cargo.toml
741 /// //
742 /// // [dependencies]
743 /// // moka = { version = "0.12.8", features = ["future"] }
744 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
745 ///
746 /// use moka::{
747 /// future::Cache,
748 /// ops::compute::{CompResult, Op},
749 /// };
750 ///
751 /// #[tokio::main]
752 /// async fn main() {
753 /// let cache: Cache<String, u64> = Cache::new(100);
754 /// let key = "key1";
755 ///
756 /// /// Increment a cached `u64` counter. If the counter is greater than or
757 /// /// equal to 2, remove it.
    ///     async fn increment_or_remove_counter(
759 /// cache: &Cache<String, u64>,
760 /// key: &str,
761 /// ) -> CompResult<String, u64> {
762 /// cache
763 /// .entry_by_ref(key)
764 /// .and_compute_with(|maybe_entry| {
765 /// let op = if let Some(entry) = maybe_entry {
766 /// let counter = entry.into_value();
767 /// if counter < 2 {
768 /// Op::Put(counter.saturating_add(1)) // Update
769 /// } else {
770 /// Op::Remove
771 /// }
772 /// } else {
773 /// Op::Put(1) // Insert
774 /// };
775 /// // Return a Future that is resolved to `op` immediately.
776 /// std::future::ready(op)
777 /// })
778 /// .await
779 /// }
780 ///
    ///     // This should insert a new counter value 1 to the cache, and return the
    ///     // value with the kind of the operation performed.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
784 /// let CompResult::Inserted(entry) = result else {
785 /// panic!("`Inserted` should be returned: {result:?}");
786 /// };
787 /// assert_eq!(entry.into_value(), 1);
788 ///
789 /// // This should increment the cached counter value by 1.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
791 /// let CompResult::ReplacedWith(entry) = result else {
792 /// panic!("`ReplacedWith` should be returned: {result:?}");
793 /// };
794 /// assert_eq!(entry.into_value(), 2);
795 ///
    ///     // This should remove the cached counter from the cache, and return the
    ///     // _removed_ value.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
799 /// let CompResult::Removed(entry) = result else {
800 /// panic!("`Removed` should be returned: {result:?}");
801 /// };
802 /// assert_eq!(entry.into_value(), 2);
803 ///
804 /// // The key should no longer exist.
805 /// assert!(!cache.contains_key(key));
806 ///
807 /// // This should start over; insert a new counter value 1 to the cache.
    ///     let result = increment_or_remove_counter(&cache, &key).await;
809 /// let CompResult::Inserted(entry) = result else {
810 /// panic!("`Inserted` should be returned: {result:?}");
811 /// };
812 /// assert_eq!(entry.into_value(), 1);
813 /// }
814 /// ```
815 ///
816 /// # Concurrent calls on the same key
817 ///
818 /// This method guarantees that concurrent calls on the same key are executed
819 /// serially. That is, `and_compute_with` calls on the same key never run
820 /// concurrently. The calls are serialized by the order of their invocation. It
821 /// uses a key-level lock to achieve this.
822 pub async fn and_compute_with<F, Fut>(self, f: F) -> compute::CompResult<K, V>
823 where
824 F: FnOnce(Option<Entry<K, V>>) -> Fut,
825 Fut: Future<Output = compute::Op<V>>,
826 {
827 let key = Arc::new(self.ref_key.to_owned());
828 self.cache
829 .compute_with_hash_and_fun(key, self.hash, f)
830 .await
831 }
832
833 /// Performs a compute operation on a cached entry by using the given closure
834 /// `f`. A compute operation is either put, remove or no-operation (nop).
835 ///
836 /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
837 /// the key, and return a `Future` that resolves to a
838 /// `Result<ops::compute::Op<V>, E>`.
839 ///
    /// This method works as follows:
841 ///
842 /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
843 /// 2. Resolve the `Future`, and get a `Result<ops::compute::Op<V>, E>`.
844 /// 3. If resolved to `Err(E)`, return it.
845 /// 4. Else, execute the op on the cache:
846 /// - `Ok(Op::Put(V))`: Put the new value `V` to the cache.
847 /// - `Ok(Op::Remove)`: Remove the current cached entry.
848 /// - `Ok(Op::Nop)`: Do nothing.
    /// 5. Return an `Ok(ops::compute::CompResult<K, V>)` as follows:
850 ///
851 /// | [`Op<V>`] | [`Entry<K, V>`] already exists? | [`CompResult<K, V>`] | Notes |
852 /// |:--------- |:--- |:--------------------------- |:------------------------------- |
853 /// | `Put(V)` | no | `Inserted(Entry<K, V>)` | The new entry is returned. |
854 /// | `Put(V)` | yes | `ReplacedWith(Entry<K, V>)` | The new entry is returned. |
855 /// | `Remove` | no | `StillNone(Arc<K>)` | |
856 /// | `Remove` | yes | `Removed(Entry<K, V>)` | The removed entry is returned. |
857 /// | `Nop` | no | `StillNone(Arc<K>)` | |
858 /// | `Nop` | yes | `Unchanged(Entry<K, V>)` | The existing entry is returned. |
859 ///
860 /// # See Also
861 ///
    /// - If you want the `Future` to resolve to `Op<V>` instead of `Result<Op<V>>`, use
863 /// the [`and_compute_with`] method.
864 /// - If you only want to update or insert, use the [`and_upsert_with`] method.
865 ///
866 /// [`Entry<K, V>`]: ../struct.Entry.html
867 /// [`Op<V>`]: ../ops/compute/enum.Op.html
868 /// [`CompResult<K, V>`]: ../ops/compute/enum.CompResult.html
869 /// [`and_upsert_with`]: #method.and_upsert_with
870 /// [`and_compute_with`]: #method.and_compute_with
871 ///
872 /// # Example
873 ///
874 /// See [`try_append_value_async.rs`] in the `examples` directory.
875 ///
876 /// [`try_append_value_async.rs`]:
877 /// https://github.com/moka-rs/moka/tree/main/examples/try_append_value_async.rs
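    ///
    /// The following is a minimal sketch of the `Result`-based flow with a borrowed
    /// key; the parsing logic and the error type below are only for illustration:
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12.8", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::{
    ///     future::Cache,
    ///     ops::compute::{CompResult, Op},
    /// };
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u64> = Cache::new(100);
    ///     let key = "key1".to_string();
    ///
    ///     // Parse a string and cache the parsed value. A parse failure would
    ///     // resolve to `Err(_)` and leave the cache untouched.
    ///     let result = cache
    ///         .entry_by_ref(&key)
    ///         .and_try_compute_with(|maybe_entry| {
    ///             let current = maybe_entry.map(|e| e.into_value()).unwrap_or(0);
    ///             let parsed = "42".parse::<u64>();
    ///             std::future::ready(parsed.map(|v| Op::Put(current + v)))
    ///         })
    ///         .await;
    ///     let Ok(CompResult::Inserted(entry)) = result else {
    ///         panic!("`Ok(Inserted)` should be returned");
    ///     };
    ///     assert_eq!(entry.into_value(), 42);
    /// }
    /// ```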
878 ///
879 /// # Concurrent calls on the same key
880 ///
881 /// This method guarantees that concurrent calls on the same key are executed
882 /// serially. That is, `and_try_compute_with` calls on the same key never run
883 /// concurrently. The calls are serialized by the order of their invocation. It
884 /// uses a key-level lock to achieve this.
885 pub async fn and_try_compute_with<F, Fut, E>(self, f: F) -> Result<compute::CompResult<K, V>, E>
886 where
887 F: FnOnce(Option<Entry<K, V>>) -> Fut,
888 Fut: Future<Output = Result<compute::Op<V>, E>>,
889 E: Send + Sync + 'static,
890 {
891 let key = Arc::new(self.ref_key.to_owned());
892 self.cache
893 .try_compute_with_hash_and_fun(key, self.hash, f)
894 .await
895 }
896
897 pub async fn and_try_compute_if_nobody_else<F, Fut, E>(
898 self,
899 f: F,
900 ) -> Result<compute::CompResult<K, V>, E>
901 where
902 F: FnOnce(Option<Entry<K, V>>) -> Fut,
903 Fut: Future<Output = Result<compute::Op<V>, E>>,
904 E: Send + Sync + 'static,
905 {
906 let key = Arc::new(self.ref_key.to_owned());
907 self.cache
908 .try_compute_if_nobody_else_with_hash_and_fun(key, self.hash, f)
909 .await
910 }
911
912 /// Performs an upsert of an [`Entry`] by using the given closure `f`. The word
913 /// "upsert" here means "update" or "insert".
914 ///
915 /// The closure `f` should take the current entry of `Option<Entry<K, V>>` for
916 /// the key, and return a `Future` that resolves to a new value `V`.
917 ///
    /// This method works as follows:
919 ///
920 /// 1. Apply the closure `f` to the current cached `Entry`, and get a `Future`.
921 /// 2. Resolve the `Future`, and get a new value `V`.
922 /// 3. Upsert the new value to the cache.
923 /// 4. Return the `Entry` having the upserted value.
924 ///
925 /// # See Also
926 ///
    /// - If you want to upsert only when certain conditions are met, use the
    ///   [`and_compute_with`] method.
    /// - If you want to try to upsert, that is, to have the `Future` resolve to
    ///   `Result<V>` instead of `V` and upsert only when it resolves to `Ok(V)`,
    ///   use the [`and_try_compute_with`] method.
932 ///
933 /// [`Entry`]: ../struct.Entry.html
934 /// [`and_compute_with`]: #method.and_compute_with
935 /// [`and_try_compute_with`]: #method.and_try_compute_with
936 ///
937 /// # Example
938 ///
939 /// ```rust
940 /// // Cargo.toml
941 /// //
942 /// // [dependencies]
943 /// // moka = { version = "0.12.8", features = ["future"] }
944 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
945 ///
946 /// use moka::future::Cache;
947 ///
948 /// #[tokio::main]
949 /// async fn main() {
950 /// let cache: Cache<String, u64> = Cache::new(100);
951 /// let key = "key1";
952 ///
953 /// let entry = cache
954 /// .entry_by_ref(key)
955 /// .and_upsert_with(|maybe_entry| {
956 /// let counter = if let Some(entry) = maybe_entry {
957 /// entry.into_value().saturating_add(1) // Update
958 /// } else {
959 /// 1 // Insert
960 /// };
961 /// // Return a Future that is resolved to `counter` immediately.
962 /// std::future::ready(counter)
963 /// })
964 /// .await;
965 /// // It was not an update.
966 /// assert!(!entry.is_old_value_replaced());
967 /// assert_eq!(entry.key(), &key);
968 /// assert_eq!(entry.into_value(), 1);
969 ///
970 /// let entry = cache
971 /// .entry_by_ref(key)
972 /// .and_upsert_with(|maybe_entry| {
973 /// let counter = if let Some(entry) = maybe_entry {
974 /// entry.into_value().saturating_add(1)
975 /// } else {
976 /// 1
977 /// };
978 /// std::future::ready(counter)
979 /// })
980 /// .await;
981 /// // It was an update.
982 /// assert!(entry.is_old_value_replaced());
983 /// assert_eq!(entry.key(), &key);
984 /// assert_eq!(entry.into_value(), 2);
985 /// }
986 /// ```
987 ///
988 /// # Concurrent calls on the same key
989 ///
990 /// This method guarantees that concurrent calls on the same key are executed
991 /// serially. That is, `and_upsert_with` calls on the same key never run
992 /// concurrently. The calls are serialized by the order of their invocation. It
993 /// uses a key-level lock to achieve this.
994 pub async fn and_upsert_with<F, Fut>(self, f: F) -> Entry<K, V>
995 where
996 F: FnOnce(Option<Entry<K, V>>) -> Fut,
997 Fut: Future<Output = V>,
998 {
999 let key = Arc::new(self.ref_key.to_owned());
1000 self.cache.upsert_with_hash_and_fun(key, self.hash, f).await
1001 }
1002
1003 /// Returns the corresponding [`Entry`] for the reference of the key given when
1004 /// this entry selector was constructed. If the entry does not exist, inserts one
1005 /// by cloning the key and calling the [`default`][std-default-function] function
1006 /// of the value type `V`.
1007 ///
1008 /// [`Entry`]: ../struct.Entry.html
1009 /// [std-default-function]: https://doc.rust-lang.org/stable/std/default/trait.Default.html#tymethod.default
1010 ///
1011 /// # Example
1012 ///
1013 /// ```rust
1014 /// // Cargo.toml
1015 /// //
1016 /// // [dependencies]
1017 /// // moka = { version = "0.12", features = ["future"] }
1018 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1019 ///
1020 /// use moka::future::Cache;
1021 ///
1022 /// #[tokio::main]
1023 /// async fn main() {
1024 /// let cache: Cache<String, Option<u32>> = Cache::new(100);
1025 /// let key = "key1".to_string();
1026 ///
1027 /// let entry = cache.entry_by_ref(&key).or_default().await;
1028 /// assert!(entry.is_fresh());
1029 /// assert_eq!(entry.key(), &key);
1030 /// assert_eq!(entry.into_value(), None);
1031 ///
1032 /// let entry = cache.entry_by_ref(&key).or_default().await;
1033 /// // Not fresh because the value was already in the cache.
1034 /// assert!(!entry.is_fresh());
1035 /// }
1036 /// ```
1037 pub async fn or_default(self) -> Entry<K, V>
1038 where
1039 V: Default,
1040 {
1041 self.cache
1042 .get_or_insert_with_hash_by_ref(self.ref_key, self.hash, Default::default)
1043 .await
1044 }
1045
1046 /// Returns the corresponding [`Entry`] for the reference of the key given when
1047 /// this entry selector was constructed. If the entry does not exist, inserts one
1048 /// by cloning the key and using the given `default` value for `V`.
1049 ///
1050 /// [`Entry`]: ../struct.Entry.html
1051 ///
1052 /// # Example
1053 ///
1054 /// ```rust
1055 /// // Cargo.toml
1056 /// //
1057 /// // [dependencies]
1058 /// // moka = { version = "0.12", features = ["future"] }
1059 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1060 ///
1061 /// use moka::future::Cache;
1062 ///
1063 /// #[tokio::main]
1064 /// async fn main() {
1065 /// let cache: Cache<String, u32> = Cache::new(100);
1066 /// let key = "key1".to_string();
1067 ///
1068 /// let entry = cache.entry_by_ref(&key).or_insert(3).await;
1069 /// assert!(entry.is_fresh());
1070 /// assert_eq!(entry.key(), &key);
1071 /// assert_eq!(entry.into_value(), 3);
1072 ///
1073 /// let entry = cache.entry_by_ref(&key).or_insert(6).await;
1074 /// // Not fresh because the value was already in the cache.
1075 /// assert!(!entry.is_fresh());
1076 /// assert_eq!(entry.into_value(), 3);
1077 /// }
1078 /// ```
1079 pub async fn or_insert(self, default: V) -> Entry<K, V> {
1080 let init = || default;
1081 self.cache
1082 .get_or_insert_with_hash_by_ref(self.ref_key, self.hash, init)
1083 .await
1084 }
1085
1086 /// Returns the corresponding [`Entry`] for the reference of the key given when
1087 /// this entry selector was constructed. If the entry does not exist, inserts one
1088 /// by cloning the key and resolving the `init` future for the value.
1089 ///
1090 /// [`Entry`]: ../struct.Entry.html
1091 ///
1092 /// # Example
1093 ///
1094 /// ```rust
1095 /// // Cargo.toml
1096 /// //
1097 /// // [dependencies]
1098 /// // moka = { version = "0.12", features = ["future"] }
1099 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1100 ///
1101 /// use moka::future::Cache;
1102 ///
1103 /// #[tokio::main]
1104 /// async fn main() {
1105 /// let cache: Cache<String, String> = Cache::new(100);
1106 /// let key = "key1".to_string();
1107 ///
1108 /// let entry = cache
1109 /// .entry_by_ref(&key)
1110 /// .or_insert_with(async { "value1".to_string() })
1111 /// .await;
1112 /// assert!(entry.is_fresh());
1113 /// assert_eq!(entry.key(), &key);
1114 /// assert_eq!(entry.into_value(), "value1");
1115 ///
1116 /// let entry = cache
1117 /// .entry_by_ref(&key)
1118 /// .or_insert_with(async { "value2".to_string() })
1119 /// .await;
1120 /// // Not fresh because the value was already in the cache.
1121 /// assert!(!entry.is_fresh());
1122 /// assert_eq!(entry.into_value(), "value1");
1123 /// }
1124 /// ```
1125 ///
1126 /// # Concurrent calls on the same key
1127 ///
    /// This method guarantees that concurrent calls on the same non-existent entry
1129 /// are coalesced into one evaluation of the `init` future. Only one of the calls
1130 /// evaluates its future (thus returned entry's `is_fresh` method returns
1131 /// `true`), and other calls wait for that future to resolve (and their
1132 /// `is_fresh` return `false`).
1133 ///
1134 /// For more detail about the coalescing behavior, see
1135 /// [`Cache::get_with`][get-with-method].
1136 ///
1137 /// [get-with-method]: ./struct.Cache.html#method.get_with
1138 pub async fn or_insert_with(self, init: impl Future<Output = V>) -> Entry<K, V> {
1139 futures_util::pin_mut!(init);
1140 let replace_if = None as Option<fn(&V) -> bool>;
1141 self.cache
1142 .get_or_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, replace_if, true)
1143 .await
1144 }
1145
1146 /// Works like [`or_insert_with`](#method.or_insert_with), but takes an additional
1147 /// `replace_if` closure.
1148 ///
1149 /// This method will resolve the `init` future and insert the output to the
1150 /// cache when:
1151 ///
1152 /// - The key does not exist.
1153 /// - Or, `replace_if` closure returns `true`.
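    ///
    /// # Example
    ///
    /// A minimal sketch of when the `init` future is (and is not) resolved; the
    /// values and the `replace_if` closures below are only for illustration:
    ///
    /// ```rust
    /// // Cargo.toml
    /// //
    /// // [dependencies]
    /// // moka = { version = "0.12", features = ["future"] }
    /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
    ///
    /// use moka::future::Cache;
    ///
    /// #[tokio::main]
    /// async fn main() {
    ///     let cache: Cache<String, u32> = Cache::new(100);
    ///     let key = "key1".to_string();
    ///
    ///     // The key does not exist, so the `init` future is resolved and its
    ///     // output is inserted.
    ///     let entry = cache
    ///         .entry_by_ref(&key)
    ///         .or_insert_with_if(async { 1 }, |_| false)
    ///         .await;
    ///     assert_eq!(entry.into_value(), 1);
    ///
    ///     // `replace_if` returns `false`, so the cached value is kept.
    ///     let entry = cache
    ///         .entry_by_ref(&key)
    ///         .or_insert_with_if(async { 2 }, |v| *v > 1)
    ///         .await;
    ///     assert_eq!(entry.into_value(), 1);
    ///
    ///     // `replace_if` returns `true`, so the `init` future is resolved and
    ///     // its output replaces the cached value.
    ///     let entry = cache
    ///         .entry_by_ref(&key)
    ///         .or_insert_with_if(async { 2 }, |v| *v == 1)
    ///         .await;
    ///     assert_eq!(entry.into_value(), 2);
    /// }
    /// ```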
1154 pub async fn or_insert_with_if(
1155 self,
1156 init: impl Future<Output = V>,
1157 replace_if: impl FnMut(&V) -> bool + Send,
1158 ) -> Entry<K, V> {
1159 futures_util::pin_mut!(init);
1160 self.cache
1161 .get_or_insert_with_hash_by_ref_and_fun(
1162 self.ref_key,
1163 self.hash,
1164 init,
1165 Some(replace_if),
1166 true,
1167 )
1168 .await
1169 }
1170
1171 /// Returns the corresponding [`Entry`] for the reference of the key given when
1172 /// this entry selector was constructed. If the entry does not exist, clones the
1173 /// key and resolves the `init` future. If `Some(value)` was returned by the
    /// future, inserts an entry with the value. If `None` was returned, this method
1175 /// does not insert an entry and returns `None`.
1176 ///
1177 /// [`Entry`]: ../struct.Entry.html
1178 ///
1179 /// # Example
1180 ///
1181 /// ```rust
1182 /// // Cargo.toml
1183 /// //
1184 /// // [dependencies]
1185 /// // moka = { version = "0.12", features = ["future"] }
1186 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1187 ///
1188 /// use moka::future::Cache;
1189 ///
1190 /// #[tokio::main]
1191 /// async fn main() {
1192 /// let cache: Cache<String, u32> = Cache::new(100);
1193 /// let key = "key1".to_string();
1194 ///
1195 /// let none_entry = cache
1196 /// .entry_by_ref(&key)
1197 /// .or_optionally_insert_with(async { None })
1198 /// .await;
1199 /// assert!(none_entry.is_none());
1200 ///
1201 /// let some_entry = cache
1202 /// .entry_by_ref(&key)
1203 /// .or_optionally_insert_with(async { Some(3) })
1204 /// .await;
1205 /// assert!(some_entry.is_some());
1206 /// let entry = some_entry.unwrap();
1207 /// assert!(entry.is_fresh());
1208 /// assert_eq!(entry.key(), &key);
1209 /// assert_eq!(entry.into_value(), 3);
1210 ///
1211 /// let some_entry = cache
1212 /// .entry_by_ref(&key)
1213 /// .or_optionally_insert_with(async { Some(6) })
1214 /// .await;
1215 /// let entry = some_entry.unwrap();
1216 /// // Not fresh because the value was already in the cache.
1217 /// assert!(!entry.is_fresh());
1218 /// assert_eq!(entry.into_value(), 3);
1219 /// }
1220 /// ```
1221 ///
1222 /// # Concurrent calls on the same key
    ///
    /// This method guarantees that concurrent calls on the same non-existent entry
1224 /// are coalesced into one evaluation of the `init` future. Only one of the calls
1225 /// evaluates its future (thus returned entry's `is_fresh` method returns
1226 /// `true`), and other calls wait for that future to resolve (and their
1227 /// `is_fresh` return `false`).
1228 ///
1229 /// For more detail about the coalescing behavior, see
1230 /// [`Cache::optionally_get_with`][opt-get-with-method].
1231 ///
1232 /// [opt-get-with-method]: ./struct.Cache.html#method.optionally_get_with
1233 pub async fn or_optionally_insert_with(
1234 self,
1235 init: impl Future<Output = Option<V>>,
1236 ) -> Option<Entry<K, V>> {
1237 futures_util::pin_mut!(init);
1238 self.cache
1239 .get_or_optionally_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true)
1240 .await
1241 }
1242
1243 /// Returns the corresponding [`Entry`] for the reference of the key given when
1244 /// this entry selector was constructed. If the entry does not exist, clones the
1245 /// key and resolves the `init` future. If `Ok(value)` was returned from the
1246 /// future, inserts an entry with the value. If `Err(_)` was returned, this
1247 /// method does not insert an entry and returns the `Err` wrapped by
1248 /// [`std::sync::Arc`][std-arc].
1249 ///
1250 /// [`Entry`]: ../struct.Entry.html
1251 /// [std-arc]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
1252 ///
1253 /// # Example
1254 ///
1255 /// ```rust
1256 /// // Cargo.toml
1257 /// //
1258 /// // [dependencies]
1259 /// // moka = { version = "0.12", features = ["future"] }
1260 /// // tokio = { version = "1", features = ["rt-multi-thread", "macros" ] }
1261 ///
1262 /// use moka::future::Cache;
1263 ///
1264 /// #[tokio::main]
1265 /// async fn main() {
1266 /// let cache: Cache<String, u32> = Cache::new(100);
1267 /// let key = "key1".to_string();
1268 ///
1269 /// let error_entry = cache
1270 /// .entry_by_ref(&key)
1271 /// .or_try_insert_with(async { Err("error") })
1272 /// .await;
1273 /// assert!(error_entry.is_err());
1274 ///
1275 /// let ok_entry = cache
1276 /// .entry_by_ref(&key)
1277 /// .or_try_insert_with(async { Ok::<u32, &str>(3) })
1278 /// .await;
1279 /// assert!(ok_entry.is_ok());
1280 /// let entry = ok_entry.unwrap();
1281 /// assert!(entry.is_fresh());
1282 /// assert_eq!(entry.key(), &key);
1283 /// assert_eq!(entry.into_value(), 3);
1284 ///
1285 /// let ok_entry = cache
1286 /// .entry_by_ref(&key)
1287 /// .or_try_insert_with(async { Ok::<u32, &str>(6) })
1288 /// .await;
1289 /// let entry = ok_entry.unwrap();
1290 /// // Not fresh because the value was already in the cache.
1291 /// assert!(!entry.is_fresh());
1292 /// assert_eq!(entry.into_value(), 3);
1293 /// }
1294 /// ```
1295 ///
1296 /// # Concurrent calls on the same key
1297 ///
    /// This method guarantees that concurrent calls on the same non-existent entry
1299 /// are coalesced into one evaluation of the `init` future (as long as these
1300 /// futures return the same error type). Only one of the calls evaluates its
1301 /// future (thus returned entry's `is_fresh` method returns `true`), and other
1302 /// calls wait for that future to resolve (and their `is_fresh` return `false`).
1303 ///
1304 /// For more detail about the coalescing behavior, see
1305 /// [`Cache::try_get_with`][try-get-with-method].
1306 ///
1307 /// [try-get-with-method]: ./struct.Cache.html#method.try_get_with
1308 pub async fn or_try_insert_with<F, E>(self, init: F) -> Result<Entry<K, V>, Arc<E>>
1309 where
1310 F: Future<Output = Result<V, E>>,
1311 E: Send + Sync + 'static,
1312 {
1313 futures_util::pin_mut!(init);
1314 self.cache
1315 .get_or_try_insert_with_hash_by_ref_and_fun(self.ref_key, self.hash, init, true)
1316 .await
1317 }
1318}