tagptr/imp/atomic.rs

use core::{
    fmt,
    marker::PhantomData,
    sync::atomic::{AtomicUsize, Ordering},
};

use crate::{AtomicTagPtr, TagPtr};

/********** impl Send + Sync **********************************************************************/

unsafe impl<T, const N: usize> Send for AtomicTagPtr<T, N> {}
unsafe impl<T, const N: usize> Sync for AtomicTagPtr<T, N> {}

/********** impl inherent *************************************************************************/

impl<T, const N: usize> AtomicTagPtr<T, N> {
    doc_comment! {
        doc_tag_bits!(),
        pub const TAG_BITS: usize = N;
    }

    doc_comment! {
        doc_tag_mask!(),
        pub const TAG_MASK: usize = crate::mark_mask(Self::TAG_BITS);
    }

    doc_comment! {
        doc_ptr_mask!(),
        pub const POINTER_MASK: usize = !Self::TAG_MASK;
    }

    doc_comment! {
        doc_null!(),
        ///
        /// # Examples
        ///
        /// ```
        /// use core::{ptr, sync::atomic::Ordering};
        ///
        /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
        ///
        /// let ptr = AtomicTagPtr::null();
        /// assert_eq!(
        ///     ptr.load(Ordering::Relaxed).decompose(),
        ///     (ptr::null_mut(), 0)
        /// );
        /// ```
        pub const fn null() -> Self {
            Self { inner: AtomicUsize::new(0), _marker: PhantomData }
        }
    }

    doc_comment! {
        doc_atomic_new!(),
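        ///
        /// # Examples
        ///
        /// ```
        /// use core::sync::atomic::Ordering;
        ///
        /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
        /// type TagPtr = tagptr::TagPtr<i32, 2>;
        ///
        /// // usage sketch: construct from a composed marked pointer
        /// let reference = &mut 1;
        /// let ptr = AtomicTagPtr::new(TagPtr::compose(reference, 0b01));
        /// assert_eq!(
        ///     ptr.load(Ordering::Relaxed).decompose(),
        ///     (reference as *mut _, 0b01)
        /// );
        /// ```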
        #[inline]
        pub fn new(marked_ptr: TagPtr<T, N>) -> Self {
            Self { inner: AtomicUsize::new(marked_ptr.into_usize()), _marker: PhantomData }
        }
    }

    doc_comment! {
        doc_atomic_into_inner!(),
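        ///
        /// # Examples
        ///
        /// ```
        /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
        /// type TagPtr = tagptr::TagPtr<i32, 2>;
        ///
        /// // usage sketch: consume the atomic and recover the marked pointer
        /// let reference = &mut 1;
        /// let ptr = AtomicTagPtr::new(TagPtr::compose(reference, 0b11));
        /// assert_eq!(ptr.into_inner().decompose(), (reference as *mut _, 0b11));
        /// ```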
        #[inline]
        pub fn into_inner(self) -> TagPtr<T, N> {
            TagPtr::from_usize(self.inner.into_inner())
        }
    }

    /// Returns a mutable reference to the underlying marked pointer.
    ///
    /// This is safe because the mutable reference guarantees no other
    /// threads are concurrently accessing the atomic pointer.
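    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// // usage sketch: mutate through the exclusive reference without atomic operations
    /// let mut ptr = AtomicTagPtr::null();
    /// *ptr.get_mut() = TagPtr::new(&mut 1);
    /// assert!(!ptr.load(Ordering::Relaxed).is_null());
    /// ```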
    #[inline]
    pub fn get_mut(&mut self) -> &mut TagPtr<T, N> {
        // SAFETY: the mutable self reference ensures the dereferencing is sound
        unsafe { &mut *(self.inner.get_mut() as *mut usize as *mut _) }
    }

    /// Loads the value of the atomic marked pointer.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    /// Possible values are [`SeqCst`][seq_cst], [`Acquire`][acq] and
    /// [`Relaxed`][rlx].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`][rel] or [`AcqRel`][acq_rel].
    ///
    /// [rlx]: Ordering::Relaxed
    /// [acq]: Ordering::Acquire
    /// [rel]: Ordering::Release
    /// [acq_rel]: Ordering::AcqRel
    /// [seq_cst]: Ordering::SeqCst
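    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// // usage sketch: load and decompose the stored pointer and tag
    /// let reference = &mut 1;
    /// let ptr = AtomicTagPtr::new(TagPtr::compose(reference, 0b10));
    /// assert_eq!(
    ///     ptr.load(Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b10)
    /// );
    /// ```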
    #[inline]
    pub fn load(&self, order: Ordering) -> TagPtr<T, N> {
        TagPtr::from_usize(self.inner.load(order))
    }

    /// Stores a value into the atomic marked pointer.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    /// Possible values are [`SeqCst`][seq_cst], [`Release`][rel] and
    /// [`Relaxed`][rlx].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`][acq] or [`AcqRel`][acq_rel].
    ///
    /// [rlx]: Ordering::Relaxed
    /// [acq]: Ordering::Acquire
    /// [rel]: Ordering::Release
    /// [acq_rel]: Ordering::AcqRel
    /// [seq_cst]: Ordering::SeqCst
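    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// // usage sketch: store a composed pointer and read it back
    /// let reference = &mut 1;
    /// let ptr = AtomicTagPtr::null();
    /// ptr.store(TagPtr::compose(reference, 0b01), Ordering::Relaxed);
    /// assert_eq!(
    ///     ptr.load(Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b01)
    /// );
    /// ```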
    #[inline]
    pub fn store(&self, ptr: TagPtr<T, N>, order: Ordering) {
        self.inner.store(ptr.into_usize(), order)
    }

    /// Stores a value into the atomic marked pointer and returns the previous
    /// value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation.
    /// All ordering modes are possible.
    /// Note that using [`Acquire`][acq] makes the store part of this operation
    /// [`Relaxed`][rlx], and using [`Release`][rel] makes the load part
    /// [`Relaxed`][rlx].
    ///
    /// [rlx]: Ordering::Relaxed
    /// [acq]: Ordering::Acquire
    /// [rel]: Ordering::Release
    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// let ptr = AtomicTagPtr::null();
    /// let prev = ptr.swap(TagPtr::new(&mut 1), Ordering::Relaxed);
    ///
    /// assert!(prev.is_null());
    /// ```
    pub fn swap(&self, ptr: TagPtr<T, N>, order: Ordering) -> TagPtr<T, N> {
        TagPtr::from_usize(self.inner.swap(ptr.into_usize(), order))
    }

    /// Stores a value into the pointer if the current value is the same as
    /// `current`.
    ///
    /// The return value is a result indicating whether the new value was
    /// written and containing the previous value.
    /// On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe
    /// the memory ordering of this operation.
    /// The first describes the required ordering if the operation succeeds,
    /// while the second describes the required ordering when the operation
    /// fails.
    /// Using [`Acquire`][acq] as success ordering makes the store part of this
    /// operation [`Relaxed`][rlx], and using [`Release`][rel] makes the
    /// successful load [`Relaxed`][rlx].
    /// The failure ordering can only be [`SeqCst`][seq_cst], [`Acquire`][acq]
    /// or [`Relaxed`][rlx] and must be equivalent to or weaker than the success
    /// ordering.
    ///
    /// [rlx]: Ordering::Relaxed
    /// [acq]: Ordering::Acquire
    /// [rel]: Ordering::Release
    /// [seq_cst]: Ordering::SeqCst
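    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// // usage sketch: the exchange only succeeds if the current value matches
    /// let reference = &mut 1;
    /// let ptr = AtomicTagPtr::null();
    /// let new = TagPtr::compose(reference, 0b01);
    ///
    /// let current = ptr.load(Ordering::Relaxed);
    /// let res = ptr.compare_exchange(current, new, (Ordering::Relaxed, Ordering::Relaxed));
    /// assert!(res.is_ok());
    /// assert_eq!(
    ///     ptr.load(Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b01)
    /// );
    /// ```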
    #[inline]
    pub fn compare_exchange(
        &self,
        current: TagPtr<T, N>,
        new: TagPtr<T, N>,
        (success, failure): (Ordering, Ordering),
    ) -> Result<TagPtr<T, N>, TagPtr<T, N>> {
        self.inner
            .compare_exchange(current.into_usize(), new.into_usize(), success, failure)
            .map(|_| current)
            .map_err(TagPtr::from_usize)
    }

    /// Stores a value into the pointer if the current value is the same as
    /// `current`.
    ///
    /// The return value is a result indicating whether the new value was
    /// written and containing the previous value.
    /// On success this value is guaranteed to be equal to `current`.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail,
    /// even when the comparison succeeds, which can result in more efficient
    /// code on some platforms.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe
    /// the memory ordering of this operation.
    /// The first describes the required ordering if the operation succeeds,
    /// while the second describes the required ordering when the operation
    /// fails.
    /// Using [`Acquire`][acq] as success ordering makes the store part of this
    /// operation [`Relaxed`][rlx], and using [`Release`][rel] makes the
    /// successful load [`Relaxed`][rlx].
    /// The failure ordering can only be [`SeqCst`][seq_cst], [`Acquire`][acq]
    /// or [`Relaxed`][rlx] and must be equivalent to or weaker than the success
    /// ordering.
    ///
    /// [rlx]: Ordering::Relaxed
    /// [acq]: Ordering::Acquire
    /// [rel]: Ordering::Release
    /// [seq_cst]: Ordering::SeqCst
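    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// // usage sketch: retry in a loop since the weak variant may fail spuriously
    /// let reference = &mut 1;
    /// let ptr = AtomicTagPtr::null();
    /// let new = TagPtr::compose(reference, 0b11);
    ///
    /// let mut current = ptr.load(Ordering::Relaxed);
    /// while let Err(prev) =
    ///     ptr.compare_exchange_weak(current, new, (Ordering::Relaxed, Ordering::Relaxed))
    /// {
    ///     current = prev;
    /// }
    ///
    /// assert_eq!(
    ///     ptr.load(Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b11)
    /// );
    /// ```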
    #[inline]
    pub fn compare_exchange_weak(
        &self,
        current: TagPtr<T, N>,
        new: TagPtr<T, N>,
        (success, failure): (Ordering, Ordering),
    ) -> Result<TagPtr<T, N>, TagPtr<T, N>> {
        self.inner
            .compare_exchange_weak(current.into_usize(), new.into_usize(), success, failure)
            .map(|_| current)
            .map_err(TagPtr::from_usize)
    }

    /// Adds `value` to the current tag value, returning the previous marked
    /// pointer.
    ///
    /// This operation directly and unconditionally alters the internal numeric
    /// representation of the atomic marked pointer.
    /// Hence there is no way to reliably guarantee the operation only affects
    /// the tag bits and does not overflow into the pointer bits.
    ///
    /// `fetch_add` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation.
    /// All ordering modes are possible.
    /// Note that using [`Acquire`][acq] makes the store part of this operation
    /// [`Relaxed`][rlx] and using [`Release`][rel] makes the load part
    /// [`Relaxed`][rlx].
    ///
    /// [rlx]: Ordering::Relaxed
    /// [acq]: Ordering::Acquire
    /// [rel]: Ordering::Release
    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// let reference = &mut 1;
    /// let ptr = AtomicTagPtr::new(TagPtr::new(reference));
    ///
    /// assert_eq!(
    ///     ptr.fetch_add(1, Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0)
    /// );
    ///
    /// assert_eq!(
    ///     ptr.load(Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b01)
    /// );
    /// ```
    #[inline]
    pub fn fetch_add(&self, value: usize, order: Ordering) -> TagPtr<T, N> {
        debug_assert!(value < Self::TAG_MASK, "`value` exceeds tag bits (would overflow)");
        TagPtr::from_usize(self.inner.fetch_add(value, order))
    }

    /// Subtracts `value` from the current tag value, returning the previous
    /// marked pointer.
    ///
    /// This operation directly and unconditionally alters the internal numeric
    /// representation of the atomic marked pointer.
    /// Hence there is no way to reliably guarantee the operation only affects
    /// the tag bits and does not overflow into the pointer bits.
    ///
    /// `fetch_sub` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation.
    /// All ordering modes are possible.
    /// Note that using [`Acquire`][acq] makes the store part of this operation
    /// [`Relaxed`][rlx] and using [`Release`][rel] makes the load part
    /// [`Relaxed`][rlx].
    ///
    /// [rlx]: Ordering::Relaxed
    /// [acq]: Ordering::Acquire
    /// [rel]: Ordering::Release
    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// let reference = &mut 1;
    /// let ptr = AtomicTagPtr::new(TagPtr::compose(reference, 0b10));
    ///
    /// assert_eq!(
    ///     ptr.fetch_sub(1, Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b10)
    /// );
    ///
    /// assert_eq!(
    ///     ptr.load(Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b01)
    /// );
    /// ```
    #[inline]
    pub fn fetch_sub(&self, value: usize, order: Ordering) -> TagPtr<T, N> {
        debug_assert!(value < Self::TAG_MASK, "`value` exceeds tag bits (would underflow)");
        TagPtr::from_usize(self.inner.fetch_sub(value, order))
    }

    /// Performs a bitwise "or" of `value` with the current tag value, returning
    /// the previous marked pointer.
    ///
    /// The given `value` is masked with [`Self::TAG_MASK`] before the
    /// operation, so bits outside of the tag range are ignored and the pointer
    /// bits can not be affected.
    ///
    /// `fetch_or` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation.
    /// All ordering modes are possible.
    /// Note that using [`Acquire`][acq] makes the store part of this operation
    /// [`Relaxed`][rlx] and using [`Release`][rel] makes the load part
    /// [`Relaxed`][rlx].
    ///
    /// [rlx]: Ordering::Relaxed
    /// [acq]: Ordering::Acquire
    /// [rel]: Ordering::Release
    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// let reference = &mut 1;
    /// let ptr = AtomicTagPtr::new(TagPtr::compose(reference, 0b10));
    ///
    /// assert_eq!(
    ///     ptr.fetch_or(0b11, Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b10)
    /// );
    ///
    /// assert_eq!(
    ///     ptr.load(Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b11)
    /// );
    /// ```
    #[inline]
    pub fn fetch_or(&self, value: usize, order: Ordering) -> TagPtr<T, N> {
        debug_assert!(value <= Self::TAG_MASK, "`value` exceeds tag bits (would corrupt pointer)");
        TagPtr::from_usize(self.inner.fetch_or(Self::TAG_MASK & value, order))
    }

    /// Performs a bitwise "and" of `value` with the current tag value,
    /// returning the previous marked pointer.
    ///
    /// The given `value` is combined with [`Self::POINTER_MASK`] before the
    /// operation, so all pointer bits are preserved and only the tag bits are
    /// affected.
    ///
    /// `fetch_and` takes an [`Ordering`] argument which describes the
    /// memory ordering of this operation.
    /// All ordering modes are possible.
    /// Note that using [`Acquire`][acq] makes the store part of this operation
    /// [`Relaxed`][rlx] and using [`Release`][rel] makes the load part
    /// [`Relaxed`][rlx].
    ///
    /// [rlx]: Ordering::Relaxed
    /// [acq]: Ordering::Acquire
    /// [rel]: Ordering::Release
    ///
    /// # Examples
    ///
    /// ```
    /// use core::sync::atomic::Ordering;
    ///
    /// type AtomicTagPtr = tagptr::AtomicTagPtr<i32, 2>;
    /// type TagPtr = tagptr::TagPtr<i32, 2>;
    ///
    /// let reference = &mut 1;
    /// let ptr = AtomicTagPtr::new(TagPtr::compose(reference, 0b10));
    ///
    /// // fetch_x methods return the previous value
    /// assert_eq!(
    ///     ptr.fetch_and(0b11, Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b10)
    /// );
    ///
    /// assert_eq!(
    ///     ptr.load(Ordering::Relaxed).decompose(),
    ///     (reference as *mut _, 0b10)
    /// );
    /// ```
    #[inline]
    pub fn fetch_and(&self, value: usize, order: Ordering) -> TagPtr<T, N> {
        debug_assert!(value <= Self::TAG_MASK, "`value` exceeds tag bits (would corrupt pointer)");
        TagPtr::from_usize(self.inner.fetch_and(Self::POINTER_MASK | value, order))
    }
}

/********** impl Debug ****************************************************************************/

impl<T, const N: usize> fmt::Debug for AtomicTagPtr<T, N> {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let (ptr, tag) = self.load(Ordering::SeqCst).decompose();
        f.debug_struct("AtomicTagPtr").field("ptr", &ptr).field("tag", &tag).finish()
    }
}

/********** impl Default **************************************************************************/

impl<T, const N: usize> Default for AtomicTagPtr<T, N> {
    impl_default!();
}

/********** impl From (*mut T) ********************************************************************/

impl<T, const N: usize> From<*mut T> for AtomicTagPtr<T, N> {
    #[inline]
    fn from(ptr: *mut T) -> Self {
        Self::new(ptr.into())
    }
}

/********** impl From (TagPtr<T, N>) **************************************************************/

impl<T, const N: usize> From<TagPtr<T, N>> for AtomicTagPtr<T, N> {
    #[inline]
    fn from(ptr: TagPtr<T, N>) -> Self {
        Self::new(ptr)
    }
}

/********** impl Pointer **************************************************************************/

impl<T, const N: usize> fmt::Pointer for AtomicTagPtr<T, N> {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.load(Ordering::SeqCst), f)
    }
}