#![cfg_attr(not(all(test, feature = "float")), allow(dead_code, unused_macros))]
use core::{cell::UnsafeCell, ops, sync::atomic::Ordering};
use crate::hint;
// Compile-time assertion: the build fails (not the test run) when `$cond` is false.
//
// Trick: `[(); true as usize]` has length 1, while the initializer array has
// length `$cond as usize`; the two types only unify when `$cond` is `true`.
// Routing through `$crate::utils::_assert_is_bool` forces `$cond` to actually
// type-check as a `bool`.
#[cfg(not(portable_atomic_no_underscore_consts))]
macro_rules! static_assert {
    ($cond:expr $(,)?) => {
        const _: [(); true as usize] = [(); $crate::utils::_assert_is_bool($cond) as usize];
    };
}
// Identity function used by `static_assert!` to reject non-`bool` conditions:
// a non-`bool` argument fails to type-check here at compile time.
#[cfg(not(portable_atomic_no_underscore_consts))]
pub(crate) const fn _assert_is_bool(v: bool) -> bool {
    v
}
// Fallback for toolchains where `const _` items are unavailable (per the cfg
// name — presumably pre-underscore-const compilers): the assertion is simply
// skipped rather than emulated.
#[cfg(portable_atomic_no_underscore_consts)]
macro_rules! static_assert {
    ($($tt:tt)*) => {};
}
// Compile-time layout checks tying an atomic wrapper type to its underlying
// value type.
//
// The base arm `($atomic_type, $value_type, $align)` asserts:
//   * align_of::<$atomic_type>() == size_of::<$atomic_type>()
//   * size_of::<$atomic_type>()  == size_of::<$value_type>()
//   * align_of::<$atomic_type>() == $align
// All other arms forward to the unsigned integer of the same width, which is
// the arm carrying the concrete `$align` value; `usize`/`isize`/pointer arms
// pick the alignment from `target_pointer_width`.
macro_rules! static_assert_layout {
    ($atomic_type:ty, $value_type:ident, $align:expr) => {
        static_assert!(
            core::mem::align_of::<$atomic_type>() == core::mem::size_of::<$atomic_type>()
        );
        static_assert!(core::mem::size_of::<$atomic_type>() == core::mem::size_of::<$value_type>());
        static_assert!(core::mem::align_of::<$atomic_type>() == $align);
    };
    // 8-bit group: bool and i8 forward to u8, which carries the alignment (1).
    ($atomic_type:ty, bool) => {
        static_assert_layout!($atomic_type, u8);
    };
    ($atomic_type:ty, i8) => {
        static_assert_layout!($atomic_type, u8);
    };
    ($atomic_type:ty, u8) => {
        static_assert_layout!($atomic_type, u8, 1);
    };
    // 16-bit group.
    ($atomic_type:ty, i16) => {
        static_assert_layout!($atomic_type, u16);
    };
    ($atomic_type:ty, u16) => {
        static_assert_layout!($atomic_type, u16, 2);
    };
    // 32-bit group (f32 shares u32's layout requirements).
    ($atomic_type:ty, i32) => {
        static_assert_layout!($atomic_type, u32);
    };
    ($atomic_type:ty, u32) => {
        static_assert_layout!($atomic_type, u32, 4);
    };
    ($atomic_type:ty, f32) => {
        static_assert_layout!($atomic_type, u32);
    };
    // 64-bit group (f64 shares u64's layout requirements).
    ($atomic_type:ty, i64) => {
        static_assert_layout!($atomic_type, u64);
    };
    ($atomic_type:ty, u64) => {
        static_assert_layout!($atomic_type, u64, 8);
    };
    ($atomic_type:ty, f64) => {
        static_assert_layout!($atomic_type, u64);
    };
    // 128-bit group.
    ($atomic_type:ty, i128) => {
        static_assert_layout!($atomic_type, u128);
    };
    ($atomic_type:ty, u128) => {
        static_assert_layout!($atomic_type, u128, 16);
    };
    // Pointer-width group: raw pointers and isize forward to usize, whose
    // alignment depends on the target's pointer width.
    ($atomic_type:ty, *mut ()) => {
        static_assert_layout!($atomic_type, usize);
    };
    ($atomic_type:ty, isize) => {
        static_assert_layout!($atomic_type, usize);
    };
    ($atomic_type:ty, usize) => {
        #[cfg(target_pointer_width = "16")]
        static_assert_layout!($atomic_type, usize, 2);
        #[cfg(target_pointer_width = "32")]
        static_assert_layout!($atomic_type, usize, 4);
        #[cfg(target_pointer_width = "64")]
        static_assert_layout!($atomic_type, usize, 8);
        #[cfg(target_pointer_width = "128")]
        static_assert_layout!($atomic_type, usize, 16);
    };
}
// Like `core::hint::unreachable_unchecked`, but panics via `unreachable!` in
// debug builds instead of invoking undefined behavior.
//
// SAFETY: reaching this in a release build is undefined behavior, exactly as
// with `core::hint::unreachable_unchecked`. Note that the expansion is NOT
// wrapped in an `unsafe` block, so the macro must be expanded inside an
// existing `unsafe` context.
#[allow(unused_macros)]
macro_rules! unreachable_unchecked {
    ($($tt:tt)*) => {
        // `cfg!(debug_assertions)` is a compile-time constant, so release
        // builds reduce to the bare `unreachable_unchecked` call.
        if cfg!(debug_assertions) {
            unreachable!($($tt)*);
        } else {
            core::hint::unreachable_unchecked()
        }
    };
}
// Attaches the expression `$doc` as a `#[doc = ...]` attribute to the item
// that follows, allowing doc text to be built with `concat!` and friends.
macro_rules! doc_comment {
    ($doc:expr, $($tt:tt)*) => {
        #[doc = $doc]
        $($tt)*
    };
}
// Implements `serde::Serialize`/`Deserialize` for an atomic type by delegating
// to the inner value: serialize through a `SeqCst` load, deserialize through
// `Self::new`. Only compiled with the "serde" feature.
macro_rules! serde_impls {
    ($atomic_type:ident) => {
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl serde::Serialize for $atomic_type {
            // Loads the current value and forwards to its `Serialize` impl.
            #[allow(clippy::missing_inline_in_public_items)] fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: serde::Serializer,
            {
                self.load(Ordering::SeqCst).serialize(serializer)
            }
        }
        #[cfg(feature = "serde")]
        #[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
        impl<'de> serde::Deserialize<'de> for $atomic_type {
            // Deserializes the inner value, then wraps it with `Self::new`.
            #[allow(clippy::missing_inline_in_public_items)] fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where
                D: serde::Deserializer<'de>,
            {
                serde::Deserialize::deserialize(deserializer).map(Self::new)
            }
        }
    };
}
// Generates a self-initializing indirect function call ("ifunc"-style dispatch).
//
// On the first call `FUNC` points at the local `detect` shim, which evaluates
// `$if_block` to pick the real implementation, caches it in `FUNC`, and then
// forwards the call. Subsequent calls load the cached pointer and jump to it
// directly. `Relaxed` ordering suffices because the pointer value is the only
// shared state; if several threads race on the first call, each merely runs
// `detect` once more, which is benign.
//
// SAFETY (unsafe-fn arm): the final `transmute` + call is NOT wrapped in an
// `unsafe` block, so that arm must be expanded inside an existing `unsafe`
// context, and the caller must uphold the contract of the selected function.
#[allow(unused_macros)]
macro_rules! ifunc {
    (unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?; $if_block:expr) => {{
        // Function pointers are stored type-erased in an `AtomicPtr<()>`.
        type FnRaw = *mut ();
        type FnTy = unsafe fn($($arg_ty),*) $(-> $ret_ty)?;
        static FUNC: core::sync::atomic::AtomicPtr<()>
            = core::sync::atomic::AtomicPtr::new(detect as FnRaw);
        // Cold: runs at most a handful of times (once per racing thread).
        #[cold]
        unsafe fn detect($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            let func: FnTy = $if_block;
            FUNC.store(func as FnRaw, core::sync::atomic::Ordering::Relaxed);
            // SAFETY: caller upholds the selected function's contract.
            unsafe { func($($arg_pat),*) }
        }
        let func = FUNC.load(core::sync::atomic::Ordering::Relaxed);
        core::mem::transmute::<FnRaw, FnTy>(func)($($arg_pat),*)
    }};
    // Safe-fn variant: same caching scheme, but only the pointer transmute
    // itself needs (and gets) an `unsafe` block.
    (fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?; $if_block:expr) => {{
        type FnRaw = *mut ();
        type FnTy = fn($($arg_ty),*) $(-> $ret_ty)?;
        static FUNC: core::sync::atomic::AtomicPtr<()>
            = core::sync::atomic::AtomicPtr::new(detect as FnRaw);
        #[cold]
        fn detect($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            let func: FnTy = $if_block;
            FUNC.store(func as FnRaw, core::sync::atomic::Ordering::Relaxed);
            func($($arg_pat),*)
        }
        // SAFETY: `FUNC` only ever holds `detect` or the `FnTy` produced by
        // `$if_block`, so transmuting back to `FnTy` is sound.
        let func = unsafe {
            core::mem::transmute::<FnRaw, FnTy>(FUNC.load(core::sync::atomic::Ordering::Relaxed))
        };
        func($($arg_pat),*)
    }};
}
// Zero-sized marker that is `Sync` but not unwind-safe: `UnsafeCell`
// suppresses the `RefUnwindSafe` auto trait. Embed this in a type to opt it
// out of unwind safety without affecting its layout.
pub(crate) struct NoRefUnwindSafe(UnsafeCell<()>);
// SAFETY: this is a ZST wrapping `()`; there is no data to race on.
unsafe impl Sync for NoRefUnwindSafe {}
// Maps a compare-exchange success ordering to the strongest failure ordering
// that may accompany it (a failure ordering can never be `Release`/`AcqRel`,
// so those map down to `Relaxed`/`Acquire` respectively).
#[inline]
pub(crate) fn strongest_failure_ordering(order: Ordering) -> Ordering {
    match order {
        Ordering::Release | Ordering::Relaxed => Ordering::Relaxed,
        Ordering::SeqCst => Ordering::SeqCst,
        Ordering::Acquire | Ordering::AcqRel => Ordering::Acquire,
        // `Ordering` is #[non_exhaustive]; no other variants exist today.
        _ => unreachable!("{:?}", order),
    }
}
/// Panics if `order` is not a valid memory ordering for an atomic load.
///
/// `Acquire`, `Relaxed`, and `SeqCst` are accepted; `Release` and `AcqRel`
/// have no meaning for loads and panic with a descriptive message.
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_load_ordering(order: Ordering) {
    match order {
        Ordering::Release => panic!("there is no such thing as a release load"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release load"),
        Ordering::Relaxed | Ordering::Acquire | Ordering::SeqCst => {}
        // `Ordering` is #[non_exhaustive]; no other variants exist today.
        _ => unreachable!("{:?}", order),
    }
}
/// Panics if `order` is not a valid memory ordering for an atomic store.
///
/// `Release`, `Relaxed`, and `SeqCst` are accepted; `Acquire` and `AcqRel`
/// have no meaning for stores and panic with a descriptive message.
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_store_ordering(order: Ordering) {
    match order {
        Ordering::Acquire => panic!("there is no such thing as an acquire store"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release store"),
        Ordering::Relaxed | Ordering::Release | Ordering::SeqCst => {}
        // `Ordering` is #[non_exhaustive]; no other variants exist today.
        _ => unreachable!("{:?}", order),
    }
}
/// Asserts that `order` is valid for an atomic swap/RMW operation.
///
/// All five orderings are allowed for swaps, so this only guards against a
/// hypothetical future `Ordering` variant (`Ordering` is `#[non_exhaustive]`).
///
/// Fix: carries the same `track_caller` cfg_attr as the sibling
/// `assert_load_ordering`/`assert_store_ordering`/`assert_compare_exchange_ordering`
/// functions, so a debug-build panic reports the caller's location instead of
/// this file.
#[allow(dead_code)]
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_swap_ordering(order: Ordering) {
    match order {
        Ordering::AcqRel
        | Ordering::Acquire
        | Ordering::Relaxed
        | Ordering::Release
        | Ordering::SeqCst => {}
        _ => unreachable!("{:?}", order),
    }
}
/// Panics if `(success, failure)` is not a valid ordering pair for a
/// compare-exchange operation.
///
/// Any of the five orderings is accepted for `success`; `failure` must be one
/// of `Acquire`/`Relaxed`/`SeqCst` (`Release` and `AcqRel` have no meaning for
/// the failure path). The success ordering is validated first, matching the
/// panic order callers observe.
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn assert_compare_exchange_ordering(success: Ordering, failure: Ordering) {
    match success {
        Ordering::Relaxed
        | Ordering::Release
        | Ordering::Acquire
        | Ordering::AcqRel
        | Ordering::SeqCst => {}
        // `Ordering` is #[non_exhaustive]; no other variants exist today.
        _ => unreachable!("{:?}, {:?}", success, failure),
    }
    match failure {
        Ordering::Release => panic!("there is no such thing as a release failure ordering"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release failure ordering"),
        Ordering::Relaxed | Ordering::Acquire | Ordering::SeqCst => {}
        _ => unreachable!("{:?}, {:?}", success, failure),
    }
}
/// Strengthens a compare-exchange success ordering so that it is at least as
/// strong as the failure ordering (for backends whose primitive takes a single
/// ordering): a `SeqCst` failure forces `SeqCst`, an `Acquire` failure lifts
/// `Relaxed` to `Acquire` and `Release` to `AcqRel`; otherwise `success` is
/// already strong enough and is returned unchanged.
#[allow(dead_code)]
#[inline]
pub(crate) fn upgrade_success_ordering(success: Ordering, failure: Ordering) -> Ordering {
    match failure {
        Ordering::SeqCst => Ordering::SeqCst,
        Ordering::Acquire => match success {
            Ordering::Relaxed => Ordering::Acquire,
            Ordering::Release => Ordering::AcqRel,
            other => other,
        },
        _ => success,
    }
}
// Pads/aligns a value so it occupies its own cache line, avoiding false
// sharing between adjacent atomics. Per-arch alignments: 128 for
// x86_64/aarch64/powerpc64, 256 for s390x, 32 for arm/mips/mips64/riscv64,
// 64 for everything else. NOTE(review): these values appear to follow
// crossbeam-utils' `CachePadded`; confirm against that crate before changing.
#[cfg_attr(
    any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64"),
    repr(align(128))
)]
#[cfg_attr(
    any(
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "riscv64",
    ),
    repr(align(32))
)]
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
#[cfg_attr(
    not(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64",
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips64",
        target_arch = "riscv64",
        target_arch = "s390x",
    )),
    repr(align(64))
)]
pub(crate) struct CachePadded<T> {
    value: T,
}
impl<T> CachePadded<T> {
    // Wraps `value` in the cache-line-aligned container; `const` so it can be
    // used in statics.
    #[inline]
    pub(crate) const fn new(value: T) -> Self {
        Self { value }
    }
}
// Transparent shared access to the padded value.
impl<T> ops::Deref for CachePadded<T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        &self.value
    }
}
// Transparent mutable access to the padded value.
impl<T> ops::DerefMut for CachePadded<T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        &mut self.value
    }
}
/// Exponential backoff for contended spin loops.
///
/// Each `snooze` busy-waits for `2^step` spin hints, doubling the wait on
/// every call until `step` passes `SPIN_LIMIT`; after that it yields to the
/// OS scheduler when `std` is available, or keeps spinning at the capped
/// length otherwise.
pub(crate) struct Backoff {
    step: u32,
}

// Exponent cap for the growing-spin phase of `Backoff::snooze`.
const SPIN_LIMIT: u32 = 4;

impl Backoff {
    /// Starts a fresh backoff sequence (shortest wait first).
    #[inline]
    pub(crate) fn new() -> Self {
        Backoff { step: 0 }
    }

    /// Waits once, lengthening the wait on each successive call.
    #[inline]
    pub(crate) fn snooze(&mut self) {
        if self.step > SPIN_LIMIT {
            // Saturated: hand the CPU to another thread when a scheduler
            // exists; without `std`, keep spinning at the capped length.
            #[cfg(feature = "std")]
            std::thread::yield_now();
            #[cfg(not(feature = "std"))]
            for _ in 0..1 << self.step {
                hint::spin_loop();
            }
        } else {
            // Busy-wait for 2^step hints, then double the next wait.
            for _ in 0..1 << self.step {
                hint::spin_loop();
            }
            self.step += 1;
        }
    }
}