pub struct NEON(/* private fields */);
Trait Implementations§
Source§impl InstructionSet for NEON
impl InstructionSet for NEON
Source§impl SIMD128 for NEON
impl SIMD128 for NEON
Source§fn v128_create_zero(self) -> V128
fn v128_create_zero(self) -> V128
T1: SSE2, NEON, WASM128
Source§fn u8x16_splat(self, x: u8) -> V128
fn u8x16_splat(self, x: u8) -> V128
T1: SSE2, NEON, WASM128
Source§fn u16x8_splat(self, x: u16) -> V128
fn u16x8_splat(self, x: u16) -> V128
T1: SSE2, NEON, WASM128
Source§fn u32x4_splat(self, x: u32) -> V128
fn u32x4_splat(self, x: u32) -> V128
T1: SSE2, NEON, WASM128
Source§fn u64x2_splat(self, x: u64) -> V128
fn u64x2_splat(self, x: u64) -> V128
T1: SSE2, NEON, WASM128
Source§fn i8x16_splat(self, x: i8) -> V128
fn i8x16_splat(self, x: i8) -> V128
T1: SSE2, NEON, WASM128
Source§fn i16x8_splat(self, x: i16) -> V128
fn i16x8_splat(self, x: i16) -> V128
T1: SSE2, NEON, WASM128
Source§fn i32x4_splat(self, x: i32) -> V128
fn i32x4_splat(self, x: i32) -> V128
T1: SSE2, NEON, WASM128
Source§fn i64x2_splat(self, x: i64) -> V128
fn i64x2_splat(self, x: i64) -> V128
T1: SSE2, NEON, WASM128
Source§fn u16x8_bswap(self, a: V128) -> V128
fn u16x8_bswap(self, a: V128) -> V128
T1: SSE41, NEON, WASM128
Source§fn u32x4_bswap(self, a: V128) -> V128
fn u32x4_bswap(self, a: V128) -> V128
T1: SSE41, NEON, WASM128
Source§fn u64x2_bswap(self, a: V128) -> V128
fn u64x2_bswap(self, a: V128) -> V128
T1: SSE41, NEON, WASM128
Source§fn u8x16_bitmask(self, a: V128) -> u16
fn u8x16_bitmask(self, a: V128) -> u16
T1: SSE2, WASM128
Source§fn u8x16_reduce_max(self, a: V128) -> u8
fn u8x16_reduce_max(self, a: V128) -> u8
T1: NEON-A64
Source§fn u8x16_reduce_min(self, a: V128) -> u8
fn u8x16_reduce_min(self, a: V128) -> u8
T1: NEON-A64
Source§impl SIMD256 for NEON
impl SIMD256 for NEON
unsafe fn v256_load(self, addr: *const u8) -> V256
unsafe fn v256_load_unaligned(self, addr: *const u8) -> V256
unsafe fn v256_store(self, addr: *mut u8, a: V256)
unsafe fn v256_store_unaligned(self, addr: *mut u8, a: V256)
fn v256_create_zero(self) -> V256
fn v256_not(self, a: V256) -> V256
fn v256_and(self, a: V256, b: V256) -> V256
fn v256_or(self, a: V256, b: V256) -> V256
fn v256_xor(self, a: V256, b: V256) -> V256
fn v256_andnot(self, a: V256, b: V256) -> V256
fn v256_all_zero(self, a: V256) -> bool
fn u8x32_splat(self, x: u8) -> V256
fn u16x16_splat(self, x: u16) -> V256
fn u32x8_splat(self, x: u32) -> V256
fn u64x4_splat(self, x: u64) -> V256
fn i8x32_splat(self, x: i8) -> V256
fn i16x16_splat(self, x: i16) -> V256
fn i32x8_splat(self, x: i32) -> V256
fn i64x4_splat(self, x: i64) -> V256
fn u8x32_add(self, a: V256, b: V256) -> V256
fn u16x16_add(self, a: V256, b: V256) -> V256
fn u32x8_add(self, a: V256, b: V256) -> V256
fn u64x4_add(self, a: V256, b: V256) -> V256
fn u8x32_sub(self, a: V256, b: V256) -> V256
fn u16x16_sub(self, a: V256, b: V256) -> V256
fn u32x8_sub(self, a: V256, b: V256) -> V256
fn u64x4_sub(self, a: V256, b: V256) -> V256
fn u8x32_sub_sat(self, a: V256, b: V256) -> V256
fn u16x16_sub_sat(self, a: V256, b: V256) -> V256
fn i8x32_sub_sat(self, a: V256, b: V256) -> V256
fn i16x16_sub_sat(self, a: V256, b: V256) -> V256
fn i16x16_mul_lo(self, a: V256, b: V256) -> V256
fn i32x8_mul_lo(self, a: V256, b: V256) -> V256
fn u16x16_shl<const IMM8: i32>(self, a: V256) -> V256
fn u32x8_shl<const IMM8: i32>(self, a: V256) -> V256
fn u16x16_shr<const IMM8: i32>(self, a: V256) -> V256
fn u32x8_shr<const IMM8: i32>(self, a: V256) -> V256
fn u8x32_eq(self, a: V256, b: V256) -> V256
fn u16x16_eq(self, a: V256, b: V256) -> V256
fn u32x8_eq(self, a: V256, b: V256) -> V256
fn u8x32_lt(self, a: V256, b: V256) -> V256
fn u16x16_lt(self, a: V256, b: V256) -> V256
fn u32x8_lt(self, a: V256, b: V256) -> V256
fn i8x32_lt(self, a: V256, b: V256) -> V256
fn i16x16_lt(self, a: V256, b: V256) -> V256
fn i32x8_lt(self, a: V256, b: V256) -> V256
fn u8x32_max(self, a: V256, b: V256) -> V256
fn u16x16_max(self, a: V256, b: V256) -> V256
fn u32x8_max(self, a: V256, b: V256) -> V256
fn i8x32_max(self, a: V256, b: V256) -> V256
fn i16x16_max(self, a: V256, b: V256) -> V256
fn i32x8_max(self, a: V256, b: V256) -> V256
fn u8x32_min(self, a: V256, b: V256) -> V256
fn u16x16_min(self, a: V256, b: V256) -> V256
fn u32x8_min(self, a: V256, b: V256) -> V256
fn i8x32_min(self, a: V256, b: V256) -> V256
fn i16x16_min(self, a: V256, b: V256) -> V256
fn i32x8_min(self, a: V256, b: V256) -> V256
fn u8x16x2_swizzle(self, a: V256, b: V256) -> V256
fn u16x16_bswap(self, a: V256) -> V256
fn u32x8_bswap(self, a: V256) -> V256
fn u64x4_bswap(self, a: V256) -> V256
fn u8x32_swizzle(self, a: V256, b: V256) -> V256
fn u8x32_any_zero(self, a: V256) -> bool
fn u8x32_bitmask(self, a: V256) -> u32
fn u8x32_reduce_max(self, a: V256) -> u8
fn u8x32_reduce_min(self, a: V256) -> u8
Source§fn v256_bsl(self, a: V256, b: V256, c: V256) -> V256
fn v256_bsl(self, a: V256, b: V256, c: V256) -> V256
For each bit: if a == 1 { b } else { c }. Read more
fn u16x16_from_u8x16(self, a: V128) -> V256
fn u8x16x2_zip_lo(self, a: V256, b: V256) -> V256
fn u8x16x2_zip_hi(self, a: V256, b: V256) -> V256
fn u16x8x2_zip_lo(self, a: V256, b: V256) -> V256
fn u16x8x2_zip_hi(self, a: V256, b: V256) -> V256
fn u32x4x2_zip_lo(self, a: V256, b: V256) -> V256
fn u32x4x2_zip_hi(self, a: V256, b: V256) -> V256
fn u64x2x2_zip_lo(self, a: V256, b: V256) -> V256
fn u64x2x2_zip_hi(self, a: V256, b: V256) -> V256
fn v128x2_zip_lo(self, a: V256, b: V256) -> V256
fn v128x2_zip_hi(self, a: V256, b: V256) -> V256
fn u64x4_permute<const IMM8: i32>(self, a: V256) -> V256
fn u8x32_unzip_even(self, a: V256, b: V256) -> V256
fn u8x32_unzip_odd(self, a: V256, b: V256) -> V256
fn u64x4_unzip_even(self, a: V256, b: V256) -> V256
fn u64x4_unzip_odd(self, a: V256, b: V256) -> V256
fn u16x16_mul_hi(self, a: V256, b: V256) -> V256
fn i16x16_mul_hi(self, a: V256, b: V256) -> V256
fn i16x16_maddubs(self, a: V256, b: V256) -> V256
fn u32x8_blend<const IMM8: i32>(self, a: V256, b: V256) -> V256
fn i16x16_madd(self, a: V256, b: V256) -> V256
fn u8x32_avgr(self, a: V256, b: V256) -> V256
fn i8x32_add_sat(self, a: V256, b: V256) -> V256
fn u8x32_add_sat(self, a: V256, b: V256) -> V256
impl Copy for NEON
Auto Trait Implementations§
impl Freeze for NEON
impl RefUnwindSafe for NEON
impl Send for NEON
impl Sync for NEON
impl Unpin for NEON
impl UnwindSafe for NEON
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more