Trait vsimd::SIMD128

pub unsafe trait SIMD128: SIMD64 {
    // 87 provided methods; each is listed under "Provided Methods" below.
}
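
A minimal usage sketch (a hypothetical helper, not part of vsimd): a function generic over any SIMD128 token that reports whether a 16-byte block is pure ASCII, i.e. whether no byte has its high bit set. It assumes the instruction-set token is a cheap Copy value, as the crate's zero-sized tokens are.

use vsimd::SIMD128;

// Hypothetical example: true iff no byte in `chunk` has its high bit set.
fn is_ascii_block<S: SIMD128 + Copy>(s: S, chunk: &[u8; 16]) -> bool {
    // SAFETY: `chunk` is a valid 16-byte read; the unaligned load imposes
    // no alignment requirement.
    let v = unsafe { s.v128_load_unaligned(chunk.as_ptr()) };
    let high_bits = s.v128_and(v, s.u8x16_splat(0x80));
    s.v128_all_zero(high_bits)
}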

Provided Methods§

The T1/T2 tags below appear to indicate how directly each operation maps onto the listed instruction sets: T1 suggests a native, single-instruction mapping, while T2 suggests a short emulated sequence.

unsafe fn v128_load(self, addr: *const u8) -> V128

T1: SSE2, NEON, WASM128

unsafe fn v128_load_unaligned(self, addr: *const u8) -> V128

T1: SSE2, NEON, WASM128

unsafe fn v128_store(self, addr: *mut u8, a: V128)

T1: SSE2, NEON, WASM128

unsafe fn v128_store_unaligned(self, addr: *mut u8, a: V128)

T1: SSE2, NEON, WASM128

fn v128_create_zero(self) -> V128

T1: SSE2, NEON, WASM128

fn v128_not(self, a: V128) -> V128

T1: NEON, WASM128

T2: SSE2

fn v128_and(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn v128_or(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn v128_xor(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn v128_andnot(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn v128_all_zero(self, a: V128) -> bool

T1: SSE41, NEON-A64, WASM128

T2: NEON-A32

fn u8x16_splat(self, x: u8) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_splat(self, x: u16) -> V128

T1: SSE2, NEON, WASM128

fn u32x4_splat(self, x: u32) -> V128

T1: SSE2, NEON, WASM128

fn u64x2_splat(self, x: u64) -> V128

T1: SSE2, NEON, WASM128

fn i8x16_splat(self, x: i8) -> V128

T1: SSE2, NEON, WASM128

fn i16x8_splat(self, x: i16) -> V128

T1: SSE2, NEON, WASM128

fn i32x4_splat(self, x: i32) -> V128

T1: SSE2, NEON, WASM128

fn i64x2_splat(self, x: i64) -> V128

T1: SSE2, NEON, WASM128

fn u8x16_add(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_add(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u32x4_add(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u64x2_add(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u8x16_sub(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_sub(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u32x4_sub(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u64x2_sub(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u8x16_sub_sat(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_sub_sat(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i8x16_sub_sat(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i16x8_sub_sat(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i16x8_mul_lo(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i32x4_mul_lo(self, a: V128, b: V128) -> V128

T1: SSE41, NEON, WASM128

fn u16x8_shl<const IMM8: i32>(self, a: V128) -> V128

T1: SSE2, NEON, WASM128

fn u32x4_shl<const IMM8: i32>(self, a: V128) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_shr<const IMM8: i32>(self, a: V128) -> V128

T1: SSE2, NEON, WASM128

fn u32x4_shr<const IMM8: i32>(self, a: V128) -> V128

T1: SSE2, NEON, WASM128

fn u8x16_eq(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_eq(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u32x4_eq(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u8x16_lt(self, a: V128, b: V128) -> V128

T1: NEON, WASM128

T2: SSE2

fn u16x8_lt(self, a: V128, b: V128) -> V128

T1: NEON, WASM128

T2: SSE2

fn u32x4_lt(self, a: V128, b: V128) -> V128

T1: NEON, WASM128

T2: SSE2

fn i8x16_lt(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i16x8_lt(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i32x4_lt(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u8x16_max(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_max(self, a: V128, b: V128) -> V128

T1: SSE41, NEON, WASM128

fn u32x4_max(self, a: V128, b: V128) -> V128

T1: SSE41, NEON, WASM128

fn i8x16_max(self, a: V128, b: V128) -> V128

T1: SSE41, NEON, WASM128

fn i16x8_max(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i32x4_max(self, a: V128, b: V128) -> V128

T1: SSE41, NEON, WASM128

fn u8x16_min(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_min(self, a: V128, b: V128) -> V128

T1: SSE41, NEON, WASM128

fn u32x4_min(self, a: V128, b: V128) -> V128

T1: SSE41, NEON, WASM128

fn i8x16_min(self, a: V128, b: V128) -> V128

T1: SSE41, NEON, WASM128

fn i16x8_min(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i32x4_min(self, a: V128, b: V128) -> V128

T1: SSE41, NEON, WASM128

fn u8x16_swizzle(self, a: V128, b: V128) -> V128

T1: SSSE3, NEON-A64, WASM128

T2: NEON-A32

fn u16x8_bswap(self, a: V128) -> V128

T1: SSE41, NEON, WASM128

fn u32x4_bswap(self, a: V128) -> V128

T1: SSE41, NEON, WASM128

fn u64x2_bswap(self, a: V128) -> V128

T1: SSE41, NEON, WASM128

fn u8x16_any_zero(self, a: V128) -> bool

T1: NEON-A64, WASM128

T2: SSE2, NEON-A32

fn u8x16_bitmask(self, a: V128) -> u16

T1: SSE2, WASM128
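
The lane-to-bit mapping is not spelled out here; the following scalar model is an assumption based on SSE2's _mm_movemask_epi8 and the WASM SIMD bitmask instruction, where bit i of the result is the high bit of byte lane i.

// Scalar model of the assumed semantics; not taken from the crate.
fn u8x16_bitmask_scalar(a: [u8; 16]) -> u16 {
    let mut mask = 0u16;
    for (i, &byte) in a.iter().enumerate() {
        mask |= u16::from(byte >> 7) << i;
    }
    mask
}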

fn u8x16_reduce_max(self, a: V128) -> u8

T1: NEON-A64

fn u8x16_reduce_min(self, a: V128) -> u8

T1: NEON-A64

fn v128_bsl(self, a: V128, b: V128, c: V128) -> V128

T1: NEON

T2: SSE2, WASM128

fn u8x16_zip_lo(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u8x16_zip_hi(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_zip_lo(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u16x8_zip_hi(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u32x4_zip_lo(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u32x4_zip_hi(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u64x2_zip_lo(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u64x2_zip_hi(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u8x16_unzip_even(self, a: V128, b: V128) -> V128

T1: NEON, WASM128

fn u8x16_unzip_odd(self, a: V128, b: V128) -> V128

T1: NEON, WASM128

fn u16x8_mul_hi(self, a: V128, b: V128) -> V128

T1: SSE2

fn i16x8_mul_hi(self, a: V128, b: V128) -> V128

T1: SSE2

fn i16x8_maddubs(self, a: V128, b: V128) -> V128

T1: SSSE3

fn u16x8_blend<const IMM8: i32>(self, a: V128, b: V128) -> V128

T1: SSE41

fn u8x16_blendv(self, a: V128, b: V128, c: V128) -> V128

if highbit(c) { b } else { a }

T1: SSE41
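
A byte-wise scalar model of the documented `if highbit(c) { b } else { a }` selection (a sketch, not the crate's implementation):

// For each byte lane, pick from `b` when the high bit of `c` is set,
// otherwise pick from `a`.
fn u8x16_blendv_scalar(a: [u8; 16], b: [u8; 16], c: [u8; 16]) -> [u8; 16] {
    let mut out = [0u8; 16];
    for i in 0..16 {
        out[i] = if c[i] & 0x80 != 0 { b[i] } else { a[i] };
    }
    out
}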

fn i16x8_madd(self, a: V128, b: V128) -> V128

T1: SSE2

fn u8x16_avgr(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i8x16_add_sat(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn u8x16_add_sat(self, a: V128, b: V128) -> V128

T1: SSE2, NEON, WASM128

fn i16x8_packus(self, a: V128, b: V128) -> V128

T1: SSE2

Dyn Compatibility§

This trait is not dyn compatible.

In older versions of Rust, dyn compatibility was called "object safety", so this trait is not object safe.
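
A consequence, sketched below with a hypothetical function: code generic over an instruction set must use a type parameter rather than a trait object.

use vsimd::SIMD128;

// fn f(s: &dyn SIMD128) -> bool { ... }   // rejected: not dyn compatible,
//                                         // e.g. because of const-generic
//                                         // methods such as `u16x8_shl`.
fn f<S: SIMD128 + Copy>(s: S) -> bool {    // fine: static dispatch
    // Trivial body, just to show that calls resolve statically.
    s.v128_all_zero(s.v128_create_zero())
}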

Implementors§