use crate::isa::InstructionSet;
use crate::pod::POD;
use crate::vector::{V128, V256};
use crate::{mask::*, unified};
use crate::{SIMD128, SIMD256};

/// Width-generic SIMD operations, implemented for both [`V128`] and [`V256`]
/// so that a kernel can be written once against this trait and used at
/// either vector width.
///
/// Element-wise methods are named `{element}xn_{op}`: the element type is
/// fixed, while the lane count `n` depends on the vector width.
pub unsafe trait Scalable<V: POD>: InstructionSet {
    // Provided methods forward to the width-dispatching helpers in `unified`.

    #[inline(always)]
    fn and(self, a: V, b: V) -> V {
        unified::and(self, a, b)
    }

    #[inline(always)]
    fn or(self, a: V, b: V) -> V {
        unified::or(self, a, b)
    }

    #[inline(always)]
    fn xor(self, a: V, b: V) -> V {
        unified::xor(self, a, b)
    }

    #[inline(always)]
    fn andnot(self, a: V, b: V) -> V {
        unified::andnot(self, a, b)
    }

    #[inline(always)]
    fn u8xn_splat(self, x: u8) -> V {
        unified::splat::<_, u8, _>(self, x)
    }

    #[inline(always)]
    fn i8xn_splat(self, x: i8) -> V {
        unified::splat::<_, i8, _>(self, x)
    }

    #[inline(always)]
    fn u32xn_splat(self, x: u32) -> V {
        unified::splat::<_, u32, _>(self, x)
    }

    #[inline(always)]
    fn u8xn_add(self, a: V, b: V) -> V {
        unified::add::<_, u8, _>(self, a, b)
    }

    #[inline(always)]
    fn u8xn_sub(self, a: V, b: V) -> V {
        unified::sub::<_, u8, _>(self, a, b)
    }

    #[inline(always)]
    fn u32xn_sub(self, a: V, b: V) -> V {
        unified::sub::<_, u32, _>(self, a, b)
    }

    #[inline(always)]
    fn u8xn_add_sat(self, a: V, b: V) -> V {
        unified::add_sat::<_, u8, _>(self, a, b)
    }

    #[inline(always)]
    fn i8xn_add_sat(self, a: V, b: V) -> V {
        unified::add_sat::<_, i8, _>(self, a, b)
    }

    #[inline(always)]
    fn u8xn_sub_sat(self, a: V, b: V) -> V {
        unified::sub_sat::<_, u8, _>(self, a, b)
    }

    #[inline(always)]
    fn u8xn_eq(self, a: V, b: V) -> V {
        unified::eq::<_, u8, _>(self, a, b)
    }

    #[inline(always)]
    fn i8xn_lt(self, a: V, b: V) -> V {
        unified::lt::<_, i8, _>(self, a, b)
    }

    #[inline(always)]
    fn u32xn_lt(self, a: V, b: V) -> V {
        unified::lt::<_, u32, _>(self, a, b)
    }

    #[inline(always)]
    fn u32xn_max(self, a: V, b: V) -> V {
        unified::max::<_, u32, _>(self, a, b)
    }

    // Width-specific operations, implemented per vector width below.

    fn u16xn_shl<const IMM8: i32>(self, a: V) -> V;

    fn u16xn_shr<const IMM8: i32>(self, a: V) -> V;
    fn u32xn_shr<const IMM8: i32>(self, a: V) -> V;

    fn u8xn_avgr(self, a: V, b: V) -> V;

    fn u8x16xn_swizzle(self, a: V, b: V) -> V;

    fn all_zero(self, a: V) -> bool;

    fn mask8xn_all(self, a: V) -> bool;
    fn mask8xn_any(self, a: V) -> bool;

    fn u8xn_highbit_all(self, a: V) -> bool;
    fn u8xn_highbit_any(self, a: V) -> bool;

    fn u16xn_bswap(self, a: V) -> V;
    fn u32xn_bswap(self, a: V) -> V;
    fn u64xn_bswap(self, a: V) -> V;
}

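// A minimal usage sketch: since `Scalable` is implemented for both `V128`
// and `V256`, a kernel written against it monomorphizes to either width.
// `contains_byte` is a hypothetical helper (not part of this crate) that
// uses only methods defined on the trait above:
//
//     fn contains_byte<S, V>(s: S, a: V, needle: u8) -> bool
//     where
//         V: POD,
//         S: Scalable<V>,
//     {
//         let eq = s.u8xn_eq(a, s.u8xn_splat(needle));
//         s.mask8xn_any(eq)
//     }
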
unsafe impl<S> Scalable<V128> for S
where
    S: SIMD128,
{
    #[inline(always)]
    fn u16xn_shl<const IMM8: i32>(self, a: V128) -> V128 {
        self.u16x8_shl::<IMM8>(a)
    }

    #[inline(always)]
    fn u16xn_shr<const IMM8: i32>(self, a: V128) -> V128 {
        self.u16x8_shr::<IMM8>(a)
    }

    #[inline(always)]
    fn u32xn_shr<const IMM8: i32>(self, a: V128) -> V128 {
        self.u32x4_shr::<IMM8>(a)
    }

    #[inline(always)]
    fn u8xn_avgr(self, a: V128, b: V128) -> V128 {
        self.u8x16_avgr(a, b)
    }

    #[inline(always)]
    fn u8x16xn_swizzle(self, a: V128, b: V128) -> V128 {
        self.u8x16_swizzle(a, b)
    }

    #[inline(always)]
    fn all_zero(self, a: V128) -> bool {
        self.v128_all_zero(a)
    }

    #[inline(always)]
    fn mask8xn_all(self, a: V128) -> bool {
        mask8x16_all(self, a)
    }

    #[inline(always)]
    fn mask8xn_any(self, a: V128) -> bool {
        mask8x16_any(self, a)
    }

    #[inline(always)]
    fn u8xn_highbit_all(self, a: V128) -> bool {
        u8x16_highbit_all(self, a)
    }

    #[inline(always)]
    fn u8xn_highbit_any(self, a: V128) -> bool {
        u8x16_highbit_any(self, a)
    }

    #[inline(always)]
    fn u16xn_bswap(self, a: V128) -> V128 {
        self.u16x8_bswap(a)
    }

    #[inline(always)]
    fn u32xn_bswap(self, a: V128) -> V128 {
        self.u32x4_bswap(a)
    }

    #[inline(always)]
    fn u64xn_bswap(self, a: V128) -> V128 {
        self.u64x2_bswap(a)
    }
}

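// Example: the `u8xn_highbit_*` tests make ASCII checks cheap at either
// width. A hypothetical sketch (not part of this crate):
//
//     fn is_ascii<S, V>(s: S, a: V) -> bool
//     where
//         V: POD,
//         S: Scalable<V>,
//     {
//         // ASCII bytes never have the top bit set.
//         !s.u8xn_highbit_any(a)
//     }
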
unsafe impl<S> Scalable<V256> for S
where
    S: SIMD256,
{
    #[inline(always)]
    fn u16xn_shl<const IMM8: i32>(self, a: V256) -> V256 {
        self.u16x16_shl::<IMM8>(a)
    }

    #[inline(always)]
    fn u16xn_shr<const IMM8: i32>(self, a: V256) -> V256 {
        self.u16x16_shr::<IMM8>(a)
    }

    #[inline(always)]
    fn u32xn_shr<const IMM8: i32>(self, a: V256) -> V256 {
        self.u32x8_shr::<IMM8>(a)
    }

    #[inline(always)]
    fn u8xn_avgr(self, a: V256, b: V256) -> V256 {
        self.u8x32_avgr(a, b)
    }

    #[inline(always)]
    fn u8x16xn_swizzle(self, a: V256, b: V256) -> V256 {
        self.u8x16x2_swizzle(a, b)
    }

    #[inline(always)]
    fn all_zero(self, a: V256) -> bool {
        self.v256_all_zero(a)
    }

    #[inline(always)]
    fn mask8xn_all(self, a: V256) -> bool {
        mask8x32_all(self, a)
    }

    #[inline(always)]
    fn mask8xn_any(self, a: V256) -> bool {
        mask8x32_any(self, a)
    }

    #[inline(always)]
    fn u8xn_highbit_all(self, a: V256) -> bool {
        u8x32_highbit_all(self, a)
    }

    #[inline(always)]
    fn u8xn_highbit_any(self, a: V256) -> bool {
        u8x32_highbit_any(self, a)
    }

    #[inline(always)]
    fn u16xn_bswap(self, a: V256) -> V256 {
        self.u16x16_bswap(a)
    }

    #[inline(always)]
    fn u32xn_bswap(self, a: V256) -> V256 {
        self.u32x8_bswap(a)
    }

    #[inline(always)]
    fn u64xn_bswap(self, a: V256) -> V256 {
        self.u64x4_bswap(a)
    }
}
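
// Example: saturating arithmetic turns lane-wise range checks into a single
// `all_zero` test. A hypothetical sketch (not part of this crate): every
// byte of `a` is at most `max` iff the saturating subtraction of `max`
// yields zero in every lane.
//
//     fn all_le<S, V>(s: S, a: V, max: u8) -> bool
//     where
//         V: POD,
//         S: Scalable<V>,
//     {
//         s.all_zero(s.u8xn_sub_sat(a, s.u8xn_splat(max)))
//     }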