use super::*;

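// `pick!` chooses the backing storage at compile time: a single 256-bit
// register on AVX2, a pair of 128-bit registers on SSE2 or wasm `simd128`,
// and a plain `[i64; 4]` array as the portable fallback.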
pick! {
  if #[cfg(target_feature="avx2")] {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct i64x4 { avx2: m256i }
  } else if #[cfg(target_feature="sse2")] {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct i64x4 { sse0: m128i, sse1: m128i }
  } else if #[cfg(target_feature="simd128")] {
    use core::arch::wasm32::*;

    #[derive(Clone, Copy)]
    #[repr(C, align(32))]
    pub struct i64x4 { simd0: v128, simd1: v128 }

    impl Default for i64x4 {
      fn default() -> Self {
        Self::splat(0)
      }
    }

    impl PartialEq for i64x4 {
      fn eq(&self, other: &Self) -> bool {
        !v128_any_true(v128_or(v128_xor(self.simd0, other.simd0), v128_xor(self.simd1, other.simd1)))
      }
    }

    impl Eq for i64x4 { }
  } else {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(32))]
    pub struct i64x4 { arr: [i64;4] }
  }
}

int_uint_consts!(i64, 4, i64x4, i64x4, i64a4, const_i64_as_i64x4, 256);

unsafe impl Zeroable for i64x4 {}
unsafe impl Pod for i64x4 {}

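// Element-wise arithmetic; overflow wraps in every backend, matching the
// `wrapping_*` calls in the scalar fallback paths below.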
impl Add for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: add_i64_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse2")] {
        Self { sse0: add_i64_m128i(self.sse0, rhs.sse0), sse1: add_i64_m128i(self.sse1, rhs.sse1) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: i64x2_add(self.simd0, rhs.simd0), simd1: i64x2_add(self.simd1, rhs.simd1) }
      } else {
        Self { arr: [
          self.arr[0].wrapping_add(rhs.arr[0]),
          self.arr[1].wrapping_add(rhs.arr[1]),
          self.arr[2].wrapping_add(rhs.arr[2]),
          self.arr[3].wrapping_add(rhs.arr[3]),
        ]}
      }
    }
  }
}

impl Sub for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: sub_i64_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse2")] {
        Self { sse0: sub_i64_m128i(self.sse0, rhs.sse0), sse1: sub_i64_m128i(self.sse1, rhs.sse1) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: i64x2_sub(self.simd0, rhs.simd0), simd1: i64x2_sub(self.simd1, rhs.simd1) }
      } else {
        Self { arr: [
          self.arr[0].wrapping_sub(rhs.arr[0]),
          self.arr[1].wrapping_sub(rhs.arr[1]),
          self.arr[2].wrapping_sub(rhs.arr[2]),
          self.arr[3].wrapping_sub(rhs.arr[3]),
        ]}
      }
    }
  }
}

impl Mul for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="simd128")] {
        Self { simd0: i64x2_mul(self.simd0, rhs.simd0), simd1: i64x2_mul(self.simd1, rhs.simd1) }
      } else {
        // SSE2/AVX2 have no 64-bit lane multiply, so fall back to scalar math.
        let arr1: [i64; 4] = cast(self);
        let arr2: [i64; 4] = cast(rhs);
        cast([
          arr1[0].wrapping_mul(arr2[0]),
          arr1[1].wrapping_mul(arr2[1]),
          arr1[2].wrapping_mul(arr2[2]),
          arr1[3].wrapping_mul(arr2[3]),
        ])
      }
    }
  }
}

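// Operators that mix a scalar `i64` with an `i64x4` splat the scalar across
// all four lanes and then reuse the vector-vector impls above.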
impl Add<i64> for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn add(self, rhs: i64) -> Self::Output {
    self.add(Self::splat(rhs))
  }
}

impl Sub<i64> for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn sub(self, rhs: i64) -> Self::Output {
    self.sub(Self::splat(rhs))
  }
}

impl Mul<i64> for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn mul(self, rhs: i64) -> Self::Output {
    self.mul(Self::splat(rhs))
  }
}

impl Add<i64x4> for i64 {
  type Output = i64x4;
  #[inline]
  #[must_use]
  fn add(self, rhs: i64x4) -> Self::Output {
    i64x4::splat(self).add(rhs)
  }
}

impl Sub<i64x4> for i64 {
  type Output = i64x4;
  #[inline]
  #[must_use]
  fn sub(self, rhs: i64x4) -> Self::Output {
    i64x4::splat(self).sub(rhs)
  }
}

impl Mul<i64x4> for i64 {
  type Output = i64x4;
  #[inline]
  #[must_use]
  fn mul(self, rhs: i64x4) -> Self::Output {
    i64x4::splat(self).mul(rhs)
  }
}

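// The bitwise operators act on all 256 bits of the vector.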
impl BitAnd for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: bitand_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse2")] {
        Self { sse0: bitand_m128i(self.sse0, rhs.sse0), sse1: bitand_m128i(self.sse1, rhs.sse1) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: v128_and(self.simd0, rhs.simd0), simd1: v128_and(self.simd1, rhs.simd1) }
      } else {
        Self { arr: [
          self.arr[0].bitand(rhs.arr[0]),
          self.arr[1].bitand(rhs.arr[1]),
          self.arr[2].bitand(rhs.arr[2]),
          self.arr[3].bitand(rhs.arr[3]),
        ]}
      }
    }
  }
}

impl BitOr for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: bitor_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse2")] {
        Self { sse0: bitor_m128i(self.sse0, rhs.sse0), sse1: bitor_m128i(self.sse1, rhs.sse1) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: v128_or(self.simd0, rhs.simd0), simd1: v128_or(self.simd1, rhs.simd1) }
      } else {
        Self { arr: [
          self.arr[0].bitor(rhs.arr[0]),
          self.arr[1].bitor(rhs.arr[1]),
          self.arr[2].bitor(rhs.arr[2]),
          self.arr[3].bitor(rhs.arr[3]),
        ]}
      }
    }
  }
}

impl BitXor for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn bitxor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: bitxor_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse2")] {
        Self { sse0: bitxor_m128i(self.sse0, rhs.sse0), sse1: bitxor_m128i(self.sse1, rhs.sse1) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: v128_xor(self.simd0, rhs.simd0), simd1: v128_xor(self.simd1, rhs.simd1) }
      } else {
        Self { arr: [
          self.arr[0].bitxor(rhs.arr[0]),
          self.arr[1].bitxor(rhs.arr[1]),
          self.arr[2].bitxor(rhs.arr[2]),
          self.arr[3].bitxor(rhs.arr[3]),
        ]}
      }
    }
  }
}

macro_rules! impl_shl_t_for_i64x4 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shl<$shift_type> for i64x4 {
      type Output = Self;
      /// Shifts all lanes left by the value given.
      #[inline]
      #[must_use]
      fn shl(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="avx2")] {
            let shift = cast([rhs as u64, 0]);
            Self { avx2: shl_all_u64_m256i(self.avx2, shift) }
          } else if #[cfg(target_feature="sse2")] {
            let shift = cast([rhs as u64, 0]);
            Self { sse0: shl_all_u64_m128i(self.sse0, shift), sse1: shl_all_u64_m128i(self.sse1, shift) }
          } else if #[cfg(target_feature="simd128")] {
            let u = rhs as u32;
            Self { simd0: i64x2_shl(self.simd0, u), simd1: i64x2_shl(self.simd1, u) }
          } else {
            let u = rhs as u64;
            Self { arr: [
              self.arr[0] << u,
              self.arr[1] << u,
              self.arr[2] << u,
              self.arr[3] << u,
            ]}
          }
        }
      }
    })+
  };
}
impl_shl_t_for_i64x4!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);

macro_rules! impl_shr_t_for_i64x4 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shr<$shift_type> for i64x4 {
      type Output = Self;
      /// Shifts all lanes right by the value given (arithmetic shift, so the
      /// sign bit is preserved).
      #[inline]
      #[must_use]
      fn shr(self, rhs: $shift_type) -> Self::Output {
        pick! {
          if #[cfg(target_feature="simd128")] {
            let u = rhs as u32;
            Self { simd0: i64x2_shr(self.simd0, u), simd1: i64x2_shr(self.simd1, u) }
          } else {
            // x86 has no arithmetic right shift for 64-bit lanes before
            // AVX-512, so fall back to scalar shifts.
            let u = rhs as u64;
            let arr: [i64; 4] = cast(self);
            cast([
              arr[0] >> u,
              arr[1] >> u,
              arr[2] >> u,
              arr[3] >> u,
            ])
          }
        }
      }
    })+
  };
}
impl_shr_t_for_i64x4!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);

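// The comparisons return lane masks: a lane is all ones (-1) where the
// predicate holds and all zeros where it does not, so the result can be fed
// straight into `blend`.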
impl CmpEq for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_eq(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: cmp_eq_mask_i64_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse4.1")] {
        Self { sse0: cmp_eq_mask_i64_m128i(self.sse0, rhs.sse0), sse1: cmp_eq_mask_i64_m128i(self.sse1, rhs.sse1) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: i64x2_eq(self.simd0, rhs.simd0), simd1: i64x2_eq(self.simd1, rhs.simd1) }
      } else {
        let s: [i64;4] = cast(self);
        let r: [i64;4] = cast(rhs);
        cast([
          if s[0] == r[0] { -1_i64 } else { 0 },
          if s[1] == r[1] { -1_i64 } else { 0 },
          if s[2] == r[2] { -1_i64 } else { 0 },
          if s[3] == r[3] { -1_i64 } else { 0 },
        ])
      }
    }
  }
}

impl CmpGt for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_gt(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: cmp_gt_mask_i64_m256i(self.avx2, rhs.avx2) }
      } else if #[cfg(target_feature="sse4.2")] {
        Self { sse0: cmp_gt_mask_i64_m128i(self.sse0, rhs.sse0), sse1: cmp_gt_mask_i64_m128i(self.sse1, rhs.sse1) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: i64x2_gt(self.simd0, rhs.simd0), simd1: i64x2_gt(self.simd1, rhs.simd1) }
      } else {
        let s: [i64;4] = cast(self);
        let r: [i64;4] = cast(rhs);
        cast([
          if s[0] > r[0] { -1_i64 } else { 0 },
          if s[1] > r[1] { -1_i64 } else { 0 },
          if s[2] > r[2] { -1_i64 } else { 0 },
          if s[3] > r[3] { -1_i64 } else { 0 },
        ])
      }
    }
  }
}

impl CmpLt for i64x4 {
  type Output = Self;
  #[inline]
  #[must_use]
  fn cmp_lt(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="avx2")] {
        // lt == !(gt | eq); the gt and eq masks never overlap, so xor acts as or here.
        Self { avx2: !(cmp_gt_mask_i64_m256i(self.avx2, rhs.avx2) ^ cmp_eq_mask_i64_m256i(self.avx2, rhs.avx2)) }
      } else if #[cfg(target_feature="sse4.2")] {
        Self { sse0: !cmp_gt_mask_i64_m128i(self.sse0, rhs.sse0) ^ cmp_eq_mask_i64_m128i(self.sse0, rhs.sse0),
               sse1: !cmp_gt_mask_i64_m128i(self.sse1, rhs.sse1) ^ cmp_eq_mask_i64_m128i(self.sse1, rhs.sse1) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: i64x2_lt(self.simd0, rhs.simd0), simd1: i64x2_lt(self.simd1, rhs.simd1) }
      } else {
        let s: [i64;4] = cast(self);
        let r: [i64;4] = cast(rhs);
        cast([
          if s[0] < r[0] { -1_i64 } else { 0 },
          if s[1] < r[1] { -1_i64 } else { 0 },
          if s[2] < r[2] { -1_i64 } else { 0 },
          if s[3] < r[3] { -1_i64 } else { 0 },
        ])
      }
    }
  }
}

impl i64x4 {
  #[inline]
  #[must_use]
  pub fn new(array: [i64; 4]) -> Self {
    Self::from(array)
  }

  /// Lane-wise select: a lane of `self` that is all ones takes the lane from
  /// `t`, and a lane that is all zeros takes the lane from `f`. The masks
  /// produced by `cmp_eq`/`cmp_gt`/`cmp_lt` are suitable inputs.
  #[inline]
  #[must_use]
  pub fn blend(self, t: Self, f: Self) -> Self {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: blend_varying_i8_m256i(f.avx2, t.avx2, self.avx2) }
      } else if #[cfg(target_feature="sse4.1")] {
        Self { sse0: blend_varying_i8_m128i(f.sse0, t.sse0, self.sse0), sse1: blend_varying_i8_m128i(f.sse1, t.sse1, self.sse1) }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: v128_bitselect(t.simd0, f.simd0, self.simd0), simd1: v128_bitselect(t.simd1, f.simd1, self.simd1) }
      } else {
        generic_bit_blend(self, t, f)
      }
    }
  }

  /// Converts each lane to `f64`. Lanes with a magnitude above 2^53 may lose
  /// precision in the conversion.
  #[inline]
  #[must_use]
  pub fn round_float(self) -> f64x4 {
    let arr: [i64; 4] = cast(self);
    cast([arr[0] as f64, arr[1] as f64, arr[2] as f64, arr[3] as f64])
  }

  #[inline]
  pub fn to_array(self) -> [i64; 4] {
    cast(self)
  }

  #[inline]
  pub fn as_array_ref(&self) -> &[i64; 4] {
    cast_ref(self)
  }
}

impl Not for i64x4 {
  type Output = Self;
  fn not(self) -> Self {
    pick! {
      if #[cfg(target_feature="avx2")] {
        Self { avx2: self.avx2.not() }
      } else if #[cfg(target_feature="sse2")] {
        Self { sse0: self.sse0.not(), sse1: self.sse1.not() }
      } else if #[cfg(target_feature="simd128")] {
        Self { simd0: v128_not(self.simd0), simd1: v128_not(self.simd1) }
      } else {
        Self { arr: [
          !self.arr[0],
          !self.arr[1],
          !self.arr[2],
          !self.arr[3],
        ]}
      }
    }
  }
}
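
// A small usage sketch of the API defined above (`new`, `blend`, `to_array`
// and the operator impls); illustrative checks only, not an exhaustive suite.
#[cfg(test)]
mod usage_sketch {
  use super::*;

  #[test]
  fn scalar_rhs_broadcasts_to_all_lanes() {
    let v = i64x4::new([1, 2, 3, 4]);
    assert_eq!((v + 10_i64).to_array(), [11, 12, 13, 14]);
  }

  #[test]
  fn blend_selects_lanes_by_mask() {
    let t = i64x4::new([1, 2, 3, 4]);
    let f = i64x4::new([-1, -2, -3, -4]);
    // An all-ones lane picks from `t`, an all-zeros lane picks from `f`.
    let mask = i64x4::new([-1, 0, -1, 0]);
    assert_eq!(mask.blend(t, f).to_array(), [1, -2, 3, -4]);
  }
}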