1 //! This module implements the [WebAssembly `SIMD128` ISA].
2 //!
3 //! [WebAssembly `SIMD128` ISA]:
4 //! https://github.com/WebAssembly/simd/blob/master/proposals/simd/SIMD.md
5 
6 #![allow(non_camel_case_types)]
7 #![allow(unused_imports)]
8 
9 use crate::{
10     core_arch::{simd, simd_llvm::*},
11     marker::Sized,
12     mem, ptr,
13 };
14 
15 #[cfg(test)]
16 use stdarch_test::assert_instr;
17 
18 types! {
19     /// WASM-specific 128-bit wide SIMD vector type.
20     ///
21     /// This type corresponds to the `v128` type in the [WebAssembly SIMD
22     /// proposal](https://github.com/webassembly/simd). This type is 128-bits
23     /// large and the meaning of all the bits is defined within the context of
24     /// how this value is used.
25     ///
26     /// This same type is used simultaneously for all 128-bit-wide SIMD types,
27     /// for example:
28     ///
29     /// * sixteen 8-bit integers (both `i8` and `u8`)
30     /// * eight 16-bit integers (both `i16` and `u16`)
31     /// * four 32-bit integers (both `i32` and `u32`)
32     /// * two 64-bit integers (both `i64` and `u64`)
33     /// * four 32-bit floats (`f32`)
34     /// * two 64-bit floats (`f64`)
35     ///
36     /// The `v128` type in Rust is intended to be quite analagous to the `v128`
37     /// type in WebAssembly. Operations on `v128` can only be performed with the
38     /// functions in this module.
39     // N.B., internals here are arbitrary.
40     #[stable(feature = "wasm_simd", since = "1.54.0")]
41     pub struct v128(i32, i32, i32, i32);
42 }
43 
44 macro_rules! conversions {
45     ($(($name:ident = $ty:ty))*) => {
46         impl v128 {
47             $(
48                 #[inline(always)]
49                 fn $name(self) -> $ty {
50                     unsafe { mem::transmute(self) }
51                 }
52             )*
53         }
54         $(
55             impl $ty {
56                 #[inline(always)]
57                 #[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
58                 const fn v128(self) -> v128 {
59                     unsafe { mem::transmute(self) }
60                 }
61             }
62         )*
63     }
64 }
65 
66 conversions! {
67     (as_u8x16 = simd::u8x16)
68     (as_u16x8 = simd::u16x8)
69     (as_u32x4 = simd::u32x4)
70     (as_u64x2 = simd::u64x2)
71     (as_i8x16 = simd::i8x16)
72     (as_i16x8 = simd::i16x8)
73     (as_i32x4 = simd::i32x4)
74     (as_i64x2 = simd::i64x2)
75     (as_f32x4 = simd::f32x4)
76     (as_f64x2 = simd::f64x2)
77 }
78 
79 #[allow(improper_ctypes)]
80 extern "C" {
81     #[link_name = "llvm.wasm.swizzle"]
llvm_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x1682     fn llvm_swizzle(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
83 
84     #[link_name = "llvm.wasm.bitselect.v16i8"]
llvm_bitselect(a: simd::i8x16, b: simd::i8x16, c: simd::i8x16) -> simd::i8x1685     fn llvm_bitselect(a: simd::i8x16, b: simd::i8x16, c: simd::i8x16) -> simd::i8x16;
86     #[link_name = "llvm.wasm.anytrue.v16i8"]
llvm_any_true_i8x16(x: simd::i8x16) -> i3287     fn llvm_any_true_i8x16(x: simd::i8x16) -> i32;
88 
89     #[link_name = "llvm.wasm.alltrue.v16i8"]
llvm_i8x16_all_true(x: simd::i8x16) -> i3290     fn llvm_i8x16_all_true(x: simd::i8x16) -> i32;
91     #[link_name = "llvm.ctpop.v16i8"]
llvm_popcnt(a: simd::i8x16) -> simd::i8x1692     fn llvm_popcnt(a: simd::i8x16) -> simd::i8x16;
93     #[link_name = "llvm.wasm.bitmask.v16i8"]
llvm_bitmask_i8x16(a: simd::i8x16) -> i3294     fn llvm_bitmask_i8x16(a: simd::i8x16) -> i32;
95     #[link_name = "llvm.wasm.narrow.signed.v16i8.v8i16"]
llvm_narrow_i8x16_s(a: simd::i16x8, b: simd::i16x8) -> simd::i8x1696     fn llvm_narrow_i8x16_s(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
97     #[link_name = "llvm.wasm.narrow.unsigned.v16i8.v8i16"]
llvm_narrow_i8x16_u(a: simd::i16x8, b: simd::i16x8) -> simd::i8x1698     fn llvm_narrow_i8x16_u(a: simd::i16x8, b: simd::i16x8) -> simd::i8x16;
99     #[link_name = "llvm.sadd.sat.v16i8"]
llvm_i8x16_add_sat_s(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16100     fn llvm_i8x16_add_sat_s(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
101     #[link_name = "llvm.uadd.sat.v16i8"]
llvm_i8x16_add_sat_u(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16102     fn llvm_i8x16_add_sat_u(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
103     #[link_name = "llvm.wasm.sub.sat.signed.v16i8"]
llvm_i8x16_sub_sat_s(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16104     fn llvm_i8x16_sub_sat_s(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
105     #[link_name = "llvm.wasm.sub.sat.unsigned.v16i8"]
llvm_i8x16_sub_sat_u(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16106     fn llvm_i8x16_sub_sat_u(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
107     #[link_name = "llvm.wasm.avgr.unsigned.v16i8"]
llvm_avgr_u_i8x16(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16108     fn llvm_avgr_u_i8x16(a: simd::i8x16, b: simd::i8x16) -> simd::i8x16;
109 
110     #[link_name = "llvm.wasm.extadd.pairwise.signed.v8i16"]
llvm_i16x8_extadd_pairwise_i8x16_s(x: simd::i8x16) -> simd::i16x8111     fn llvm_i16x8_extadd_pairwise_i8x16_s(x: simd::i8x16) -> simd::i16x8;
112     #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v8i16"]
llvm_i16x8_extadd_pairwise_i8x16_u(x: simd::i8x16) -> simd::i16x8113     fn llvm_i16x8_extadd_pairwise_i8x16_u(x: simd::i8x16) -> simd::i16x8;
114     #[link_name = "llvm.wasm.q15mulr.sat.signed"]
llvm_q15mulr(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8115     fn llvm_q15mulr(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
116     #[link_name = "llvm.wasm.alltrue.v8i16"]
llvm_i16x8_all_true(x: simd::i16x8) -> i32117     fn llvm_i16x8_all_true(x: simd::i16x8) -> i32;
118     #[link_name = "llvm.wasm.bitmask.v8i16"]
llvm_bitmask_i16x8(a: simd::i16x8) -> i32119     fn llvm_bitmask_i16x8(a: simd::i16x8) -> i32;
120     #[link_name = "llvm.wasm.narrow.signed.v8i16.v4i32"]
llvm_narrow_i16x8_s(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8121     fn llvm_narrow_i16x8_s(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
122     #[link_name = "llvm.wasm.narrow.unsigned.v8i16.v4i32"]
llvm_narrow_i16x8_u(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8123     fn llvm_narrow_i16x8_u(a: simd::i32x4, b: simd::i32x4) -> simd::i16x8;
124     #[link_name = "llvm.sadd.sat.v8i16"]
llvm_i16x8_add_sat_s(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8125     fn llvm_i16x8_add_sat_s(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
126     #[link_name = "llvm.uadd.sat.v8i16"]
llvm_i16x8_add_sat_u(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8127     fn llvm_i16x8_add_sat_u(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
128     #[link_name = "llvm.wasm.sub.sat.signed.v8i16"]
llvm_i16x8_sub_sat_s(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8129     fn llvm_i16x8_sub_sat_s(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
130     #[link_name = "llvm.wasm.sub.sat.unsigned.v8i16"]
llvm_i16x8_sub_sat_u(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8131     fn llvm_i16x8_sub_sat_u(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
132     #[link_name = "llvm.wasm.avgr.unsigned.v8i16"]
llvm_avgr_u_i16x8(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8133     fn llvm_avgr_u_i16x8(a: simd::i16x8, b: simd::i16x8) -> simd::i16x8;
134 
135     #[link_name = "llvm.wasm.extadd.pairwise.signed.v16i8"]
llvm_i32x4_extadd_pairwise_i16x8_s(x: simd::i16x8) -> simd::i32x4136     fn llvm_i32x4_extadd_pairwise_i16x8_s(x: simd::i16x8) -> simd::i32x4;
137     #[link_name = "llvm.wasm.extadd.pairwise.unsigned.v16i8"]
llvm_i32x4_extadd_pairwise_i16x8_u(x: simd::i16x8) -> simd::i32x4138     fn llvm_i32x4_extadd_pairwise_i16x8_u(x: simd::i16x8) -> simd::i32x4;
139     #[link_name = "llvm.wasm.alltrue.v4i32"]
llvm_i32x4_all_true(x: simd::i32x4) -> i32140     fn llvm_i32x4_all_true(x: simd::i32x4) -> i32;
141     #[link_name = "llvm.wasm.bitmask.v4i32"]
llvm_bitmask_i32x4(a: simd::i32x4) -> i32142     fn llvm_bitmask_i32x4(a: simd::i32x4) -> i32;
143     #[link_name = "llvm.wasm.dot"]
llvm_i32x4_dot_i16x8_s(a: simd::i16x8, b: simd::i16x8) -> simd::i32x4144     fn llvm_i32x4_dot_i16x8_s(a: simd::i16x8, b: simd::i16x8) -> simd::i32x4;
145 
146     #[link_name = "llvm.wasm.alltrue.v2i64"]
llvm_i64x2_all_true(x: simd::i64x2) -> i32147     fn llvm_i64x2_all_true(x: simd::i64x2) -> i32;
148     #[link_name = "llvm.wasm.bitmask.v2i64"]
llvm_bitmask_i64x2(a: simd::i64x2) -> i32149     fn llvm_bitmask_i64x2(a: simd::i64x2) -> i32;
150 
151     #[link_name = "llvm.ceil.v4f32"]
llvm_f32x4_ceil(x: simd::f32x4) -> simd::f32x4152     fn llvm_f32x4_ceil(x: simd::f32x4) -> simd::f32x4;
153     #[link_name = "llvm.floor.v4f32"]
llvm_f32x4_floor(x: simd::f32x4) -> simd::f32x4154     fn llvm_f32x4_floor(x: simd::f32x4) -> simd::f32x4;
155     #[link_name = "llvm.trunc.v4f32"]
llvm_f32x4_trunc(x: simd::f32x4) -> simd::f32x4156     fn llvm_f32x4_trunc(x: simd::f32x4) -> simd::f32x4;
157     #[link_name = "llvm.nearbyint.v4f32"]
llvm_f32x4_nearest(x: simd::f32x4) -> simd::f32x4158     fn llvm_f32x4_nearest(x: simd::f32x4) -> simd::f32x4;
159     #[link_name = "llvm.fabs.v4f32"]
llvm_f32x4_abs(x: simd::f32x4) -> simd::f32x4160     fn llvm_f32x4_abs(x: simd::f32x4) -> simd::f32x4;
161     #[link_name = "llvm.sqrt.v4f32"]
llvm_f32x4_sqrt(x: simd::f32x4) -> simd::f32x4162     fn llvm_f32x4_sqrt(x: simd::f32x4) -> simd::f32x4;
163     #[link_name = "llvm.minimum.v4f32"]
llvm_f32x4_min(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4164     fn llvm_f32x4_min(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
165     #[link_name = "llvm.maximum.v4f32"]
llvm_f32x4_max(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4166     fn llvm_f32x4_max(x: simd::f32x4, y: simd::f32x4) -> simd::f32x4;
167 
168     #[link_name = "llvm.ceil.v2f64"]
llvm_f64x2_ceil(x: simd::f64x2) -> simd::f64x2169     fn llvm_f64x2_ceil(x: simd::f64x2) -> simd::f64x2;
170     #[link_name = "llvm.floor.v2f64"]
llvm_f64x2_floor(x: simd::f64x2) -> simd::f64x2171     fn llvm_f64x2_floor(x: simd::f64x2) -> simd::f64x2;
172     #[link_name = "llvm.trunc.v2f64"]
llvm_f64x2_trunc(x: simd::f64x2) -> simd::f64x2173     fn llvm_f64x2_trunc(x: simd::f64x2) -> simd::f64x2;
174     #[link_name = "llvm.nearbyint.v2f64"]
llvm_f64x2_nearest(x: simd::f64x2) -> simd::f64x2175     fn llvm_f64x2_nearest(x: simd::f64x2) -> simd::f64x2;
176     #[link_name = "llvm.fabs.v2f64"]
llvm_f64x2_abs(x: simd::f64x2) -> simd::f64x2177     fn llvm_f64x2_abs(x: simd::f64x2) -> simd::f64x2;
178     #[link_name = "llvm.sqrt.v2f64"]
llvm_f64x2_sqrt(x: simd::f64x2) -> simd::f64x2179     fn llvm_f64x2_sqrt(x: simd::f64x2) -> simd::f64x2;
180     #[link_name = "llvm.minimum.v2f64"]
llvm_f64x2_min(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2181     fn llvm_f64x2_min(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
182     #[link_name = "llvm.maximum.v2f64"]
llvm_f64x2_max(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2183     fn llvm_f64x2_max(x: simd::f64x2, y: simd::f64x2) -> simd::f64x2;
184 
185     #[link_name = "llvm.fptosi.sat.v4i32.v4f32"]
llvm_i32x4_trunc_sat_f32x4_s(x: simd::f32x4) -> simd::i32x4186     fn llvm_i32x4_trunc_sat_f32x4_s(x: simd::f32x4) -> simd::i32x4;
187     #[link_name = "llvm.fptoui.sat.v4i32.v4f32"]
llvm_i32x4_trunc_sat_f32x4_u(x: simd::f32x4) -> simd::i32x4188     fn llvm_i32x4_trunc_sat_f32x4_u(x: simd::f32x4) -> simd::i32x4;
189     #[link_name = "llvm.fptosi.sat.v2i32.v2f64"]
llvm_i32x2_trunc_sat_f64x2_s(x: simd::f64x2) -> simd::i32x2190     fn llvm_i32x2_trunc_sat_f64x2_s(x: simd::f64x2) -> simd::i32x2;
191     #[link_name = "llvm.fptoui.sat.v2i32.v2f64"]
llvm_i32x2_trunc_sat_f64x2_u(x: simd::f64x2) -> simd::i32x2192     fn llvm_i32x2_trunc_sat_f64x2_u(x: simd::f64x2) -> simd::i32x2;
193 }
194 
195 #[repr(packed)]
196 #[derive(Copy)]
197 struct Unaligned<T>(T);
198 
199 impl<T: Copy> Clone for Unaligned<T> {
clone(&self) -> Unaligned<T>200     fn clone(&self) -> Unaligned<T> {
201         *self
202     }
203 }
204 
205 /// Loads a `v128` vector from the given heap address.
206 ///
207 /// This intrinsic will emit a load with an alignment of 1. While this is
208 /// provided for completeness it is not strictly necessary, you can also load
209 /// the pointer directly:
210 ///
211 /// ```rust,ignore
212 /// let a: &v128 = ...;
213 /// let value = unsafe { v128_load(a) };
214 /// // .. is the same as ..
215 /// let value = *a;
216 /// ```
217 ///
218 /// The alignment of the load can be configured by doing a manual load without
219 /// this intrinsic.
220 ///
221 /// # Unsafety
222 ///
223 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
224 /// the pointer must be valid to load 16 bytes from. Note that there is no
225 /// alignment requirement on this pointer since this intrinsic performs a
226 /// 1-aligned load.
227 #[inline]
228 #[cfg_attr(test, assert_instr(v128.load))]
229 #[target_feature(enable = "simd128")]
230 #[doc(alias("v128.load"))]
231 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load(m: *const v128) -> v128232 pub unsafe fn v128_load(m: *const v128) -> v128 {
233     (*(m as *const Unaligned<v128>)).0
234 }
235 
236 /// Load eight 8-bit integers and sign extend each one to a 16-bit lane
237 ///
238 /// # Unsafety
239 ///
240 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
241 /// the pointer must be valid to load 8 bytes from. Note that there is no
242 /// alignment requirement on this pointer since this intrinsic performs a
243 /// 1-aligned load.
244 #[inline]
245 #[cfg_attr(test, assert_instr(v128.load8x8_s))]
246 #[target_feature(enable = "simd128")]
247 #[doc(alias("v128.load8x8_s"))]
248 #[stable(feature = "wasm_simd", since = "1.54.0")]
i16x8_load_extend_i8x8(m: *const i8) -> v128249 pub unsafe fn i16x8_load_extend_i8x8(m: *const i8) -> v128 {
250     let m = *(m as *const Unaligned<simd::i8x8>);
251     simd_cast::<_, simd::i16x8>(m.0).v128()
252 }
253 
254 /// Load eight 8-bit integers and zero extend each one to a 16-bit lane
255 ///
256 /// # Unsafety
257 ///
258 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
259 /// the pointer must be valid to load 8 bytes from. Note that there is no
260 /// alignment requirement on this pointer since this intrinsic performs a
261 /// 1-aligned load.
262 #[inline]
263 #[cfg_attr(test, assert_instr(v128.load8x8_u))]
264 #[target_feature(enable = "simd128")]
265 #[doc(alias("v128.load8x8_u"))]
266 #[stable(feature = "wasm_simd", since = "1.54.0")]
i16x8_load_extend_u8x8(m: *const u8) -> v128267 pub unsafe fn i16x8_load_extend_u8x8(m: *const u8) -> v128 {
268     let m = *(m as *const Unaligned<simd::u8x8>);
269     simd_cast::<_, simd::u16x8>(m.0).v128()
270 }
271 
272 #[stable(feature = "wasm_simd", since = "1.54.0")]
273 pub use i16x8_load_extend_u8x8 as u16x8_load_extend_u8x8;
274 
275 /// Load four 16-bit integers and sign extend each one to a 32-bit lane
276 ///
277 /// # Unsafety
278 ///
279 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
280 /// the pointer must be valid to load 8 bytes from. Note that there is no
281 /// alignment requirement on this pointer since this intrinsic performs a
282 /// 1-aligned load.
283 #[inline]
284 #[cfg_attr(test, assert_instr(v128.load16x4_s))]
285 #[target_feature(enable = "simd128")]
286 #[doc(alias("v128.load16x4_s"))]
287 #[stable(feature = "wasm_simd", since = "1.54.0")]
i32x4_load_extend_i16x4(m: *const i16) -> v128288 pub unsafe fn i32x4_load_extend_i16x4(m: *const i16) -> v128 {
289     let m = *(m as *const Unaligned<simd::i16x4>);
290     simd_cast::<_, simd::i32x4>(m.0).v128()
291 }
292 
293 /// Load four 16-bit integers and zero extend each one to a 32-bit lane
294 ///
295 /// # Unsafety
296 ///
297 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
298 /// the pointer must be valid to load 8 bytes from. Note that there is no
299 /// alignment requirement on this pointer since this intrinsic performs a
300 /// 1-aligned load.
301 #[inline]
302 #[cfg_attr(test, assert_instr(v128.load16x4_u))]
303 #[target_feature(enable = "simd128")]
304 #[doc(alias("v128.load16x4_u"))]
305 #[stable(feature = "wasm_simd", since = "1.54.0")]
i32x4_load_extend_u16x4(m: *const u16) -> v128306 pub unsafe fn i32x4_load_extend_u16x4(m: *const u16) -> v128 {
307     let m = *(m as *const Unaligned<simd::u16x4>);
308     simd_cast::<_, simd::u32x4>(m.0).v128()
309 }
310 
311 #[stable(feature = "wasm_simd", since = "1.54.0")]
312 pub use i32x4_load_extend_u16x4 as u32x4_load_extend_u16x4;
313 
314 /// Load two 32-bit integers and sign extend each one to a 64-bit lane
315 ///
316 /// # Unsafety
317 ///
318 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
319 /// the pointer must be valid to load 8 bytes from. Note that there is no
320 /// alignment requirement on this pointer since this intrinsic performs a
321 /// 1-aligned load.
322 #[inline]
323 #[cfg_attr(test, assert_instr(v128.load32x2_s))]
324 #[target_feature(enable = "simd128")]
325 #[doc(alias("v128.load32x2_s"))]
326 #[stable(feature = "wasm_simd", since = "1.54.0")]
i64x2_load_extend_i32x2(m: *const i32) -> v128327 pub unsafe fn i64x2_load_extend_i32x2(m: *const i32) -> v128 {
328     let m = *(m as *const Unaligned<simd::i32x2>);
329     simd_cast::<_, simd::i64x2>(m.0).v128()
330 }
331 
332 /// Load two 32-bit integers and zero extend each one to a 64-bit lane
333 ///
334 /// # Unsafety
335 ///
336 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
337 /// the pointer must be valid to load 8 bytes from. Note that there is no
338 /// alignment requirement on this pointer since this intrinsic performs a
339 /// 1-aligned load.
340 #[inline]
341 #[cfg_attr(test, assert_instr(v128.load32x2_u))]
342 #[target_feature(enable = "simd128")]
343 #[doc(alias("v128.load32x2_u"))]
344 #[stable(feature = "wasm_simd", since = "1.54.0")]
i64x2_load_extend_u32x2(m: *const u32) -> v128345 pub unsafe fn i64x2_load_extend_u32x2(m: *const u32) -> v128 {
346     let m = *(m as *const Unaligned<simd::u32x2>);
347     simd_cast::<_, simd::u64x2>(m.0).v128()
348 }
349 
350 #[stable(feature = "wasm_simd", since = "1.54.0")]
351 pub use i64x2_load_extend_u32x2 as u64x2_load_extend_u32x2;
352 
353 /// Load a single element and splat to all lanes of a v128 vector.
354 ///
355 /// While this intrinsic is provided for completeness it can also be replaced
356 /// with `u8x16_splat(*m)` and it should generate equivalent code (and also not
357 /// require `unsafe`).
358 ///
359 /// # Unsafety
360 ///
361 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
362 /// the pointer must be valid to load 1 byte from. Note that there is no
363 /// alignment requirement on this pointer since this intrinsic performs a
364 /// 1-aligned load.
365 #[inline]
366 #[cfg_attr(test, assert_instr(v128.load8_splat))]
367 #[target_feature(enable = "simd128")]
368 #[doc(alias("v128.load8_splat"))]
369 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load8_splat(m: *const u8) -> v128370 pub unsafe fn v128_load8_splat(m: *const u8) -> v128 {
371     u8x16_splat(*m)
372 }
373 
374 /// Load a single element and splat to all lanes of a v128 vector.
375 ///
376 /// While this intrinsic is provided for completeness it can also be replaced
377 /// with `u16x8_splat(*m)` and it should generate equivalent code (and also not
378 /// require `unsafe`).
379 ///
380 /// # Unsafety
381 ///
382 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
383 /// the pointer must be valid to load 2 bytes from. Note that there is no
384 /// alignment requirement on this pointer since this intrinsic performs a
385 /// 1-aligned load.
386 #[inline]
387 #[cfg_attr(test, assert_instr(v128.load16_splat))]
388 #[target_feature(enable = "simd128")]
389 #[doc(alias("v128.load16_splat"))]
390 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load16_splat(m: *const u16) -> v128391 pub unsafe fn v128_load16_splat(m: *const u16) -> v128 {
392     u16x8_splat(ptr::read_unaligned(m))
393 }
394 
395 /// Load a single element and splat to all lanes of a v128 vector.
396 ///
397 /// While this intrinsic is provided for completeness it can also be replaced
398 /// with `u32x4_splat(*m)` and it should generate equivalent code (and also not
399 /// require `unsafe`).
400 ///
401 /// # Unsafety
402 ///
403 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
404 /// the pointer must be valid to load 4 bytes from. Note that there is no
405 /// alignment requirement on this pointer since this intrinsic performs a
406 /// 1-aligned load.
407 #[inline]
408 #[cfg_attr(test, assert_instr(v128.load32_splat))]
409 #[target_feature(enable = "simd128")]
410 #[doc(alias("v128.load32_splat"))]
411 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load32_splat(m: *const u32) -> v128412 pub unsafe fn v128_load32_splat(m: *const u32) -> v128 {
413     u32x4_splat(ptr::read_unaligned(m))
414 }
415 
416 /// Load a single element and splat to all lanes of a v128 vector.
417 ///
418 /// While this intrinsic is provided for completeness it can also be replaced
419 /// with `u64x2_splat(*m)` and it should generate equivalent code (and also not
420 /// require `unsafe`).
421 ///
422 /// # Unsafety
423 ///
424 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
425 /// the pointer must be valid to load 8 bytes from. Note that there is no
426 /// alignment requirement on this pointer since this intrinsic performs a
427 /// 1-aligned load.
428 #[inline]
429 #[cfg_attr(test, assert_instr(v128.load64_splat))]
430 #[target_feature(enable = "simd128")]
431 #[doc(alias("v128.load64_splat"))]
432 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load64_splat(m: *const u64) -> v128433 pub unsafe fn v128_load64_splat(m: *const u64) -> v128 {
434     u64x2_splat(ptr::read_unaligned(m))
435 }
436 
437 /// Load a 32-bit element into the low bits of the vector and sets all other
438 /// bits to zero.
439 ///
440 /// This intrinsic is provided for completeness and is equivalent to `u32x4(*m,
441 /// 0, 0, 0)` (which doesn't require `unsafe`).
442 ///
443 /// # Unsafety
444 ///
445 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
446 /// the pointer must be valid to load 4 bytes from. Note that there is no
447 /// alignment requirement on this pointer since this intrinsic performs a
448 /// 1-aligned load.
449 #[inline]
450 #[cfg_attr(test, assert_instr(v128.load32_zero))]
451 #[target_feature(enable = "simd128")]
452 #[doc(alias("v128.load32_zero"))]
453 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load32_zero(m: *const u32) -> v128454 pub unsafe fn v128_load32_zero(m: *const u32) -> v128 {
455     u32x4(ptr::read_unaligned(m), 0, 0, 0)
456 }
457 
458 /// Load a 64-bit element into the low bits of the vector and sets all other
459 /// bits to zero.
460 ///
461 /// This intrinsic is provided for completeness and is equivalent to
462 /// `u64x2_replace_lane::<0>(u64x2(0, 0), *m)` (which doesn't require `unsafe`).
463 ///
464 /// # Unsafety
465 ///
466 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
467 /// the pointer must be valid to load 8 bytes from. Note that there is no
468 /// alignment requirement on this pointer since this intrinsic performs a
469 /// 1-aligned load.
470 #[inline]
471 #[cfg_attr(test, assert_instr(v128.load64_zero))]
472 #[target_feature(enable = "simd128")]
473 #[doc(alias("v128.load64_zero"))]
474 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load64_zero(m: *const u64) -> v128475 pub unsafe fn v128_load64_zero(m: *const u64) -> v128 {
476     u64x2_replace_lane::<0>(u64x2(0, 0), ptr::read_unaligned(m))
477 }
478 
479 /// Stores a `v128` vector to the given heap address.
480 ///
481 /// This intrinsic will emit a store with an alignment of 1. While this is
482 /// provided for completeness it is not strictly necessary, you can also store
483 /// the pointer directly:
484 ///
485 /// ```rust,ignore
486 /// let a: &mut v128 = ...;
487 /// unsafe { v128_store(a, value) };
488 /// // .. is the same as ..
489 /// *a = value;
490 /// ```
491 ///
492 /// The alignment of the store can be configured by doing a manual store without
493 /// this intrinsic.
494 ///
495 /// # Unsafety
496 ///
497 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
498 /// the pointer must be valid to store 16 bytes to. Note that there is no
499 /// alignment requirement on this pointer since this intrinsic performs a
500 /// 1-aligned store.
501 #[inline]
502 #[cfg_attr(test, assert_instr(v128.store))]
503 #[target_feature(enable = "simd128")]
504 #[doc(alias("v128.store"))]
505 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_store(m: *mut v128, a: v128)506 pub unsafe fn v128_store(m: *mut v128, a: v128) {
507     *(m as *mut Unaligned<v128>) = Unaligned(a);
508 }
509 
510 /// Loads an 8-bit value from `m` and sets lane `L` of `v` to that value.
511 ///
512 /// This intrinsic is provided for completeness and is equivalent to
513 /// `u8x16_replace_lane::<L>(v, *m)` (which doesn't require `unsafe`).
514 ///
515 /// # Unsafety
516 ///
517 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
518 /// the pointer must be valid to load 1 byte from. Note that there is no
519 /// alignment requirement on this pointer since this intrinsic performs a
520 /// 1-aligned load.
521 #[inline]
522 #[cfg_attr(test, assert_instr(v128.load8_lane, L = 0))]
523 #[target_feature(enable = "simd128")]
524 #[doc(alias("v128.load8_lane"))]
525 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128526 pub unsafe fn v128_load8_lane<const L: usize>(v: v128, m: *const u8) -> v128 {
527     u8x16_replace_lane::<L>(v, *m)
528 }
529 
530 /// Loads a 16-bit value from `m` and sets lane `L` of `v` to that value.
531 ///
532 /// This intrinsic is provided for completeness and is equivalent to
533 /// `u16x8_replace_lane::<L>(v, *m)` (which doesn't require `unsafe`).
534 ///
535 /// # Unsafety
536 ///
537 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
538 /// the pointer must be valid to load 2 bytes from. Note that there is no
539 /// alignment requirement on this pointer since this intrinsic performs a
540 /// 1-aligned load.
541 #[inline]
542 #[cfg_attr(test, assert_instr(v128.load16_lane, L = 0))]
543 #[target_feature(enable = "simd128")]
544 #[doc(alias("v128.load16_lane"))]
545 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128546 pub unsafe fn v128_load16_lane<const L: usize>(v: v128, m: *const u16) -> v128 {
547     u16x8_replace_lane::<L>(v, ptr::read_unaligned(m))
548 }
549 
550 /// Loads a 32-bit value from `m` and sets lane `L` of `v` to that value.
551 ///
552 /// This intrinsic is provided for completeness and is equivalent to
553 /// `u32x4_replace_lane::<L>(v, *m)` (which doesn't require `unsafe`).
554 ///
555 /// # Unsafety
556 ///
557 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
558 /// the pointer must be valid to load 4 bytes from. Note that there is no
559 /// alignment requirement on this pointer since this intrinsic performs a
560 /// 1-aligned load.
561 #[inline]
562 #[cfg_attr(test, assert_instr(v128.load32_lane, L = 0))]
563 #[target_feature(enable = "simd128")]
564 #[doc(alias("v128.load32_lane"))]
565 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128566 pub unsafe fn v128_load32_lane<const L: usize>(v: v128, m: *const u32) -> v128 {
567     u32x4_replace_lane::<L>(v, ptr::read_unaligned(m))
568 }
569 
570 /// Loads a 64-bit value from `m` and sets lane `L` of `v` to that value.
571 ///
572 /// This intrinsic is provided for completeness and is equivalent to
573 /// `u64x2_replace_lane::<L>(v, *m)` (which doesn't require `unsafe`).
574 ///
575 /// # Unsafety
576 ///
577 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
578 /// the pointer must be valid to load 8 bytes from. Note that there is no
579 /// alignment requirement on this pointer since this intrinsic performs a
580 /// 1-aligned load.
581 #[inline]
582 #[cfg_attr(test, assert_instr(v128.load64_lane, L = 0))]
583 #[target_feature(enable = "simd128")]
584 #[doc(alias("v128.load64_lane"))]
585 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128586 pub unsafe fn v128_load64_lane<const L: usize>(v: v128, m: *const u64) -> v128 {
587     u64x2_replace_lane::<L>(v, ptr::read_unaligned(m))
588 }
589 
590 /// Stores the 8-bit value from lane `L` of `v` into `m`
591 ///
592 /// This intrinsic is provided for completeness and is equivalent to
593 /// `*m = u8x16_extract_lane::<L>(v)` (which doesn't require `unsafe`).
594 ///
595 /// # Unsafety
596 ///
597 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
598 /// the pointer must be valid to store 1 byte to. Note that there is no
599 /// alignment requirement on this pointer since this intrinsic performs a
600 /// 1-aligned store.
601 #[inline]
602 #[cfg_attr(test, assert_instr(v128.store8_lane, L = 0))]
603 #[target_feature(enable = "simd128")]
604 #[doc(alias("v128.store8_lane"))]
605 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_store8_lane<const L: usize>(v: v128, m: *mut u8)606 pub unsafe fn v128_store8_lane<const L: usize>(v: v128, m: *mut u8) {
607     *m = u8x16_extract_lane::<L>(v);
608 }
609 
610 /// Stores the 16-bit value from lane `L` of `v` into `m`
611 ///
612 /// This intrinsic is provided for completeness and is equivalent to
613 /// `*m = u16x8_extract_lane::<L>(v)` (which doesn't require `unsafe`).
614 ///
615 /// # Unsafety
616 ///
617 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
618 /// the pointer must be valid to store 2 bytes to. Note that there is no
619 /// alignment requirement on this pointer since this intrinsic performs a
620 /// 1-aligned store.
621 #[inline]
622 #[cfg_attr(test, assert_instr(v128.store16_lane, L = 0))]
623 #[target_feature(enable = "simd128")]
624 #[doc(alias("v128.store16_lane"))]
625 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_store16_lane<const L: usize>(v: v128, m: *mut u16)626 pub unsafe fn v128_store16_lane<const L: usize>(v: v128, m: *mut u16) {
627     ptr::write_unaligned(m, u16x8_extract_lane::<L>(v))
628 }
629 
630 /// Stores the 32-bit value from lane `L` of `v` into `m`
631 ///
632 /// This intrinsic is provided for completeness and is equivalent to
633 /// `*m = u32x4_extract_lane::<L>(v)` (which doesn't require `unsafe`).
634 ///
635 /// # Unsafety
636 ///
637 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
638 /// the pointer must be valid to store 4 bytes to. Note that there is no
639 /// alignment requirement on this pointer since this intrinsic performs a
640 /// 1-aligned store.
641 #[inline]
642 #[cfg_attr(test, assert_instr(v128.store32_lane, L = 0))]
643 #[target_feature(enable = "simd128")]
644 #[doc(alias("v128.store32_lane"))]
645 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_store32_lane<const L: usize>(v: v128, m: *mut u32)646 pub unsafe fn v128_store32_lane<const L: usize>(v: v128, m: *mut u32) {
647     ptr::write_unaligned(m, u32x4_extract_lane::<L>(v))
648 }
649 
650 /// Stores the 64-bit value from lane `L` of `v` into `m`
651 ///
652 /// This intrinsic is provided for completeness and is equivalent to
653 /// `*m = u64x2_extract_lane::<L>(v)` (which doesn't require `unsafe`).
654 ///
655 /// # Unsafety
656 ///
657 /// This intrinsic is unsafe because it takes a raw pointer as an argument, and
658 /// the pointer must be valid to store 8 bytes to. Note that there is no
659 /// alignment requirement on this pointer since this intrinsic performs a
660 /// 1-aligned store.
661 #[inline]
662 #[cfg_attr(test, assert_instr(v128.store64_lane, L = 0))]
663 #[target_feature(enable = "simd128")]
664 #[doc(alias("v128.store64_lane"))]
665 #[stable(feature = "wasm_simd", since = "1.54.0")]
v128_store64_lane<const L: usize>(v: v128, m: *mut u64)666 pub unsafe fn v128_store64_lane<const L: usize>(v: v128, m: *mut u64) {
667     ptr::write_unaligned(m, u64x2_extract_lane::<L>(v))
668 }
669 
670 /// Materializes a SIMD value from the provided operands.
671 ///
672 /// If possible this will generate a `v128.const` instruction, otherwise it may
673 /// be lowered to a sequence of instructions to materialize the vector value.
674 #[inline]
675 #[target_feature(enable = "simd128")]
676 #[cfg_attr(
677     test,
678     assert_instr(
679         v128.const,
680         a0 = 0,
681         a1 = 1,
682         a2 = 2,
683         a3 = 3,
684         a4 = 4,
685         a5 = 5,
686         a6 = 6,
687         a7 = 7,
688         a8 = 8,
689         a9 = 9,
690         a10 = 10,
691         a11 = 11,
692         a12 = 12,
693         a13 = 13,
694         a14 = 14,
695         a15 = 15,
696     )
697 )]
698 #[doc(alias("v128.const"))]
699 #[stable(feature = "wasm_simd", since = "1.54.0")]
700 #[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
i8x16( a0: i8, a1: i8, a2: i8, a3: i8, a4: i8, a5: i8, a6: i8, a7: i8, a8: i8, a9: i8, a10: i8, a11: i8, a12: i8, a13: i8, a14: i8, a15: i8, ) -> v128701 pub const fn i8x16(
702     a0: i8,
703     a1: i8,
704     a2: i8,
705     a3: i8,
706     a4: i8,
707     a5: i8,
708     a6: i8,
709     a7: i8,
710     a8: i8,
711     a9: i8,
712     a10: i8,
713     a11: i8,
714     a12: i8,
715     a13: i8,
716     a14: i8,
717     a15: i8,
718 ) -> v128 {
719     simd::i8x16(
720         a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
721     )
722     .v128()
723 }
724 
725 /// Materializes a SIMD value from the provided operands.
726 ///
727 /// If possible this will generate a `v128.const` instruction, otherwise it may
728 /// be lowered to a sequence of instructions to materialize the vector value.
729 #[inline]
730 #[target_feature(enable = "simd128")]
731 #[doc(alias("v128.const"))]
732 #[stable(feature = "wasm_simd", since = "1.54.0")]
733 #[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
u8x16( a0: u8, a1: u8, a2: u8, a3: u8, a4: u8, a5: u8, a6: u8, a7: u8, a8: u8, a9: u8, a10: u8, a11: u8, a12: u8, a13: u8, a14: u8, a15: u8, ) -> v128734 pub const fn u8x16(
735     a0: u8,
736     a1: u8,
737     a2: u8,
738     a3: u8,
739     a4: u8,
740     a5: u8,
741     a6: u8,
742     a7: u8,
743     a8: u8,
744     a9: u8,
745     a10: u8,
746     a11: u8,
747     a12: u8,
748     a13: u8,
749     a14: u8,
750     a15: u8,
751 ) -> v128 {
752     simd::u8x16(
753         a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15,
754     )
755     .v128()
756 }
757 
758 /// Materializes a SIMD value from the provided operands.
759 ///
760 /// If possible this will generate a `v128.const` instruction, otherwise it may
761 /// be lowered to a sequence of instructions to materialize the vector value.
762 #[inline]
763 #[target_feature(enable = "simd128")]
764 #[cfg_attr(
765     test,
766     assert_instr(
767         v128.const,
768         a0 = 0,
769         a1 = 1,
770         a2 = 2,
771         a3 = 3,
772         a4 = 4,
773         a5 = 5,
774         a6 = 6,
775         a7 = 7,
776     )
777 )]
778 #[doc(alias("v128.const"))]
779 #[stable(feature = "wasm_simd", since = "1.54.0")]
780 #[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128781 pub const fn i16x8(a0: i16, a1: i16, a2: i16, a3: i16, a4: i16, a5: i16, a6: i16, a7: i16) -> v128 {
782     simd::i16x8(a0, a1, a2, a3, a4, a5, a6, a7).v128()
783 }
784 
785 /// Materializes a SIMD value from the provided operands.
786 ///
787 /// If possible this will generate a `v128.const` instruction, otherwise it may
788 /// be lowered to a sequence of instructions to materialize the vector value.
789 #[inline]
790 #[target_feature(enable = "simd128")]
791 #[doc(alias("v128.const"))]
792 #[stable(feature = "wasm_simd", since = "1.54.0")]
793 #[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128794 pub const fn u16x8(a0: u16, a1: u16, a2: u16, a3: u16, a4: u16, a5: u16, a6: u16, a7: u16) -> v128 {
795     simd::u16x8(a0, a1, a2, a3, a4, a5, a6, a7).v128()
796 }
797 
798 /// Materializes a SIMD value from the provided operands.
799 ///
800 /// If possible this will generate a `v128.const` instruction, otherwise it may
801 /// be lowered to a sequence of instructions to materialize the vector value.
802 #[inline]
803 #[target_feature(enable = "simd128")]
804 #[cfg_attr(test, assert_instr(v128.const, a0 = 0, a1 = 1, a2 = 2, a3 = 3))]
805 #[doc(alias("v128.const"))]
806 #[stable(feature = "wasm_simd", since = "1.54.0")]
807 #[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128808 pub const fn i32x4(a0: i32, a1: i32, a2: i32, a3: i32) -> v128 {
809     simd::i32x4(a0, a1, a2, a3).v128()
810 }
811 
812 /// Materializes a SIMD value from the provided operands.
813 ///
814 /// If possible this will generate a `v128.const` instruction, otherwise it may
815 /// be lowered to a sequence of instructions to materialize the vector value.
816 #[inline]
817 #[target_feature(enable = "simd128")]
818 #[doc(alias("v128.const"))]
819 #[stable(feature = "wasm_simd", since = "1.54.0")]
820 #[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128821 pub const fn u32x4(a0: u32, a1: u32, a2: u32, a3: u32) -> v128 {
822     simd::u32x4(a0, a1, a2, a3).v128()
823 }
824 
825 /// Materializes a SIMD value from the provided operands.
826 ///
827 /// If possible this will generate a `v128.const` instruction, otherwise it may
828 /// be lowered to a sequence of instructions to materialize the vector value.
829 #[inline]
830 #[target_feature(enable = "simd128")]
831 #[cfg_attr(test, assert_instr(v128.const, a0 = 1, a1 = 2))]
832 #[doc(alias("v128.const"))]
833 #[stable(feature = "wasm_simd", since = "1.54.0")]
834 #[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
i64x2(a0: i64, a1: i64) -> v128835 pub const fn i64x2(a0: i64, a1: i64) -> v128 {
836     simd::i64x2(a0, a1).v128()
837 }
838 
839 /// Materializes a SIMD value from the provided operands.
840 ///
841 /// If possible this will generate a `v128.const` instruction, otherwise it may
842 /// be lowered to a sequence of instructions to materialize the vector value.
843 #[inline]
844 #[target_feature(enable = "simd128")]
845 #[doc(alias("v128.const"))]
846 #[stable(feature = "wasm_simd", since = "1.54.0")]
847 #[rustc_const_stable(feature = "wasm_simd", since = "1.54.0")]
u64x2(a0: u64, a1: u64) -> v128848 pub const fn u64x2(a0: u64, a1: u64) -> v128 {
849     simd::u64x2(a0, a1).v128()
850 }
851 
852 /// Materializes a SIMD value from the provided operands.
853 ///
854 /// If possible this will generate a `v128.const` instruction, otherwise it may
855 /// be lowered to a sequence of instructions to materialize the vector value.
856 #[inline]
857 #[target_feature(enable = "simd128")]
858 #[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0, a2 = 2.0, a3 = 3.0))]
859 #[doc(alias("v128.const"))]
860 #[stable(feature = "wasm_simd", since = "1.54.0")]
861 #[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128862 pub const fn f32x4(a0: f32, a1: f32, a2: f32, a3: f32) -> v128 {
863     simd::f32x4(a0, a1, a2, a3).v128()
864 }
865 
866 /// Materializes a SIMD value from the provided operands.
867 ///
868 /// If possible this will generate a `v128.const` instruction, otherwise it may
869 /// be lowered to a sequence of instructions to materialize the vector value.
870 #[inline]
871 #[target_feature(enable = "simd128")]
872 #[cfg_attr(test, assert_instr(v128.const, a0 = 0.0, a1 = 1.0))]
873 #[doc(alias("v128.const"))]
874 #[stable(feature = "wasm_simd", since = "1.54.0")]
875 #[rustc_const_stable(feature = "wasm_simd_const", since = "1.56.0")]
f64x2(a0: f64, a1: f64) -> v128876 pub const fn f64x2(a0: f64, a1: f64) -> v128 {
877     simd::f64x2(a0, a1).v128()
878 }
879 
880 /// Returns a new vector with lanes selected from the lanes of the two input
881 /// vectors `$a` and `$b` specified in the 16 immediate operands.
882 ///
883 /// The `$a` and `$b` expressions must have type `v128`, and this function
884 /// generates a wasm instruction that is encoded with 16 bytes providing the
885 /// indices of the elements to return. The indices `i` in range [0, 15] select
886 /// the `i`-th element of `a`. The indices in range [16, 31] select the `i -
887 /// 16`-th element of `b`.
888 ///
889 /// Note that this is a macro due to the codegen requirements of all of the
890 /// index expressions `$i*` must be constant. A compiler error will be
891 /// generated if any of the expressions are not constant.
892 ///
893 /// All indexes `$i*` must have the type `u32`.
894 #[inline]
895 #[cfg_attr(test,
896     assert_instr(
897         i8x16.shuffle,
898         I0 = 0,
899         I1 = 2,
900         I2 = 4,
901         I3 = 6,
902         I4 = 8,
903         I5 = 10,
904         I6 = 12,
905         I7 = 14,
906         I8 = 16,
907         I9 = 18,
908         I10 = 20,
909         I11 = 22,
910         I12 = 24,
911         I13 = 26,
912         I14 = 28,
913         I15 = 30,
914     )
915 )]
916 #[target_feature(enable = "simd128")]
917 #[doc(alias("i8x16.shuffle"))]
918 #[stable(feature = "wasm_simd", since = "1.54.0")]
i8x16_shuffle< const I0: usize, const I1: usize, const I2: usize, const I3: usize, const I4: usize, const I5: usize, const I6: usize, const I7: usize, const I8: usize, const I9: usize, const I10: usize, const I11: usize, const I12: usize, const I13: usize, const I14: usize, const I15: usize, >( a: v128, b: v128, ) -> v128919 pub fn i8x16_shuffle<
920     const I0: usize,
921     const I1: usize,
922     const I2: usize,
923     const I3: usize,
924     const I4: usize,
925     const I5: usize,
926     const I6: usize,
927     const I7: usize,
928     const I8: usize,
929     const I9: usize,
930     const I10: usize,
931     const I11: usize,
932     const I12: usize,
933     const I13: usize,
934     const I14: usize,
935     const I15: usize,
936 >(
937     a: v128,
938     b: v128,
939 ) -> v128 {
940     static_assert!(I0: usize where I0 < 32);
941     static_assert!(I1: usize where I1 < 32);
942     static_assert!(I2: usize where I2 < 32);
943     static_assert!(I3: usize where I3 < 32);
944     static_assert!(I4: usize where I4 < 32);
945     static_assert!(I5: usize where I5 < 32);
946     static_assert!(I6: usize where I6 < 32);
947     static_assert!(I7: usize where I7 < 32);
948     static_assert!(I8: usize where I8 < 32);
949     static_assert!(I9: usize where I9 < 32);
950     static_assert!(I10: usize where I10 < 32);
951     static_assert!(I11: usize where I11 < 32);
952     static_assert!(I12: usize where I12 < 32);
953     static_assert!(I13: usize where I13 < 32);
954     static_assert!(I14: usize where I14 < 32);
955     static_assert!(I15: usize where I15 < 32);
956     let shuf: simd::u8x16 = unsafe {
957         simd_shuffle16!(
958             a.as_u8x16(),
959             b.as_u8x16(),
960             <
961                 const I0: usize,
962                 const I1: usize,
963                 const I2: usize,
964                 const I3: usize,
965                 const I4: usize,
966                 const I5: usize,
967                 const I6: usize,
968                 const I7: usize,
969                 const I8: usize,
970                 const I9: usize,
971                 const I10: usize,
972                 const I11: usize,
973                 const I12: usize,
974                 const I13: usize,
975                 const I14: usize,
976                 const I15: usize,
977             > [
978                 I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32, I7 as u32,
979                 I8 as u32, I9 as u32, I10 as u32, I11 as u32, I12 as u32, I13 as u32, I14 as u32,
980                 I15 as u32,
981             ],
982         )
983     };
984     shuf.v128()
985 }
986 
987 #[stable(feature = "wasm_simd", since = "1.54.0")]
988 pub use i8x16_shuffle as u8x16_shuffle;
989 
990 /// Same as [`i8x16_shuffle`], except operates as if the inputs were eight
991 /// 16-bit integers, only taking 8 indices to shuffle.
992 ///
993 /// Indices in the range [0, 7] select from `a` while [8, 15] select from `b`.
994 /// Note that this will generate the `i8x16.shuffle` instruction, since there
995 /// is no native `i16x8.shuffle` instruction (there is no need for one since
996 /// `i8x16.shuffle` suffices).
997 #[inline]
998 #[cfg_attr(test,
999     assert_instr(
1000         i8x16.shuffle,
1001         I0 = 0,
1002         I1 = 2,
1003         I2 = 4,
1004         I3 = 6,
1005         I4 = 8,
1006         I5 = 10,
1007         I6 = 12,
1008         I7 = 14,
1009     )
1010 )]
1011 #[target_feature(enable = "simd128")]
1012 #[doc(alias("i8x16.shuffle"))]
1013 #[stable(feature = "wasm_simd", since = "1.54.0")]
i16x8_shuffle< const I0: usize, const I1: usize, const I2: usize, const I3: usize, const I4: usize, const I5: usize, const I6: usize, const I7: usize, >( a: v128, b: v128, ) -> v1281014 pub fn i16x8_shuffle<
1015     const I0: usize,
1016     const I1: usize,
1017     const I2: usize,
1018     const I3: usize,
1019     const I4: usize,
1020     const I5: usize,
1021     const I6: usize,
1022     const I7: usize,
1023 >(
1024     a: v128,
1025     b: v128,
1026 ) -> v128 {
1027     static_assert!(I0: usize where I0 < 16);
1028     static_assert!(I1: usize where I1 < 16);
1029     static_assert!(I2: usize where I2 < 16);
1030     static_assert!(I3: usize where I3 < 16);
1031     static_assert!(I4: usize where I4 < 16);
1032     static_assert!(I5: usize where I5 < 16);
1033     static_assert!(I6: usize where I6 < 16);
1034     static_assert!(I7: usize where I7 < 16);
1035     let shuf: simd::u16x8 = unsafe {
1036         simd_shuffle8!(
1037             a.as_u16x8(),
1038             b.as_u16x8(),
1039             <
1040                 const I0: usize,
1041                 const I1: usize,
1042                 const I2: usize,
1043                 const I3: usize,
1044                 const I4: usize,
1045                 const I5: usize,
1046                 const I6: usize,
1047                 const I7: usize,
1048             > [
1049                 I0 as u32, I1 as u32, I2 as u32, I3 as u32, I4 as u32, I5 as u32, I6 as u32, I7 as u32,
1050             ],
1051         )
1052     };
1053     shuf.v128()
1054 }
1055 
1056 #[stable(feature = "wasm_simd", since = "1.54.0")]
1057 pub use i16x8_shuffle as u16x8_shuffle;
1058 
1059 /// Same as [`i8x16_shuffle`], except operates as if the inputs were four
1060 /// 32-bit integers, only taking 4 indices to shuffle.
1061 ///
1062 /// Indices in the range [0, 3] select from `a` while [4, 7] select from `b`.
1063 /// Note that this will generate the `i8x16.shuffle` instruction, since there
1064 /// is no native `i32x4.shuffle` instruction (there is no need for one since
1065 /// `i8x16.shuffle` suffices).
1066 #[inline]
1067 #[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2, I2 = 4, I3 = 6))]
1068 #[target_feature(enable = "simd128")]
1069 #[doc(alias("i8x16.shuffle"))]
1070 #[stable(feature = "wasm_simd", since = "1.54.0")]
i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>( a: v128, b: v128, ) -> v1281071 pub fn i32x4_shuffle<const I0: usize, const I1: usize, const I2: usize, const I3: usize>(
1072     a: v128,
1073     b: v128,
1074 ) -> v128 {
1075     static_assert!(I0: usize where I0 < 8);
1076     static_assert!(I1: usize where I1 < 8);
1077     static_assert!(I2: usize where I2 < 8);
1078     static_assert!(I3: usize where I3 < 8);
1079     let shuf: simd::u32x4 = unsafe {
1080         simd_shuffle4!(
1081             a.as_u32x4(),
1082             b.as_u32x4(),
1083             <const I0: usize, const I1: usize, const I2: usize, const I3: usize> [I0 as u32, I1 as u32, I2 as u32, I3 as u32],
1084         )
1085     };
1086     shuf.v128()
1087 }
1088 
1089 #[stable(feature = "wasm_simd", since = "1.54.0")]
1090 pub use i32x4_shuffle as u32x4_shuffle;
1091 
1092 /// Same as [`i8x16_shuffle`], except operates as if the inputs were two
1093 /// 64-bit integers, only taking 2 indices to shuffle.
1094 ///
1095 /// Indices in the range [0, 1] select from `a` while [2, 3] select from `b`.
1096 /// Note that this will generate the `v8x16.shuffle` instruction, since there
1097 /// is no native `i64x2.shuffle` instruction (there is no need for one since
1098 /// `i8x16.shuffle` suffices).
1099 #[inline]
1100 #[cfg_attr(test, assert_instr(i8x16.shuffle, I0 = 0, I1 = 2))]
1101 #[target_feature(enable = "simd128")]
1102 #[doc(alias("i8x16.shuffle"))]
1103 #[stable(feature = "wasm_simd", since = "1.54.0")]
i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v1281104 pub fn i64x2_shuffle<const I0: usize, const I1: usize>(a: v128, b: v128) -> v128 {
1105     static_assert!(I0: usize where I0 < 4);
1106     static_assert!(I1: usize where I1 < 4);
1107     let shuf: simd::u64x2 = unsafe {
1108         simd_shuffle2!(
1109             a.as_u64x2(),
1110             b.as_u64x2(),
1111             <const I0: usize, const I1: usize> [I0 as u32, I1 as u32],
1112         )
1113     };
1114     shuf.v128()
1115 }
1116 
1117 #[stable(feature = "wasm_simd", since = "1.54.0")]
1118 pub use i64x2_shuffle as u64x2_shuffle;
1119 
1120 /// Extracts a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
1121 ///
1122 /// Extracts the scalar value of lane specified in the immediate mode operand
1123 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1124 #[inline]
1125 #[cfg_attr(test, assert_instr(i8x16.extract_lane_s, N = 3))]
1126 #[target_feature(enable = "simd128")]
1127 #[doc(alias("i8x16.extract_lane_s"))]
1128 #[stable(feature = "wasm_simd", since = "1.54.0")]
i8x16_extract_lane<const N: usize>(a: v128) -> i81129 pub fn i8x16_extract_lane<const N: usize>(a: v128) -> i8 {
1130     static_assert!(N: usize where N < 16);
1131     unsafe { simd_extract(a.as_i8x16(), N as u32) }
1132 }
1133 
1134 /// Extracts a lane from a 128-bit vector interpreted as 16 packed u8 numbers.
1135 ///
1136 /// Extracts the scalar value of lane specified in the immediate mode operand
1137 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1138 #[inline]
1139 #[cfg_attr(test, assert_instr(i8x16.extract_lane_u, N = 3))]
1140 #[target_feature(enable = "simd128")]
1141 #[doc(alias("i8x16.extract_lane_u"))]
1142 #[stable(feature = "wasm_simd", since = "1.54.0")]
u8x16_extract_lane<const N: usize>(a: v128) -> u81143 pub fn u8x16_extract_lane<const N: usize>(a: v128) -> u8 {
1144     static_assert!(N: usize where N < 16);
1145     unsafe { simd_extract(a.as_u8x16(), N as u32) }
1146 }
1147 
1148 /// Replaces a lane from a 128-bit vector interpreted as 16 packed i8 numbers.
1149 ///
1150 /// Replaces the scalar value of lane specified in the immediate mode operand
1151 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1152 #[inline]
1153 #[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
1154 #[target_feature(enable = "simd128")]
1155 #[doc(alias("i8x16.replace_lane"))]
1156 #[stable(feature = "wasm_simd", since = "1.54.0")]
i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v1281157 pub fn i8x16_replace_lane<const N: usize>(a: v128, val: i8) -> v128 {
1158     static_assert!(N: usize where N < 16);
1159     unsafe { simd_insert(a.as_i8x16(), N as u32, val).v128() }
1160 }
1161 
1162 /// Replaces a lane from a 128-bit vector interpreted as 16 packed u8 numbers.
1163 ///
1164 /// Replaces the scalar value of lane specified in the immediate mode operand
1165 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1166 #[inline]
1167 #[cfg_attr(test, assert_instr(i8x16.replace_lane, N = 2))]
1168 #[target_feature(enable = "simd128")]
1169 #[doc(alias("i8x16.replace_lane"))]
1170 #[stable(feature = "wasm_simd", since = "1.54.0")]
u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v1281171 pub fn u8x16_replace_lane<const N: usize>(a: v128, val: u8) -> v128 {
1172     static_assert!(N: usize where N < 16);
1173     unsafe { simd_insert(a.as_u8x16(), N as u32, val).v128() }
1174 }
1175 
1176 /// Extracts a lane from a 128-bit vector interpreted as 8 packed i16 numbers.
1177 ///
1178 /// Extracts a the scalar value of lane specified in the immediate mode operand
1179 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1180 #[inline]
1181 #[cfg_attr(test, assert_instr(i16x8.extract_lane_s, N = 2))]
1182 #[target_feature(enable = "simd128")]
1183 #[doc(alias("i16x8.extract_lane_s"))]
1184 #[stable(feature = "wasm_simd", since = "1.54.0")]
i16x8_extract_lane<const N: usize>(a: v128) -> i161185 pub fn i16x8_extract_lane<const N: usize>(a: v128) -> i16 {
1186     static_assert!(N: usize where N < 8);
1187     unsafe { simd_extract(a.as_i16x8(), N as u32) }
1188 }
1189 
1190 /// Extracts a lane from a 128-bit vector interpreted as 8 packed u16 numbers.
1191 ///
1192 /// Extracts a the scalar value of lane specified in the immediate mode operand
1193 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1194 #[inline]
1195 #[cfg_attr(test, assert_instr(i16x8.extract_lane_u, N = 2))]
1196 #[target_feature(enable = "simd128")]
1197 #[doc(alias("i16x8.extract_lane_u"))]
1198 #[stable(feature = "wasm_simd", since = "1.54.0")]
u16x8_extract_lane<const N: usize>(a: v128) -> u161199 pub fn u16x8_extract_lane<const N: usize>(a: v128) -> u16 {
1200     static_assert!(N: usize where N < 8);
1201     unsafe { simd_extract(a.as_u16x8(), N as u32) }
1202 }
1203 
1204 /// Replaces a lane from a 128-bit vector interpreted as 8 packed i16 numbers.
1205 ///
1206 /// Replaces the scalar value of lane specified in the immediate mode operand
1207 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1208 #[inline]
1209 #[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
1210 #[target_feature(enable = "simd128")]
1211 #[doc(alias("i16x8.replace_lane"))]
1212 #[stable(feature = "wasm_simd", since = "1.54.0")]
i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v1281213 pub fn i16x8_replace_lane<const N: usize>(a: v128, val: i16) -> v128 {
1214     static_assert!(N: usize where N < 8);
1215     unsafe { simd_insert(a.as_i16x8(), N as u32, val).v128() }
1216 }
1217 
1218 /// Replaces a lane from a 128-bit vector interpreted as 8 packed u16 numbers.
1219 ///
1220 /// Replaces the scalar value of lane specified in the immediate mode operand
1221 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1222 #[inline]
1223 #[cfg_attr(test, assert_instr(i16x8.replace_lane, N = 2))]
1224 #[target_feature(enable = "simd128")]
1225 #[doc(alias("i16x8.replace_lane"))]
1226 #[stable(feature = "wasm_simd", since = "1.54.0")]
u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v1281227 pub fn u16x8_replace_lane<const N: usize>(a: v128, val: u16) -> v128 {
1228     static_assert!(N: usize where N < 8);
1229     unsafe { simd_insert(a.as_u16x8(), N as u32, val).v128() }
1230 }
1231 
1232 /// Extracts a lane from a 128-bit vector interpreted as 4 packed i32 numbers.
1233 ///
1234 /// Extracts the scalar value of lane specified in the immediate mode operand
1235 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1236 #[inline]
1237 #[cfg_attr(test, assert_instr(i32x4.extract_lane, N = 2))]
1238 #[target_feature(enable = "simd128")]
1239 #[doc(alias("i32x4.extract_lane"))]
1240 #[stable(feature = "wasm_simd", since = "1.54.0")]
i32x4_extract_lane<const N: usize>(a: v128) -> i321241 pub fn i32x4_extract_lane<const N: usize>(a: v128) -> i32 {
1242     static_assert!(N: usize where N < 4);
1243     unsafe { simd_extract(a.as_i32x4(), N as u32) }
1244 }
1245 
1246 /// Extracts a lane from a 128-bit vector interpreted as 4 packed u32 numbers.
1247 ///
1248 /// Extracts the scalar value of lane specified in the immediate mode operand
1249 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1250 #[inline]
1251 #[target_feature(enable = "simd128")]
1252 #[doc(alias("i32x4.extract_lane"))]
1253 #[stable(feature = "wasm_simd", since = "1.54.0")]
1254 pub fn u32x4_extract_lane<const N: usize>(a: v128) -> u32 {
1255     i32x4_extract_lane::<N>(a) as u32
1256 }
1257 
1258 /// Replaces a lane from a 128-bit vector interpreted as 4 packed i32 numbers.
1259 ///
1260 /// Replaces the scalar value of lane specified in the immediate mode operand
1261 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1262 #[inline]
1263 #[cfg_attr(test, assert_instr(i32x4.replace_lane, N = 2))]
1264 #[target_feature(enable = "simd128")]
1265 #[doc(alias("i32x4.replace_lane"))]
1266 #[stable(feature = "wasm_simd", since = "1.54.0")]
1267 pub fn i32x4_replace_lane<const N: usize>(a: v128, val: i32) -> v128 {
1268     static_assert!(N: usize where N < 4);
1269     unsafe { simd_insert(a.as_i32x4(), N as u32, val).v128() }
1270 }
1271 
1272 /// Replaces a lane from a 128-bit vector interpreted as 4 packed u32 numbers.
1273 ///
1274 /// Replaces the scalar value of lane specified in the immediate mode operand
1275 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1276 #[inline]
1277 #[target_feature(enable = "simd128")]
1278 #[doc(alias("i32x4.replace_lane"))]
1279 #[stable(feature = "wasm_simd", since = "1.54.0")]
1280 pub fn u32x4_replace_lane<const N: usize>(a: v128, val: u32) -> v128 {
1281     i32x4_replace_lane::<N>(a, val as i32)
1282 }
1283 
1284 /// Extracts a lane from a 128-bit vector interpreted as 2 packed i64 numbers.
1285 ///
1286 /// Extracts the scalar value of lane specified in the immediate mode operand
1287 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1288 #[inline]
1289 #[cfg_attr(test, assert_instr(i64x2.extract_lane, N = 1))]
1290 #[target_feature(enable = "simd128")]
1291 #[doc(alias("i64x2.extract_lane"))]
1292 #[stable(feature = "wasm_simd", since = "1.54.0")]
1293 pub fn i64x2_extract_lane<const N: usize>(a: v128) -> i64 {
1294     static_assert!(N: usize where N < 2);
1295     unsafe { simd_extract(a.as_i64x2(), N as u32) }
1296 }
1297 
1298 /// Extracts a lane from a 128-bit vector interpreted as 2 packed u64 numbers.
1299 ///
1300 /// Extracts the scalar value of lane specified in the immediate mode operand
1301 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1302 #[inline]
1303 #[target_feature(enable = "simd128")]
1304 #[doc(alias("i64x2.extract_lane"))]
1305 #[stable(feature = "wasm_simd", since = "1.54.0")]
1306 pub fn u64x2_extract_lane<const N: usize>(a: v128) -> u64 {
1307     i64x2_extract_lane::<N>(a) as u64
1308 }
1309 
1310 /// Replaces a lane from a 128-bit vector interpreted as 2 packed i64 numbers.
1311 ///
1312 /// Replaces the scalar value of lane specified in the immediate mode operand
1313 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1314 #[inline]
1315 #[cfg_attr(test, assert_instr(i64x2.replace_lane, N = 0))]
1316 #[target_feature(enable = "simd128")]
1317 #[doc(alias("i64x2.replace_lane"))]
1318 #[stable(feature = "wasm_simd", since = "1.54.0")]
1319 pub fn i64x2_replace_lane<const N: usize>(a: v128, val: i64) -> v128 {
1320     static_assert!(N: usize where N < 2);
1321     unsafe { simd_insert(a.as_i64x2(), N as u32, val).v128() }
1322 }
1323 
1324 /// Replaces a lane from a 128-bit vector interpreted as 2 packed u64 numbers.
1325 ///
1326 /// Replaces the scalar value of lane specified in the immediate mode operand
1327 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1328 #[inline]
1329 #[target_feature(enable = "simd128")]
1330 #[doc(alias("i64x2.replace_lane"))]
1331 #[stable(feature = "wasm_simd", since = "1.54.0")]
1332 pub fn u64x2_replace_lane<const N: usize>(a: v128, val: u64) -> v128 {
1333     i64x2_replace_lane::<N>(a, val as i64)
1334 }
1335 
1336 /// Extracts a lane from a 128-bit vector interpreted as 4 packed f32 numbers.
1337 ///
1338 /// Extracts the scalar value of lane specified in the immediate mode operand
1339 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1340 #[inline]
1341 #[cfg_attr(test, assert_instr(f32x4.extract_lane, N = 1))]
1342 #[target_feature(enable = "simd128")]
1343 #[doc(alias("f32x4.extract_lane"))]
1344 #[stable(feature = "wasm_simd", since = "1.54.0")]
1345 pub fn f32x4_extract_lane<const N: usize>(a: v128) -> f32 {
1346     static_assert!(N: usize where N < 4);
1347     unsafe { simd_extract(a.as_f32x4(), N as u32) }
1348 }
1349 
1350 /// Replaces a lane from a 128-bit vector interpreted as 4 packed f32 numbers.
1351 ///
1352 /// Replaces the scalar value of lane specified in the immediate mode operand
1353 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1354 #[inline]
1355 #[cfg_attr(test, assert_instr(f32x4.replace_lane, N = 1))]
1356 #[target_feature(enable = "simd128")]
1357 #[doc(alias("f32x4.replace_lane"))]
1358 #[stable(feature = "wasm_simd", since = "1.54.0")]
1359 pub fn f32x4_replace_lane<const N: usize>(a: v128, val: f32) -> v128 {
1360     static_assert!(N: usize where N < 4);
1361     unsafe { simd_insert(a.as_f32x4(), N as u32, val).v128() }
1362 }
1363 
1364 /// Extracts a lane from a 128-bit vector interpreted as 2 packed f64 numbers.
1365 ///
1366 /// Extracts the scalar value of lane specified in the immediate mode operand
1367 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1368 #[inline]
1369 #[cfg_attr(test, assert_instr(f64x2.extract_lane, N = 1))]
1370 #[target_feature(enable = "simd128")]
1371 #[doc(alias("f64x2.extract_lane"))]
1372 #[stable(feature = "wasm_simd", since = "1.54.0")]
1373 pub fn f64x2_extract_lane<const N: usize>(a: v128) -> f64 {
1374     static_assert!(N: usize where N < 2);
1375     unsafe { simd_extract(a.as_f64x2(), N as u32) }
1376 }
1377 
1378 /// Replaces a lane from a 128-bit vector interpreted as 2 packed f64 numbers.
1379 ///
1380 /// Replaces the scalar value of lane specified in the immediate mode operand
1381 /// `N` from `a`. If `N` is out of bounds then it is a compile time error.
1382 #[inline]
1383 #[cfg_attr(test, assert_instr(f64x2.replace_lane, N = 1))]
1384 #[target_feature(enable = "simd128")]
1385 #[doc(alias("f64x2.replace_lane"))]
1386 #[stable(feature = "wasm_simd", since = "1.54.0")]
1387 pub fn f64x2_replace_lane<const N: usize>(a: v128, val: f64) -> v128 {
1388     static_assert!(N: usize where N < 2);
1389     unsafe { simd_insert(a.as_f64x2(), N as u32, val).v128() }
1390 }
1391 
1392 /// Returns a new vector with lanes selected from the lanes of the first input
1393 /// vector `a` specified in the second input vector `s`.
1394 ///
1395 /// The indices `i` in range [0, 15] select the `i`-th element of `a`. For
1396 /// indices outside of the range the resulting lane is 0.
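///
/// For example (an illustrative sketch; assumes a `wasm32` target with
/// `simd128` enabled and the `i8x16` constructor defined elsewhere in this
/// module):
///
/// ```ignore
/// let a = i8x16(16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
/// // Lane 0 selects a[0] and lane 1 selects a[4]; lane 2 uses index 16,
/// // which is out of range, so that result lane becomes 0.
/// let s = i8x16(0, 4, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
/// let r = i8x16_swizzle(a, s);
/// assert_eq!(i8x16_extract_lane::<0>(r), 16);
/// assert_eq!(i8x16_extract_lane::<1>(r), 20);
/// assert_eq!(i8x16_extract_lane::<2>(r), 0);
/// ```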
1397 #[inline]
1398 #[cfg_attr(test, assert_instr(i8x16.swizzle))]
1399 #[target_feature(enable = "simd128")]
1400 #[doc(alias("i8x16.swizzle"))]
1401 #[stable(feature = "wasm_simd", since = "1.54.0")]
1402 pub fn i8x16_swizzle(a: v128, s: v128) -> v128 {
1403     unsafe { llvm_swizzle(a.as_i8x16(), s.as_i8x16()).v128() }
1404 }
1405 
1406 #[stable(feature = "wasm_simd", since = "1.54.0")]
1407 pub use i8x16_swizzle as u8x16_swizzle;
1408 
1409 /// Creates a vector with identical lanes.
1410 ///
1411 /// Constructs a vector with `a` replicated to all 16 lanes.
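///
/// For example (an illustrative sketch; assumes a `wasm32` target with
/// `simd128` enabled):
///
/// ```ignore
/// let v = i8x16_splat(42);
/// // Every one of the 16 lanes holds the same value.
/// assert_eq!(i8x16_extract_lane::<0>(v), 42);
/// assert_eq!(i8x16_extract_lane::<15>(v), 42);
/// ```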
1412 #[inline]
1413 #[cfg_attr(test, assert_instr(i8x16.splat))]
1414 #[target_feature(enable = "simd128")]
1415 #[doc(alias("i8x16.splat"))]
1416 #[stable(feature = "wasm_simd", since = "1.54.0")]
1417 pub fn i8x16_splat(a: i8) -> v128 {
1418     simd::i8x16::splat(a).v128()
1419 }
1420 
1421 /// Creates a vector with identical lanes.
1422 ///
1423 /// Constructs a vector with `a` replicated to all 16 lanes.
1424 #[inline]
1425 #[cfg_attr(test, assert_instr(i8x16.splat))]
1426 #[target_feature(enable = "simd128")]
1427 #[doc(alias("i8x16.splat"))]
1428 #[stable(feature = "wasm_simd", since = "1.54.0")]
1429 pub fn u8x16_splat(a: u8) -> v128 {
1430     simd::u8x16::splat(a).v128()
1431 }
1432 
1433 /// Creates a vector with identical lanes.
1434 ///
1435 /// Constructs a vector with `a` replicated to all 8 lanes.
1436 #[inline]
1437 #[cfg_attr(test, assert_instr(i16x8.splat))]
1438 #[target_feature(enable = "simd128")]
1439 #[doc(alias("i16x8.splat"))]
1440 #[stable(feature = "wasm_simd", since = "1.54.0")]
1441 pub fn i16x8_splat(a: i16) -> v128 {
1442     simd::i16x8::splat(a).v128()
1443 }
1444 
1445 /// Creates a vector with identical lanes.
1446 ///
1447 /// Constructs a vector with `a` replicated to all 8 lanes.
1448 #[inline]
1449 #[cfg_attr(test, assert_instr(i16x8.splat))]
1450 #[target_feature(enable = "simd128")]
1451 #[doc(alias("i16x8.splat"))]
1452 #[stable(feature = "wasm_simd", since = "1.54.0")]
1453 pub fn u16x8_splat(a: u16) -> v128 {
1454     simd::u16x8::splat(a).v128()
1455 }
1456 
1457 /// Creates a vector with identical lanes.
1458 ///
1459 /// Constructs a vector with `a` replicated to all 4 lanes.
1460 #[inline]
1461 #[cfg_attr(test, assert_instr(i32x4.splat))]
1462 #[target_feature(enable = "simd128")]
1463 #[doc(alias("i32x4.splat"))]
1464 #[stable(feature = "wasm_simd", since = "1.54.0")]
1465 pub fn i32x4_splat(a: i32) -> v128 {
1466     simd::i32x4::splat(a).v128()
1467 }
1468 
1469 /// Creates a vector with identical lanes.
1470 ///
1471 /// Constructs a vector with `a` replicated to all 4 lanes.
1472 #[inline]
1473 #[target_feature(enable = "simd128")]
1474 #[doc(alias("i32x4.splat"))]
1475 #[stable(feature = "wasm_simd", since = "1.54.0")]
1476 pub fn u32x4_splat(a: u32) -> v128 {
1477     i32x4_splat(a as i32)
1478 }
1479 
1480 /// Creates a vector with identical lanes.
1481 ///
1482 /// Constructs a vector with `a` replicated to all 2 lanes.
1483 #[inline]
1484 #[cfg_attr(test, assert_instr(i64x2.splat))]
1485 #[target_feature(enable = "simd128")]
1486 #[doc(alias("i64x2.splat"))]
1487 #[stable(feature = "wasm_simd", since = "1.54.0")]
1488 pub fn i64x2_splat(a: i64) -> v128 {
1489     simd::i64x2::splat(a).v128()
1490 }
1491 
1492 /// Creates a vector with identical lanes.
1493 ///
1494 /// Constructs a vector with `a` replicated to all 2 lanes.
1495 #[inline]
1496 #[target_feature(enable = "simd128")]
1497 #[doc(alias("u64x2.splat"))]
1498 #[stable(feature = "wasm_simd", since = "1.54.0")]
1499 pub fn u64x2_splat(a: u64) -> v128 {
1500     i64x2_splat(a as i64)
1501 }
1502 
1503 /// Creates a vector with identical lanes.
1504 ///
1505 /// Constructs a vector with `a` replicated to all 4 lanes.
1506 #[inline]
1507 #[cfg_attr(test, assert_instr(f32x4.splat))]
1508 #[target_feature(enable = "simd128")]
1509 #[doc(alias("f32x4.splat"))]
1510 #[stable(feature = "wasm_simd", since = "1.54.0")]
1511 pub fn f32x4_splat(a: f32) -> v128 {
1512     simd::f32x4::splat(a).v128()
1513 }
1514 
1515 /// Creates a vector with identical lanes.
1516 ///
1517 /// Constructs a vector with `a` replicated to all 2 lanes.
1518 #[inline]
1519 #[cfg_attr(test, assert_instr(f64x2.splat))]
1520 #[target_feature(enable = "simd128")]
1521 #[doc(alias("f64x2.splat"))]
1522 #[stable(feature = "wasm_simd", since = "1.54.0")]
1523 pub fn f64x2_splat(a: f64) -> v128 {
1524     simd::f64x2::splat(a).v128()
1525 }
1526 
1527 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1528 /// integers.
1529 ///
1530 /// Returns a new vector where each lane is all ones if the pairwise elements
1531 /// were equal, or all zeros if the elements were not equal.
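///
/// For example (an illustrative sketch; assumes a `wasm32` target with
/// `simd128` enabled and the `i8x16` constructor defined elsewhere in this
/// module):
///
/// ```ignore
/// let a = i8x16_splat(1);
/// let b = i8x16(1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2);
/// let m = i8x16_eq(a, b);
/// // Equal lanes become all ones (-1 as i8), unequal lanes become 0.
/// assert_eq!(i8x16_extract_lane::<0>(m), -1);
/// assert_eq!(i8x16_extract_lane::<1>(m), 0);
/// ```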
1532 #[inline]
1533 #[cfg_attr(test, assert_instr(i8x16.eq))]
1534 #[target_feature(enable = "simd128")]
1535 #[doc(alias("i8x16.eq"))]
1536 #[stable(feature = "wasm_simd", since = "1.54.0")]
1537 pub fn i8x16_eq(a: v128, b: v128) -> v128 {
1538     unsafe { simd_eq::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1539 }
1540 
1541 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1542 /// integers.
1543 ///
1544 /// Returns a new vector where each lane is all ones if the pairwise elements
1545 /// were not equal, or all zeros if the elements were equal.
1546 #[inline]
1547 #[cfg_attr(test, assert_instr(i8x16.ne))]
1548 #[target_feature(enable = "simd128")]
1549 #[doc(alias("i8x16.ne"))]
1550 #[stable(feature = "wasm_simd", since = "1.54.0")]
1551 pub fn i8x16_ne(a: v128, b: v128) -> v128 {
1552     unsafe { simd_ne::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1553 }
1554 
1555 #[stable(feature = "wasm_simd", since = "1.54.0")]
1556 pub use i8x16_eq as u8x16_eq;
1557 #[stable(feature = "wasm_simd", since = "1.54.0")]
1558 pub use i8x16_ne as u8x16_ne;
1559 
1560 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1561 /// signed integers.
1562 ///
1563 /// Returns a new vector where each lane is all ones if the pairwise left
1564 /// element is less than the pairwise right element, or all zeros otherwise.
1565 #[inline]
1566 #[cfg_attr(test, assert_instr(i8x16.lt_s))]
1567 #[target_feature(enable = "simd128")]
1568 #[doc(alias("i8x16.lt_s"))]
1569 #[stable(feature = "wasm_simd", since = "1.54.0")]
1570 pub fn i8x16_lt(a: v128, b: v128) -> v128 {
1571     unsafe { simd_lt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1572 }
1573 
1574 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1575 /// unsigned integers.
1576 ///
1577 /// Returns a new vector where each lane is all ones if the pairwise left
1578 /// element is less than the pairwise right element, or all zeros otherwise.
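///
/// For example (an illustrative sketch showing how the unsigned comparison
/// differs from `i8x16_lt`; assumes a `wasm32` target with `simd128` enabled):
///
/// ```ignore
/// // The bit pattern 0xFF is -1 as a signed lane but 255 as an unsigned lane.
/// let a = i8x16_splat(-1);
/// let b = i8x16_splat(1);
/// assert_eq!(i8x16_extract_lane::<0>(i8x16_lt(a, b)), -1); // -1 < 1 is true
/// assert_eq!(i8x16_extract_lane::<0>(u8x16_lt(a, b)), 0);  // 255 < 1 is false
/// ```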
1579 #[inline]
1580 #[cfg_attr(test, assert_instr(i8x16.lt_u))]
1581 #[target_feature(enable = "simd128")]
1582 #[doc(alias("i8x16.lt_u"))]
1583 #[stable(feature = "wasm_simd", since = "1.54.0")]
1584 pub fn u8x16_lt(a: v128, b: v128) -> v128 {
1585     unsafe { simd_lt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
1586 }
1587 
1588 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1589 /// signed integers.
1590 ///
1591 /// Returns a new vector where each lane is all ones if the pairwise left
1592 /// element is greater than the pairwise right element, or all zeros otherwise.
1593 #[inline]
1594 #[cfg_attr(test, assert_instr(i8x16.gt_s))]
1595 #[target_feature(enable = "simd128")]
1596 #[doc(alias("i8x16.gt_s"))]
1597 #[stable(feature = "wasm_simd", since = "1.54.0")]
1598 pub fn i8x16_gt(a: v128, b: v128) -> v128 {
1599     unsafe { simd_gt::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1600 }
1601 
1602 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1603 /// unsigned integers.
1604 ///
1605 /// Returns a new vector where each lane is all ones if the pairwise left
1606 /// element is greater than the pairwise right element, or all zeros otherwise.
1607 #[inline]
1608 #[cfg_attr(test, assert_instr(i8x16.gt_u))]
1609 #[target_feature(enable = "simd128")]
1610 #[doc(alias("i8x16.gt_u"))]
1611 #[stable(feature = "wasm_simd", since = "1.54.0")]
1612 pub fn u8x16_gt(a: v128, b: v128) -> v128 {
1613     unsafe { simd_gt::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
1614 }
1615 
1616 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1617 /// signed integers.
1618 ///
1619 /// Returns a new vector where each lane is all ones if the pairwise left
1620 /// element is less than the pairwise right element, or all zeros otherwise.
1621 #[inline]
1622 #[cfg_attr(test, assert_instr(i8x16.le_s))]
1623 #[target_feature(enable = "simd128")]
1624 #[doc(alias("i8x16.le_s"))]
1625 #[stable(feature = "wasm_simd", since = "1.54.0")]
1626 pub fn i8x16_le(a: v128, b: v128) -> v128 {
1627     unsafe { simd_le::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1628 }
1629 
1630 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1631 /// unsigned integers.
1632 ///
1633 /// Returns a new vector where each lane is all ones if the pairwise left
1634 /// element is less than the pairwise right element, or all zeros otherwise.
1635 #[inline]
1636 #[cfg_attr(test, assert_instr(i8x16.le_u))]
1637 #[target_feature(enable = "simd128")]
1638 #[doc(alias("i8x16.le_u"))]
1639 #[stable(feature = "wasm_simd", since = "1.54.0")]
1640 pub fn u8x16_le(a: v128, b: v128) -> v128 {
1641     unsafe { simd_le::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
1642 }
1643 
1644 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1645 /// signed integers.
1646 ///
1647 /// Returns a new vector where each lane is all ones if the pairwise left
1648 /// element is greater than the pairwise right element, or all zeros otherwise.
1649 #[inline]
1650 #[cfg_attr(test, assert_instr(i8x16.ge_s))]
1651 #[target_feature(enable = "simd128")]
1652 #[doc(alias("i8x16.ge_s"))]
1653 #[stable(feature = "wasm_simd", since = "1.54.0")]
1654 pub fn i8x16_ge(a: v128, b: v128) -> v128 {
1655     unsafe { simd_ge::<_, simd::i8x16>(a.as_i8x16(), b.as_i8x16()).v128() }
1656 }
1657 
1658 /// Compares two 128-bit vectors as if they were two vectors of 16 eight-bit
1659 /// unsigned integers.
1660 ///
1661 /// Returns a new vector where each lane is all ones if the pairwise left
1662 /// element is greater than the pairwise right element, or all zeros otherwise.
1663 #[inline]
1664 #[cfg_attr(test, assert_instr(i8x16.ge_u))]
1665 #[target_feature(enable = "simd128")]
1666 #[doc(alias("i8x16.ge_u"))]
1667 #[stable(feature = "wasm_simd", since = "1.54.0")]
1668 pub fn u8x16_ge(a: v128, b: v128) -> v128 {
1669     unsafe { simd_ge::<_, simd::i8x16>(a.as_u8x16(), b.as_u8x16()).v128() }
1670 }
1671 
1672 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1673 /// integers.
1674 ///
1675 /// Returns a new vector where each lane is all ones if the pairwise elements
1676 /// were equal, or all zeros if the elements were not equal.
1677 #[inline]
1678 #[cfg_attr(test, assert_instr(i16x8.eq))]
1679 #[target_feature(enable = "simd128")]
1680 #[doc(alias("i16x8.eq"))]
1681 #[stable(feature = "wasm_simd", since = "1.54.0")]
1682 pub fn i16x8_eq(a: v128, b: v128) -> v128 {
1683     unsafe { simd_eq::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
1684 }
1685 
1686 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1687 /// integers.
1688 ///
1689 /// Returns a new vector where each lane is all ones if the pairwise elements
1690 /// were not equal, or all zeros if the elements were equal.
1691 #[inline]
1692 #[cfg_attr(test, assert_instr(i16x8.ne))]
1693 #[target_feature(enable = "simd128")]
1694 #[doc(alias("i16x8.ne"))]
1695 #[stable(feature = "wasm_simd", since = "1.54.0")]
1696 pub fn i16x8_ne(a: v128, b: v128) -> v128 {
1697     unsafe { simd_ne::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
1698 }
1699 
1700 #[stable(feature = "wasm_simd", since = "1.54.0")]
1701 pub use i16x8_eq as u16x8_eq;
1702 #[stable(feature = "wasm_simd", since = "1.54.0")]
1703 pub use i16x8_ne as u16x8_ne;
1704 
1705 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1706 /// signed integers.
1707 ///
1708 /// Returns a new vector where each lane is all ones if the pairwise left
1709 /// element is less than the pairwise right element, or all zeros otherwise.
1710 #[inline]
1711 #[cfg_attr(test, assert_instr(i16x8.lt_s))]
1712 #[target_feature(enable = "simd128")]
1713 #[doc(alias("i16x8.lt_s"))]
1714 #[stable(feature = "wasm_simd", since = "1.54.0")]
1715 pub fn i16x8_lt(a: v128, b: v128) -> v128 {
1716     unsafe { simd_lt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
1717 }
1718 
1719 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1720 /// unsigned integers.
1721 ///
1722 /// Returns a new vector where each lane is all ones if the pairwise left
1723 /// element is less than the pairwise right element, or all zeros otherwise.
1724 #[inline]
1725 #[cfg_attr(test, assert_instr(i16x8.lt_u))]
1726 #[target_feature(enable = "simd128")]
1727 #[doc(alias("i16x8.lt_u"))]
1728 #[stable(feature = "wasm_simd", since = "1.54.0")]
1729 pub fn u16x8_lt(a: v128, b: v128) -> v128 {
1730     unsafe { simd_lt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
1731 }
1732 
1733 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1734 /// signed integers.
1735 ///
1736 /// Returns a new vector where each lane is all ones if the pairwise left
1737 /// element is greater than the pairwise right element, or all zeros otherwise.
1738 #[inline]
1739 #[cfg_attr(test, assert_instr(i16x8.gt_s))]
1740 #[target_feature(enable = "simd128")]
1741 #[doc(alias("i16x8.gt_s"))]
1742 #[stable(feature = "wasm_simd", since = "1.54.0")]
1743 pub fn i16x8_gt(a: v128, b: v128) -> v128 {
1744     unsafe { simd_gt::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
1745 }
1746 
1747 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1748 /// unsigned integers.
1749 ///
1750 /// Returns a new vector where each lane is all ones if the pairwise left
1751 /// element is greater than the pairwise right element, or all zeros otherwise.
1752 #[inline]
1753 #[cfg_attr(test, assert_instr(i16x8.gt_u))]
1754 #[target_feature(enable = "simd128")]
1755 #[doc(alias("i16x8.gt_u"))]
1756 #[stable(feature = "wasm_simd", since = "1.54.0")]
1757 pub fn u16x8_gt(a: v128, b: v128) -> v128 {
1758     unsafe { simd_gt::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
1759 }
1760 
1761 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1762 /// signed integers.
1763 ///
1764 /// Returns a new vector where each lane is all ones if the pairwise left
1765 /// element is less than the pairwise right element, or all zeros otherwise.
1766 #[inline]
1767 #[cfg_attr(test, assert_instr(i16x8.le_s))]
1768 #[target_feature(enable = "simd128")]
1769 #[doc(alias("i16x8.le_s"))]
1770 #[stable(feature = "wasm_simd", since = "1.54.0")]
1771 pub fn i16x8_le(a: v128, b: v128) -> v128 {
1772     unsafe { simd_le::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
1773 }
1774 
1775 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1776 /// unsigned integers.
1777 ///
1778 /// Returns a new vector where each lane is all ones if the pairwise left
1779 /// element is less than the pairwise right element, or all zeros otherwise.
1780 #[inline]
1781 #[cfg_attr(test, assert_instr(i16x8.le_u))]
1782 #[target_feature(enable = "simd128")]
1783 #[doc(alias("i16x8.le_u"))]
1784 #[stable(feature = "wasm_simd", since = "1.54.0")]
1785 pub fn u16x8_le(a: v128, b: v128) -> v128 {
1786     unsafe { simd_le::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
1787 }
1788 
1789 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1790 /// signed integers.
1791 ///
1792 /// Returns a new vector where each lane is all ones if the pairwise left
1793 /// element is greater than the pairwise right element, or all zeros otherwise.
1794 #[inline]
1795 #[cfg_attr(test, assert_instr(i16x8.ge_s))]
1796 #[target_feature(enable = "simd128")]
1797 #[doc(alias("i16x8.ge_s"))]
1798 #[stable(feature = "wasm_simd", since = "1.54.0")]
1799 pub fn i16x8_ge(a: v128, b: v128) -> v128 {
1800     unsafe { simd_ge::<_, simd::i16x8>(a.as_i16x8(), b.as_i16x8()).v128() }
1801 }
1802 
1803 /// Compares two 128-bit vectors as if they were two vectors of 8 sixteen-bit
1804 /// unsigned integers.
1805 ///
1806 /// Returns a new vector where each lane is all ones if the pairwise left
1807 /// element is greater than the pairwise right element, or all zeros otherwise.
1808 #[inline]
1809 #[cfg_attr(test, assert_instr(i16x8.ge_u))]
1810 #[target_feature(enable = "simd128")]
1811 #[doc(alias("i16x8.ge_u"))]
1812 #[stable(feature = "wasm_simd", since = "1.54.0")]
1813 pub fn u16x8_ge(a: v128, b: v128) -> v128 {
1814     unsafe { simd_ge::<_, simd::i16x8>(a.as_u16x8(), b.as_u16x8()).v128() }
1815 }
1816 
1817 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1818 /// integers.
1819 ///
1820 /// Returns a new vector where each lane is all ones if the pairwise elements
1821 /// were equal, or all zeros if the elements were not equal.
1822 #[inline]
1823 #[cfg_attr(test, assert_instr(i32x4.eq))]
1824 #[target_feature(enable = "simd128")]
1825 #[doc(alias("i32x4.eq"))]
1826 #[stable(feature = "wasm_simd", since = "1.54.0")]
1827 pub fn i32x4_eq(a: v128, b: v128) -> v128 {
1828     unsafe { simd_eq::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
1829 }
1830 
1831 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1832 /// integers.
1833 ///
1834 /// Returns a new vector where each lane is all ones if the pairwise elements
1835 /// were not equal, or all zeros if the elements were equal.
1836 #[inline]
1837 #[cfg_attr(test, assert_instr(i32x4.ne))]
1838 #[target_feature(enable = "simd128")]
1839 #[doc(alias("i32x4.ne"))]
1840 #[stable(feature = "wasm_simd", since = "1.54.0")]
1841 pub fn i32x4_ne(a: v128, b: v128) -> v128 {
1842     unsafe { simd_ne::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
1843 }
1844 
1845 #[stable(feature = "wasm_simd", since = "1.54.0")]
1846 pub use i32x4_eq as u32x4_eq;
1847 #[stable(feature = "wasm_simd", since = "1.54.0")]
1848 pub use i32x4_ne as u32x4_ne;
1849 
1850 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1851 /// signed integers.
1852 ///
1853 /// Returns a new vector where each lane is all ones if the pairwise left
1854 /// element is less than the pairwise right element, or all zeros otherwise.
1855 #[inline]
1856 #[cfg_attr(test, assert_instr(i32x4.lt_s))]
1857 #[target_feature(enable = "simd128")]
1858 #[doc(alias("i32x4.lt_s"))]
1859 #[stable(feature = "wasm_simd", since = "1.54.0")]
1860 pub fn i32x4_lt(a: v128, b: v128) -> v128 {
1861     unsafe { simd_lt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
1862 }
1863 
1864 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1865 /// unsigned integers.
1866 ///
1867 /// Returns a new vector where each lane is all ones if the pairwise left
1868 /// element is less than the pairwise right element, or all zeros otherwise.
1869 #[inline]
1870 #[cfg_attr(test, assert_instr(i32x4.lt_u))]
1871 #[target_feature(enable = "simd128")]
1872 #[doc(alias("i32x4.lt_u"))]
1873 #[stable(feature = "wasm_simd", since = "1.54.0")]
1874 pub fn u32x4_lt(a: v128, b: v128) -> v128 {
1875     unsafe { simd_lt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
1876 }
1877 
1878 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1879 /// signed integers.
1880 ///
1881 /// Returns a new vector where each lane is all ones if the pairwise left
1882 /// element is greater than the pairwise right element, or all zeros otherwise.
1883 #[inline]
1884 #[cfg_attr(test, assert_instr(i32x4.gt_s))]
1885 #[target_feature(enable = "simd128")]
1886 #[doc(alias("i32x4.gt_s"))]
1887 #[stable(feature = "wasm_simd", since = "1.54.0")]
1888 pub fn i32x4_gt(a: v128, b: v128) -> v128 {
1889     unsafe { simd_gt::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
1890 }
1891 
1892 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1893 /// unsigned integers.
1894 ///
1895 /// Returns a new vector where each lane is all ones if the pairwise left
1896 /// element is greater than the pairwise right element, or all zeros otherwise.
1897 #[inline]
1898 #[cfg_attr(test, assert_instr(i32x4.gt_u))]
1899 #[target_feature(enable = "simd128")]
1900 #[doc(alias("i32x4.gt_u"))]
1901 #[stable(feature = "wasm_simd", since = "1.54.0")]
1902 pub fn u32x4_gt(a: v128, b: v128) -> v128 {
1903     unsafe { simd_gt::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
1904 }
1905 
1906 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1907 /// signed integers.
1908 ///
1909 /// Returns a new vector where each lane is all ones if the pairwise left
1910 /// element is less than the pairwise right element, or all zeros otherwise.
1911 #[inline]
1912 #[cfg_attr(test, assert_instr(i32x4.le_s))]
1913 #[target_feature(enable = "simd128")]
1914 #[doc(alias("i32x4.le_s"))]
1915 #[stable(feature = "wasm_simd", since = "1.54.0")]
1916 pub fn i32x4_le(a: v128, b: v128) -> v128 {
1917     unsafe { simd_le::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
1918 }
1919 
1920 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1921 /// unsigned integers.
1922 ///
1923 /// Returns a new vector where each lane is all ones if the pairwise left
1924 /// element is less than the pairwise right element, or all zeros otherwise.
1925 #[inline]
1926 #[cfg_attr(test, assert_instr(i32x4.le_u))]
1927 #[target_feature(enable = "simd128")]
1928 #[doc(alias("i32x4.le_u"))]
1929 #[stable(feature = "wasm_simd", since = "1.54.0")]
1930 pub fn u32x4_le(a: v128, b: v128) -> v128 {
1931     unsafe { simd_le::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
1932 }
1933 
1934 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1935 /// signed integers.
1936 ///
1937 /// Returns a new vector where each lane is all ones if the pairwise left
1938 /// element is greater than the pairwise right element, or all zeros otherwise.
1939 #[inline]
1940 #[cfg_attr(test, assert_instr(i32x4.ge_s))]
1941 #[target_feature(enable = "simd128")]
1942 #[doc(alias("i32x4.ge_s"))]
1943 #[stable(feature = "wasm_simd", since = "1.54.0")]
1944 pub fn i32x4_ge(a: v128, b: v128) -> v128 {
1945     unsafe { simd_ge::<_, simd::i32x4>(a.as_i32x4(), b.as_i32x4()).v128() }
1946 }
1947 
1948 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
1949 /// unsigned integers.
1950 ///
1951 /// Returns a new vector where each lane is all ones if the pairwise left
1952 /// element is greater than the pairwise right element, or all zeros otherwise.
1953 #[inline]
1954 #[cfg_attr(test, assert_instr(i32x4.ge_u))]
1955 #[target_feature(enable = "simd128")]
1956 #[doc(alias("i32x4.ge_u"))]
1957 #[stable(feature = "wasm_simd", since = "1.54.0")]
1958 pub fn u32x4_ge(a: v128, b: v128) -> v128 {
1959     unsafe { simd_ge::<_, simd::i32x4>(a.as_u32x4(), b.as_u32x4()).v128() }
1960 }
1961 
1962 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
1963 /// integers.
1964 ///
1965 /// Returns a new vector where each lane is all ones if the pairwise elements
1966 /// were equal, or all zeros if the elements were not equal.
1967 #[inline]
1968 #[cfg_attr(test, assert_instr(i64x2.eq))]
1969 #[target_feature(enable = "simd128")]
1970 #[doc(alias("i64x2.eq"))]
1971 #[stable(feature = "wasm_simd", since = "1.54.0")]
1972 pub fn i64x2_eq(a: v128, b: v128) -> v128 {
1973     unsafe { simd_eq::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
1974 }
1975 
1976 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
1977 /// integers.
1978 ///
1979 /// Returns a new vector where each lane is all ones if the pairwise elements
1980 /// were not equal, or all zeros if the elements were equal.
1981 #[inline]
1982 #[cfg_attr(test, assert_instr(i64x2.ne))]
1983 #[target_feature(enable = "simd128")]
1984 #[doc(alias("i64x2.ne"))]
1985 #[stable(feature = "wasm_simd", since = "1.54.0")]
1986 pub fn i64x2_ne(a: v128, b: v128) -> v128 {
1987     unsafe { simd_ne::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
1988 }
1989 
1990 #[stable(feature = "wasm_simd", since = "1.54.0")]
1991 pub use i64x2_eq as u64x2_eq;
1992 #[stable(feature = "wasm_simd", since = "1.54.0")]
1993 pub use i64x2_ne as u64x2_ne;
1994 
1995 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
1996 /// signed integers.
1997 ///
1998 /// Returns a new vector where each lane is all ones if the pairwise left
1999 /// element is less than the pairwise right element, or all zeros otherwise.
2000 #[inline]
2001 #[cfg_attr(test, assert_instr(i64x2.lt_s))]
2002 #[target_feature(enable = "simd128")]
2003 #[doc(alias("i64x2.lt_s"))]
2004 #[stable(feature = "wasm_simd", since = "1.54.0")]
2005 pub fn i64x2_lt(a: v128, b: v128) -> v128 {
2006     unsafe { simd_lt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
2007 }
2008 
2009 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
2010 /// signed integers.
2011 ///
2012 /// Returns a new vector where each lane is all ones if the pairwise left
2013 /// element is greater than the pairwise right element, or all zeros otherwise.
2014 #[inline]
2015 #[cfg_attr(test, assert_instr(i64x2.gt_s))]
2016 #[target_feature(enable = "simd128")]
2017 #[doc(alias("i64x2.gt_s"))]
2018 #[stable(feature = "wasm_simd", since = "1.54.0")]
2019 pub fn i64x2_gt(a: v128, b: v128) -> v128 {
2020     unsafe { simd_gt::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
2021 }
2022 
2023 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
2024 /// signed integers.
2025 ///
2026 /// Returns a new vector where each lane is all ones if the pairwise left
2027 /// element is less than the pairwise right element, or all zeros otherwise.
2028 #[inline]
2029 #[cfg_attr(test, assert_instr(i64x2.le_s))]
2030 #[target_feature(enable = "simd128")]
2031 #[doc(alias("i64x2.le_s"))]
2032 #[stable(feature = "wasm_simd", since = "1.54.0")]
2033 pub fn i64x2_le(a: v128, b: v128) -> v128 {
2034     unsafe { simd_le::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
2035 }
2036 
2037 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
2038 /// signed integers.
2039 ///
2040 /// Returns a new vector where each lane is all ones if the pairwise left
2041 /// element is greater than the pairwise right element, or all zeros otherwise.
2042 #[inline]
2043 #[cfg_attr(test, assert_instr(i64x2.ge_s))]
2044 #[target_feature(enable = "simd128")]
2045 #[doc(alias("i64x2.ge_s"))]
2046 #[stable(feature = "wasm_simd", since = "1.54.0")]
2047 pub fn i64x2_ge(a: v128, b: v128) -> v128 {
2048     unsafe { simd_ge::<_, simd::i64x2>(a.as_i64x2(), b.as_i64x2()).v128() }
2049 }
2050 
2051 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
2052 /// floating point numbers.
2053 ///
2054 /// Returns a new vector where each lane is all ones if the pairwise elements
2055 /// were equal, or all zeros if the elements were not equal.
2056 #[inline]
2057 #[cfg_attr(test, assert_instr(f32x4.eq))]
2058 #[target_feature(enable = "simd128")]
2059 #[doc(alias("f32x4.eq"))]
2060 #[stable(feature = "wasm_simd", since = "1.54.0")]
2061 pub fn f32x4_eq(a: v128, b: v128) -> v128 {
2062     unsafe { simd_eq::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
2063 }
2064 
2065 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
2066 /// floating point numbers.
2067 ///
2068 /// Returns a new vector where each lane is all ones if the pairwise elements
2069 /// were not equal, or all zeros if the elements were equal.
2070 #[inline]
2071 #[cfg_attr(test, assert_instr(f32x4.ne))]
2072 #[target_feature(enable = "simd128")]
2073 #[doc(alias("f32x4.ne"))]
2074 #[stable(feature = "wasm_simd", since = "1.54.0")]
2075 pub fn f32x4_ne(a: v128, b: v128) -> v128 {
2076     unsafe { simd_ne::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
2077 }
2078 
2079 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
2080 /// floating point numbers.
2081 ///
2082 /// Returns a new vector where each lane is all ones if the pairwise left
2083 /// element is less than the pairwise right element, or all zeros otherwise.
2084 #[inline]
2085 #[cfg_attr(test, assert_instr(f32x4.lt))]
2086 #[target_feature(enable = "simd128")]
2087 #[doc(alias("f32x4.lt"))]
2088 #[stable(feature = "wasm_simd", since = "1.54.0")]
2089 pub fn f32x4_lt(a: v128, b: v128) -> v128 {
2090     unsafe { simd_lt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
2091 }
2092 
2093 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
2094 /// floating point numbers.
2095 ///
2096 /// Returns a new vector where each lane is all ones if the pairwise left
2097 /// element is greater than the pairwise right element, or all zeros otherwise.
2098 #[inline]
2099 #[cfg_attr(test, assert_instr(f32x4.gt))]
2100 #[target_feature(enable = "simd128")]
2101 #[doc(alias("f32x4.gt"))]
2102 #[stable(feature = "wasm_simd", since = "1.54.0")]
2103 pub fn f32x4_gt(a: v128, b: v128) -> v128 {
2104     unsafe { simd_gt::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
2105 }
2106 
2107 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
2108 /// floating point numbers.
2109 ///
2110 /// Returns a new vector where each lane is all ones if the pairwise left
2111 /// element is less than the pairwise right element, or all zeros otherwise.
2112 #[inline]
2113 #[cfg_attr(test, assert_instr(f32x4.le))]
2114 #[target_feature(enable = "simd128")]
2115 #[doc(alias("f32x4.le"))]
2116 #[stable(feature = "wasm_simd", since = "1.54.0")]
2117 pub fn f32x4_le(a: v128, b: v128) -> v128 {
2118     unsafe { simd_le::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
2119 }
2120 
2121 /// Compares two 128-bit vectors as if they were two vectors of 4 thirty-two-bit
2122 /// floating point numbers.
2123 ///
2124 /// Returns a new vector where each lane is all ones if the pairwise left
2125 /// element is greater than the pairwise right element, or all zeros otherwise.
2126 #[inline]
2127 #[cfg_attr(test, assert_instr(f32x4.ge))]
2128 #[target_feature(enable = "simd128")]
2129 #[doc(alias("f32x4.ge"))]
2130 #[stable(feature = "wasm_simd", since = "1.54.0")]
2131 pub fn f32x4_ge(a: v128, b: v128) -> v128 {
2132     unsafe { simd_ge::<_, simd::i32x4>(a.as_f32x4(), b.as_f32x4()).v128() }
2133 }
2134 
2135 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
2136 /// floating point numbers.
2137 ///
2138 /// Returns a new vector where each lane is all ones if the pairwise elements
2139 /// were equal, or all zeros if the elements were not equal.
2140 #[inline]
2141 #[cfg_attr(test, assert_instr(f64x2.eq))]
2142 #[target_feature(enable = "simd128")]
2143 #[doc(alias("f64x2.eq"))]
2144 #[stable(feature = "wasm_simd", since = "1.54.0")]
2145 pub fn f64x2_eq(a: v128, b: v128) -> v128 {
2146     unsafe { simd_eq::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
2147 }
2148 
2149 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
2150 /// floating point numbers.
2151 ///
2152 /// Returns a new vector where each lane is all ones if the pairwise elements
2153 /// were not equal, or all zeros if the elements were equal.
2154 #[inline]
2155 #[cfg_attr(test, assert_instr(f64x2.ne))]
2156 #[target_feature(enable = "simd128")]
2157 #[doc(alias("f64x2.ne"))]
2158 #[stable(feature = "wasm_simd", since = "1.54.0")]
2159 pub fn f64x2_ne(a: v128, b: v128) -> v128 {
2160     unsafe { simd_ne::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
2161 }
2162 
2163 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
2164 /// floating point numbers.
2165 ///
2166 /// Returns a new vector where each lane is all ones if the pairwise left
2167 /// element is less than the pairwise right element, or all zeros otherwise.
2168 #[inline]
2169 #[cfg_attr(test, assert_instr(f64x2.lt))]
2170 #[target_feature(enable = "simd128")]
2171 #[doc(alias("f64x2.lt"))]
2172 #[stable(feature = "wasm_simd", since = "1.54.0")]
2173 pub fn f64x2_lt(a: v128, b: v128) -> v128 {
2174     unsafe { simd_lt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
2175 }
2176 
2177 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
2178 /// floating point numbers.
2179 ///
2180 /// Returns a new vector where each lane is all ones if the pairwise left
2181 /// element is greater than the pairwise right element, or all zeros otherwise.
2182 #[inline]
2183 #[cfg_attr(test, assert_instr(f64x2.gt))]
2184 #[target_feature(enable = "simd128")]
2185 #[doc(alias("f64x2.gt"))]
2186 #[stable(feature = "wasm_simd", since = "1.54.0")]
2187 pub fn f64x2_gt(a: v128, b: v128) -> v128 {
2188     unsafe { simd_gt::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
2189 }
2190 
2191 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
2192 /// floating point numbers.
2193 ///
2194 /// Returns a new vector where each lane is all ones if the pairwise left
2195 /// element is less than the pairwise right element, or all zeros otherwise.
2196 #[inline]
2197 #[cfg_attr(test, assert_instr(f64x2.le))]
2198 #[target_feature(enable = "simd128")]
2199 #[doc(alias("f64x2.le"))]
2200 #[stable(feature = "wasm_simd", since = "1.54.0")]
2201 pub fn f64x2_le(a: v128, b: v128) -> v128 {
2202     unsafe { simd_le::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
2203 }
2204 
2205 /// Compares two 128-bit vectors as if they were two vectors of 2 sixty-four-bit
2206 /// floating point numbers.
2207 ///
2208 /// Returns a new vector where each lane is all ones if the pairwise left
2209 /// element is greater than the pairwise right element, or all zeros otherwise.
2210 #[inline]
2211 #[cfg_attr(test, assert_instr(f64x2.ge))]
2212 #[target_feature(enable = "simd128")]
2213 #[doc(alias("f64x2.ge"))]
2214 #[stable(feature = "wasm_simd", since = "1.54.0")]
2215 pub fn f64x2_ge(a: v128, b: v128) -> v128 {
2216     unsafe { simd_ge::<_, simd::i64x2>(a.as_f64x2(), b.as_f64x2()).v128() }
2217 }
2218 
2219 /// Flips each bit of the 128-bit input vector.
2220 #[inline]
2221 #[cfg_attr(test, assert_instr(v128.not))]
2222 #[target_feature(enable = "simd128")]
2223 #[doc(alias("v128.not"))]
2224 #[stable(feature = "wasm_simd", since = "1.54.0")]
2225 pub fn v128_not(a: v128) -> v128 {
2226     unsafe { simd_xor(a.as_i64x2(), simd::i64x2(!0, !0)).v128() }
2227 }
2228 
2229 /// Performs a bitwise and of the two input 128-bit vectors, returning the
2230 /// resulting vector.
2231 #[inline]
2232 #[cfg_attr(test, assert_instr(v128.and))]
2233 #[target_feature(enable = "simd128")]
2234 #[doc(alias("v128.and"))]
2235 #[stable(feature = "wasm_simd", since = "1.54.0")]
2236 pub fn v128_and(a: v128, b: v128) -> v128 {
2237     unsafe { simd_and(a.as_i64x2(), b.as_i64x2()).v128() }
2238 }
2239 
2240 /// Bitwise AND of bits of `a` and the logical inverse of bits of `b`.
2241 ///
2242 /// This operation is equivalent to `v128.and(a, v128.not(b))`
2243 #[inline]
2244 #[cfg_attr(test, assert_instr(v128.andnot))]
2245 #[target_feature(enable = "simd128")]
2246 #[doc(alias("v128.andnot"))]
2247 #[stable(feature = "wasm_simd", since = "1.54.0")]
2248 pub fn v128_andnot(a: v128, b: v128) -> v128 {
2249     unsafe { simd_and(a.as_i64x2(), simd_xor(b.as_i64x2(), simd::i64x2(-1, -1))).v128() }
2250 }
2251 
2252 /// Performs a bitwise or of the two input 128-bit vectors, returning the
2253 /// resulting vector.
2254 #[inline]
2255 #[cfg_attr(test, assert_instr(v128.or))]
2256 #[target_feature(enable = "simd128")]
2257 #[doc(alias("v128.or"))]
2258 #[stable(feature = "wasm_simd", since = "1.54.0")]
2259 pub fn v128_or(a: v128, b: v128) -> v128 {
2260     unsafe { simd_or(a.as_i64x2(), b.as_i64x2()).v128() }
2261 }
2262 
2263 /// Performs a bitwise xor of the two input 128-bit vectors, returning the
2264 /// resulting vector.
2265 #[inline]
2266 #[cfg_attr(test, assert_instr(v128.xor))]
2267 #[target_feature(enable = "simd128")]
2268 #[doc(alias("v128.xor"))]
2269 #[stable(feature = "wasm_simd", since = "1.54.0")]
2270 pub fn v128_xor(a: v128, b: v128) -> v128 {
2271     unsafe { simd_xor(a.as_i64x2(), b.as_i64x2()).v128() }
2272 }
2273 
2274 /// Use the bitmask in `c` to select bits from `v1` when 1 and `v2` when 0.
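///
/// For example (an illustrative sketch; assumes a `wasm32` target with
/// `simd128` enabled):
///
/// ```ignore
/// let v1 = u8x16_splat(0xFF);
/// let v2 = u8x16_splat(0x00);
/// let c = u8x16_splat(0b1111_0000);
/// // Bits set in `c` are taken from `v1`, clear bits are taken from `v2`.
/// let r = v128_bitselect(v1, v2, c);
/// assert_eq!(u8x16_extract_lane::<0>(r), 0b1111_0000);
/// ```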
2275 #[inline]
2276 #[cfg_attr(test, assert_instr(v128.bitselect))]
2277 #[target_feature(enable = "simd128")]
2278 #[doc(alias("v128.bitselect"))]
2279 #[stable(feature = "wasm_simd", since = "1.54.0")]
2280 pub fn v128_bitselect(v1: v128, v2: v128, c: v128) -> v128 {
2281     unsafe { llvm_bitselect(v1.as_i8x16(), v2.as_i8x16(), c.as_i8x16()).v128() }
2282 }
2283 
2284 /// Returns `true` if any bit in `a` is set, or `false` otherwise.
2285 #[inline]
2286 #[cfg_attr(test, assert_instr(v128.any_true))]
2287 #[target_feature(enable = "simd128")]
2288 #[doc(alias("v128.any_true"))]
2289 #[stable(feature = "wasm_simd", since = "1.54.0")]
2290 pub fn v128_any_true(a: v128) -> bool {
2291     unsafe { llvm_any_true_i8x16(a.as_i8x16()) != 0 }
2292 }
2293 
2294 /// Lane-wise wrapping absolute value.
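///
/// For example (an illustrative sketch; "wrapping" here means `i8::MIN` maps
/// to itself; assumes a `wasm32` target with `simd128` enabled):
///
/// ```ignore
/// assert_eq!(i8x16_extract_lane::<0>(i8x16_abs(i8x16_splat(-5))), 5);
/// // -128 has no positive counterpart in i8, so it wraps back to -128.
/// assert_eq!(i8x16_extract_lane::<0>(i8x16_abs(i8x16_splat(i8::MIN))), i8::MIN);
/// ```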
2295 #[inline]
2296 #[cfg_attr(test, assert_instr(i8x16.abs))]
2297 #[target_feature(enable = "simd128")]
2298 #[doc(alias("i8x16.abs"))]
2299 #[stable(feature = "wasm_simd", since = "1.54.0")]
2300 pub fn i8x16_abs(a: v128) -> v128 {
2301     unsafe {
2302         let a = a.as_i8x16();
2303         let zero = simd::i8x16::splat(0);
2304         simd_select::<simd::m8x16, simd::i8x16>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
2305     }
2306 }
2307 
2308 /// Negates a 128-bit vector interpreted as sixteen 8-bit signed integers
2309 #[inline]
2310 #[cfg_attr(test, assert_instr(i8x16.neg))]
2311 #[target_feature(enable = "simd128")]
2312 #[doc(alias("i8x16.neg"))]
2313 #[stable(feature = "wasm_simd", since = "1.54.0")]
2314 pub fn i8x16_neg(a: v128) -> v128 {
2315     unsafe { simd_mul(a.as_i8x16(), simd::i8x16::splat(-1)).v128() }
2316 }
2317 
2318 /// Counts the number of bits set to one within each lane.
2319 #[inline]
2320 #[cfg_attr(test, assert_instr(i8x16.popcnt))]
2321 #[target_feature(enable = "simd128")]
2322 #[doc(alias("i8x16.popcnt"))]
2323 #[stable(feature = "wasm_simd", since = "1.54.0")]
2324 pub fn i8x16_popcnt(v: v128) -> v128 {
2325     unsafe { llvm_popcnt(v.as_i8x16()).v128() }
2326 }
2327 
2328 #[stable(feature = "wasm_simd", since = "1.54.0")]
2329 pub use i8x16_popcnt as u8x16_popcnt;
2330 
2331 /// Returns true if all lanes are non-zero, false otherwise.
2332 #[inline]
2333 #[cfg_attr(test, assert_instr(i8x16.all_true))]
2334 #[target_feature(enable = "simd128")]
2335 #[doc(alias("i8x16.all_true"))]
2336 #[stable(feature = "wasm_simd", since = "1.54.0")]
2337 pub fn i8x16_all_true(a: v128) -> bool {
2338     unsafe { llvm_i8x16_all_true(a.as_i8x16()) != 0 }
2339 }
2340 
2341 #[stable(feature = "wasm_simd", since = "1.54.0")]
2342 pub use i8x16_all_true as u8x16_all_true;
2343 
2344 /// Extracts the high bit for each lane in `a` and produces a scalar mask with
2345 /// all bits concatenated.
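///
/// For example (an illustrative sketch; bit `i` of the result is the high bit
/// of lane `i`; assumes a `wasm32` target with `simd128` enabled and the
/// `i8x16` constructor defined elsewhere in this module):
///
/// ```ignore
/// // Lanes 0 and 2 have their high (sign) bit set, so bits 0 and 2 are set.
/// let v = i8x16(-1, 0, -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
/// assert_eq!(i8x16_bitmask(v), 0b101);
/// ```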
2346 #[inline]
2347 #[cfg_attr(test, assert_instr(i8x16.bitmask))]
2348 #[target_feature(enable = "simd128")]
2349 #[doc(alias("i8x16.bitmask"))]
2350 #[stable(feature = "wasm_simd", since = "1.54.0")]
2351 pub fn i8x16_bitmask(a: v128) -> u16 {
2352     // FIXME(https://bugs.llvm.org/show_bug.cgi?id=50507) - this produces an
2353     // extraneous `i32.and` instruction against a mask of 65535 when converting
2354     // from the native intrinsic's i32 return value to our desired u16. This
2355     // shouldn't be necessary, but fixing it requires upstream LLVM changes.
2356     unsafe { llvm_bitmask_i8x16(a.as_i8x16()) as u16 }
2357 }
2358 
2359 #[stable(feature = "wasm_simd", since = "1.54.0")]
2360 pub use i8x16_bitmask as u8x16_bitmask;
2361 
2362 /// Converts two input vectors into a smaller lane vector by narrowing each
2363 /// lane.
2364 ///
2365 /// Signed saturation to 0x7f or 0x80 is used and the input lanes are always
2366 /// interpreted as signed integers.
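///
/// For example (an illustrative sketch; assumes a `wasm32` target with
/// `simd128` enabled and the `i16x8` constructor defined elsewhere in this
/// module):
///
/// ```ignore
/// // Lanes of `a` become result lanes 0..=7 and lanes of `b` become 8..=15,
/// // each clamped to the i8 range.
/// let a = i16x8(300, -300, 5, 0, 0, 0, 0, 0);
/// let b = i16x8_splat(0);
/// let r = i8x16_narrow_i16x8(a, b);
/// assert_eq!(i8x16_extract_lane::<0>(r), 127);
/// assert_eq!(i8x16_extract_lane::<1>(r), -128);
/// assert_eq!(i8x16_extract_lane::<2>(r), 5);
/// ```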
2367 #[inline]
2368 #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_s))]
2369 #[target_feature(enable = "simd128")]
2370 #[doc(alias("i8x16.narrow_i16x8_s"))]
2371 #[stable(feature = "wasm_simd", since = "1.54.0")]
2372 pub fn i8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
2373     unsafe { llvm_narrow_i8x16_s(a.as_i16x8(), b.as_i16x8()).v128() }
2374 }
2375 
2376 /// Converts two input vectors into a smaller lane vector by narrowing each
2377 /// lane.
2378 ///
2379 /// Signed saturation to 0x00 or 0xff is used and the input lanes are always
2380 /// interpreted as signed integers.
2381 #[inline]
2382 #[cfg_attr(test, assert_instr(i8x16.narrow_i16x8_u))]
2383 #[target_feature(enable = "simd128")]
2384 #[doc(alias("i8x16.narrow_i16x8_u"))]
2385 #[stable(feature = "wasm_simd", since = "1.54.0")]
2386 pub fn u8x16_narrow_i16x8(a: v128, b: v128) -> v128 {
2387     unsafe { llvm_narrow_i8x16_u(a.as_i16x8(), b.as_i16x8()).v128() }
2388 }
2389 
2390 /// Shifts each lane to the left by the specified number of bits.
2391 ///
2392 /// Only the low bits of the shift amount are used if the shift amount is
2393 /// greater than the lane width.
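///
/// For example (an illustrative sketch of the shift-amount masking; assumes a
/// `wasm32` target with `simd128` enabled):
///
/// ```ignore
/// let v = i8x16_splat(1);
/// // The shift amount is taken modulo the lane width, so shifting by 9 is the
/// // same as shifting by 1 for 8-bit lanes.
/// assert_eq!(i8x16_extract_lane::<0>(i8x16_shl(v, 9)), 2);
/// assert_eq!(i8x16_extract_lane::<0>(i8x16_shl(v, 1)), 2);
/// ```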
2394 #[inline]
2395 #[cfg_attr(test, assert_instr(i8x16.shl))]
2396 #[target_feature(enable = "simd128")]
2397 #[doc(alias("i8x16.shl"))]
2398 #[stable(feature = "wasm_simd", since = "1.54.0")]
2399 pub fn i8x16_shl(a: v128, amt: u32) -> v128 {
2400     unsafe { simd_shl(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() }
2401 }
2402 
2403 #[stable(feature = "wasm_simd", since = "1.54.0")]
2404 pub use i8x16_shl as u8x16_shl;
2405 
2406 /// Shifts each lane to the right by the specified number of bits, sign
2407 /// extending.
2408 ///
2409 /// Only the low bits of the shift amount are used if the shift amount is
2410 /// greater than the lane width.
2411 #[inline]
2412 #[cfg_attr(test, assert_instr(i8x16.shr_s))]
2413 #[target_feature(enable = "simd128")]
2414 #[doc(alias("i8x16.shr_s"))]
2415 #[stable(feature = "wasm_simd", since = "1.54.0")]
2416 pub fn i8x16_shr(a: v128, amt: u32) -> v128 {
2417     unsafe { simd_shr(a.as_i8x16(), simd::i8x16::splat(amt as i8)).v128() }
2418 }
2419 
2420 /// Shifts each lane to the right by the specified number of bits, shifting in
2421 /// zeros.
2422 ///
2423 /// Only the low bits of the shift amount are used if the shift amount is
2424 /// greater than the lane width.
2425 #[inline]
2426 #[cfg_attr(test, assert_instr(i8x16.shr_u))]
2427 #[target_feature(enable = "simd128")]
2428 #[doc(alias("i8x16.shr_u"))]
2429 #[stable(feature = "wasm_simd", since = "1.54.0")]
2430 pub fn u8x16_shr(a: v128, amt: u32) -> v128 {
2431     unsafe { simd_shr(a.as_u8x16(), simd::u8x16::splat(amt as u8)).v128() }
2432 }
2433 
2434 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit integers.
2435 #[inline]
2436 #[cfg_attr(test, assert_instr(i8x16.add))]
2437 #[target_feature(enable = "simd128")]
2438 #[doc(alias("i8x16.add"))]
2439 #[stable(feature = "wasm_simd", since = "1.54.0")]
2440 pub fn i8x16_add(a: v128, b: v128) -> v128 {
2441     unsafe { simd_add(a.as_i8x16(), b.as_i8x16()).v128() }
2442 }
2443 
2444 #[stable(feature = "wasm_simd", since = "1.54.0")]
2445 pub use i8x16_add as u8x16_add;
2446 
2447 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit signed
2448 /// integers, saturating on overflow to `i8::MAX`.
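///
/// For example (an illustrative sketch; assumes a `wasm32` target with
/// `simd128` enabled):
///
/// ```ignore
/// let a = i8x16_splat(100);
/// let b = i8x16_splat(100);
/// // 100 + 100 overflows i8, so each lane saturates at i8::MAX instead of
/// // wrapping as it would with i8x16_add.
/// assert_eq!(i8x16_extract_lane::<0>(i8x16_add_sat(a, b)), i8::MAX);
/// ```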
2449 #[inline]
2450 #[cfg_attr(test, assert_instr(i8x16.add_sat_s))]
2451 #[target_feature(enable = "simd128")]
2452 #[doc(alias("i8x16.add_sat_s"))]
2453 #[stable(feature = "wasm_simd", since = "1.54.0")]
2454 pub fn i8x16_add_sat(a: v128, b: v128) -> v128 {
2455     unsafe { llvm_i8x16_add_sat_s(a.as_i8x16(), b.as_i8x16()).v128() }
2456 }
2457 
2458 /// Adds two 128-bit vectors as if they were two packed sixteen 8-bit unsigned
2459 /// integers, saturating on overflow to `u8::MAX`.
2460 #[inline]
2461 #[cfg_attr(test, assert_instr(i8x16.add_sat_u))]
2462 #[target_feature(enable = "simd128")]
2463 #[doc(alias("i8x16.add_sat_u"))]
2464 #[stable(feature = "wasm_simd", since = "1.54.0")]
2465 pub fn u8x16_add_sat(a: v128, b: v128) -> v128 {
2466     unsafe { llvm_i8x16_add_sat_u(a.as_i8x16(), b.as_i8x16()).v128() }
2467 }
2468 
2469 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit integers.
2470 #[inline]
2471 #[cfg_attr(test, assert_instr(i8x16.sub))]
2472 #[target_feature(enable = "simd128")]
2473 #[doc(alias("i8x16.sub"))]
2474 #[stable(feature = "wasm_simd", since = "1.54.0")]
2475 pub fn i8x16_sub(a: v128, b: v128) -> v128 {
2476     unsafe { simd_sub(a.as_i8x16(), b.as_i8x16()).v128() }
2477 }
2478 
2479 #[stable(feature = "wasm_simd", since = "1.54.0")]
2480 pub use i8x16_sub as u8x16_sub;
2481 
2482 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
2483 /// signed integers, saturating on overflow to `i8::MIN`.
2484 #[inline]
2485 #[cfg_attr(test, assert_instr(i8x16.sub_sat_s))]
2486 #[target_feature(enable = "simd128")]
2487 #[doc(alias("i8x16.sub_sat_s"))]
2488 #[stable(feature = "wasm_simd", since = "1.54.0")]
2489 pub fn i8x16_sub_sat(a: v128, b: v128) -> v128 {
2490     unsafe { llvm_i8x16_sub_sat_s(a.as_i8x16(), b.as_i8x16()).v128() }
2491 }
2492 
2493 /// Subtracts two 128-bit vectors as if they were two packed sixteen 8-bit
2494 /// unsigned integers, saturating on overflow to 0.
2495 #[inline]
2496 #[cfg_attr(test, assert_instr(i8x16.sub_sat_u))]
2497 #[target_feature(enable = "simd128")]
2498 #[doc(alias("i8x16.sub_sat_u"))]
2499 #[stable(feature = "wasm_simd", since = "1.54.0")]
2500 pub fn u8x16_sub_sat(a: v128, b: v128) -> v128 {
2501     unsafe { llvm_i8x16_sub_sat_u(a.as_i8x16(), b.as_i8x16()).v128() }
2502 }
2503 
2504 /// Compares lane-wise signed integers, and returns the minimum of
2505 /// each pair.
2506 #[inline]
2507 #[cfg_attr(test, assert_instr(i8x16.min_s))]
2508 #[target_feature(enable = "simd128")]
2509 #[doc(alias("i8x16.min_s"))]
2510 #[stable(feature = "wasm_simd", since = "1.54.0")]
2511 pub fn i8x16_min(a: v128, b: v128) -> v128 {
2512     let a = a.as_i8x16();
2513     let b = b.as_i8x16();
2514     unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
2515 }
2516 
2517 /// Compares lane-wise unsigned integers, and returns the minimum of
2518 /// each pair.
2519 #[inline]
2520 #[cfg_attr(test, assert_instr(i8x16.min_u))]
2521 #[target_feature(enable = "simd128")]
2522 #[doc(alias("i8x16.min_u"))]
2523 #[stable(feature = "wasm_simd", since = "1.54.0")]
2524 pub fn u8x16_min(a: v128, b: v128) -> v128 {
2525     let a = a.as_u8x16();
2526     let b = b.as_u8x16();
2527     unsafe { simd_select::<simd::i8x16, _>(simd_lt(a, b), a, b).v128() }
2528 }
2529 
2530 /// Compares lane-wise signed integers, and returns the maximum of
2531 /// each pair.
2532 #[inline]
2533 #[cfg_attr(test, assert_instr(i8x16.max_s))]
2534 #[target_feature(enable = "simd128")]
2535 #[doc(alias("i8x16.max_s"))]
2536 #[stable(feature = "wasm_simd", since = "1.54.0")]
2537 pub fn i8x16_max(a: v128, b: v128) -> v128 {
2538     let a = a.as_i8x16();
2539     let b = b.as_i8x16();
2540     unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
2541 }
2542 
2543 /// Compares lane-wise unsigned integers, and returns the maximum of
2544 /// each pair.
2545 #[inline]
2546 #[cfg_attr(test, assert_instr(i8x16.max_u))]
2547 #[target_feature(enable = "simd128")]
2548 #[doc(alias("i8x16.max_u"))]
2549 #[stable(feature = "wasm_simd", since = "1.54.0")]
2550 pub fn u8x16_max(a: v128, b: v128) -> v128 {
2551     let a = a.as_u8x16();
2552     let b = b.as_u8x16();
2553     unsafe { simd_select::<simd::i8x16, _>(simd_gt(a, b), a, b).v128() }
2554 }
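
// A test-only sketch (not part of the upstream module): the same bit pattern
// compares differently under the signed and unsigned maxima, since 0x80 is -128
// as an `i8` but 128 as a `u8`. Assumes `i8x16_splat`, `i8x16_extract_lane`, and
// `u8x16_extract_lane` from earlier in this file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn i8x16_max_sketch() {
    let a = i8x16_splat(i8::MIN); // every lane holds the bit pattern 0x80
    let b = i8x16_splat(1);
    // Signed: -128 < 1, so the maximum is 1.
    assert_eq!(i8x16_extract_lane::<0>(i8x16_max(a, b)), 1);
    // Unsigned: 0x80 = 128 > 1, so the maximum keeps the 0x80 lanes.
    assert_eq!(u8x16_extract_lane::<0>(u8x16_max(a, b)), 128);
}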
2555 
2556 /// Lane-wise rounding average.
2557 #[inline]
2558 #[cfg_attr(test, assert_instr(i8x16.avgr_u))]
2559 #[target_feature(enable = "simd128")]
2560 #[doc(alias("i8x16.avgr_u"))]
2561 #[stable(feature = "wasm_simd", since = "1.54.0")]
2562 pub fn u8x16_avgr(a: v128, b: v128) -> v128 {
2563     unsafe { llvm_avgr_u_i8x16(a.as_i8x16(), b.as_i8x16()).v128() }
2564 }
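
// A test-only sketch (not part of the upstream module): `u8x16_avgr` computes
// `(a + b + 1) >> 1` per lane, i.e. the average rounded up on ties. Assumes
// `u8x16_splat` and `u8x16_extract_lane` from earlier in this file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn u8x16_avgr_sketch() {
    let a = u8x16_splat(3);
    let b = u8x16_splat(4);
    // (3 + 4 + 1) >> 1 == 4, not the truncated 3.
    assert_eq!(u8x16_extract_lane::<0>(u8x16_avgr(a, b)), 4);
}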
2565 
2566 /// Lane-wise integer extended pairwise addition producing extended results
2567 /// (twice wider results than the inputs).
2568 #[inline]
2569 #[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_s))]
2570 #[target_feature(enable = "simd128")]
2571 #[doc(alias("i16x8.extadd_pairwise_i8x16_s"))]
2572 #[stable(feature = "wasm_simd", since = "1.54.0")]
2573 pub fn i16x8_extadd_pairwise_i8x16(a: v128) -> v128 {
2574     unsafe { llvm_i16x8_extadd_pairwise_i8x16_s(a.as_i8x16()).v128() }
2575 }
2576 
2577 /// Lane-wise integer extended pairwise addition producing extended results
2578 /// (twice wider results than the inputs).
2579 #[inline]
2580 #[cfg_attr(test, assert_instr(i16x8.extadd_pairwise_i8x16_u))]
2581 #[target_feature(enable = "simd128")]
2582 #[doc(alias("i16x8.extadd_pairwise_i8x16_u"))]
2583 #[stable(feature = "wasm_simd", since = "1.54.0")]
2584 pub fn i16x8_extadd_pairwise_u8x16(a: v128) -> v128 {
2585     unsafe { llvm_i16x8_extadd_pairwise_i8x16_u(a.as_i8x16()).v128() }
2586 }
2587 
2588 #[stable(feature = "wasm_simd", since = "1.54.0")]
2589 pub use i16x8_extadd_pairwise_u8x16 as u16x8_extadd_pairwise_u8x16;
2590 
2591 /// Lane-wise wrapping absolute value.
2592 #[inline]
2593 #[cfg_attr(test, assert_instr(i16x8.abs))]
2594 #[target_feature(enable = "simd128")]
2595 #[doc(alias("i16x8.abs"))]
2596 #[stable(feature = "wasm_simd", since = "1.54.0")]
2597 pub fn i16x8_abs(a: v128) -> v128 {
2598     let a = a.as_i16x8();
2599     let zero = simd::i16x8::splat(0);
2600     unsafe {
2601         simd_select::<simd::m16x8, simd::i16x8>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
2602     }
2603 }
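
// A test-only sketch (not part of the upstream module): "wrapping" means that
// `abs(i16::MIN)` has no positive counterpart and wraps back to `i16::MIN`, just
// like `i16::wrapping_abs`. Assumes `i16x8_splat` and `i16x8_extract_lane` from
// earlier in this file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn i16x8_abs_sketch() {
    assert_eq!(i16x8_extract_lane::<0>(i16x8_abs(i16x8_splat(-5))), 5);
    // The only lane value without a positive counterpart wraps around.
    assert_eq!(
        i16x8_extract_lane::<0>(i16x8_abs(i16x8_splat(i16::MIN))),
        i16::MIN
    );
}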
2604 
2605 /// Negates a 128-bit vector interpreted as eight 16-bit signed integers.
2606 #[inline]
2607 #[cfg_attr(test, assert_instr(i16x8.neg))]
2608 #[target_feature(enable = "simd128")]
2609 #[doc(alias("i16x8.neg"))]
2610 #[stable(feature = "wasm_simd", since = "1.54.0")]
2611 pub fn i16x8_neg(a: v128) -> v128 {
2612     unsafe { simd_mul(a.as_i16x8(), simd::i16x8::splat(-1)).v128() }
2613 }
2614 
2615 /// Lane-wise saturating rounding multiplication in Q15 format.
2616 #[inline]
2617 #[cfg_attr(test, assert_instr(i16x8.q15mulr_sat_s))]
2618 #[target_feature(enable = "simd128")]
2619 #[doc(alias("i16x8.q15mulr_sat_s"))]
2620 #[stable(feature = "wasm_simd", since = "1.54.0")]
2621 pub fn i16x8_q15mulr_sat(a: v128, b: v128) -> v128 {
2622     unsafe { llvm_q15mulr(a.as_i16x8(), b.as_i16x8()).v128() }
2623 }
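
// A test-only sketch (not part of the upstream module): Q15 treats an `i16` as a
// fixed-point fraction in [-1, 1), so each lane computes
// `(a * b + 0x4000) >> 15`, saturated to the `i16` range. Assumes `i16x8_splat`
// and `i16x8_extract_lane` from earlier in this file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn i16x8_q15mulr_sat_sketch() {
    // 0x4000 is 0.5 in Q15, and 0.5 * 0.5 = 0.25 = 0x2000.
    let half = i16x8_splat(0x4000);
    assert_eq!(i16x8_extract_lane::<0>(i16x8_q15mulr_sat(half, half)), 0x2000);
    // -1.0 * -1.0 would be +1.0, which is not representable and saturates to `i16::MAX`.
    let neg_one = i16x8_splat(i16::MIN);
    assert_eq!(
        i16x8_extract_lane::<0>(i16x8_q15mulr_sat(neg_one, neg_one)),
        i16::MAX
    );
}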
2624 
2625 /// Returns true if all lanes are non-zero, false otherwise.
2626 #[inline]
2627 #[cfg_attr(test, assert_instr(i16x8.all_true))]
2628 #[target_feature(enable = "simd128")]
2629 #[doc(alias("i16x8.all_true"))]
2630 #[stable(feature = "wasm_simd", since = "1.54.0")]
2631 pub fn i16x8_all_true(a: v128) -> bool {
2632     unsafe { llvm_i16x8_all_true(a.as_i16x8()) != 0 }
2633 }
2634 
2635 #[stable(feature = "wasm_simd", since = "1.54.0")]
2636 pub use i16x8_all_true as u16x8_all_true;
2637 
2638 /// Extracts the high bit for each lane in `a` and produces a scalar mask with
2639 /// all bits concatenated.
2640 #[inline]
2641 #[cfg_attr(test, assert_instr(i16x8.bitmask))]
2642 #[target_feature(enable = "simd128")]
2643 #[doc(alias("i16x8.bitmask"))]
2644 #[stable(feature = "wasm_simd", since = "1.54.0")]
2645 pub fn i16x8_bitmask(a: v128) -> u8 {
2646     unsafe { llvm_bitmask_i16x8(a.as_i16x8()) as u8 }
2647 }
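
// A test-only sketch (not part of the upstream module): each of the eight lanes
// contributes its sign bit to one bit of the mask, with lane 0 in the least
// significant position. Assumes the `i16x8` constructor defined earlier in this
// file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn i16x8_bitmask_sketch() {
    // Lanes 0 and 3 are negative, so bits 0 and 3 of the mask are set.
    let v = i16x8(-1, 2, 3, -4, 5, 6, 7, 8);
    assert_eq!(i16x8_bitmask(v), 0b0000_1001);
}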
2648 
2649 #[stable(feature = "wasm_simd", since = "1.54.0")]
2650 pub use i16x8_bitmask as u16x8_bitmask;
2651 
2652 /// Converts two input vectors into a smaller lane vector by narrowing each
2653 /// lane.
2654 ///
2655 /// Signed saturation to 0x7fff or 0x8000 is used and the input lanes are always
2656 /// interpreted as signed integers.
2657 #[inline]
2658 #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_s))]
2659 #[target_feature(enable = "simd128")]
2660 #[doc(alias("i16x8.narrow_i32x4_s"))]
2661 #[stable(feature = "wasm_simd", since = "1.54.0")]
2662 pub fn i16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
2663     unsafe { llvm_narrow_i16x8_s(a.as_i32x4(), b.as_i32x4()).v128() }
2664 }
2665 
2666 /// Converts two input vectors into a smaller lane vector by narrowing each
2667 /// lane.
2668 ///
2669 /// Signed saturation to 0x0000 or 0xffff is used and the input lanes are always
2670 /// interpreted as signed integers.
2671 #[inline]
2672 #[cfg_attr(test, assert_instr(i16x8.narrow_i32x4_u))]
2673 #[target_feature(enable = "simd128")]
2674 #[doc(alias("i16x8.narrow_i32x4_u"))]
2675 #[stable(feature = "wasm_simd", since = "1.54.0")]
2676 pub fn u16x8_narrow_i32x4(a: v128, b: v128) -> v128 {
2677     unsafe { llvm_narrow_i16x8_u(a.as_i32x4(), b.as_i32x4()).v128() }
2678 }
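
// A test-only sketch (not part of the upstream module): narrowing packs the four
// lanes of `a` followed by the four lanes of `b`, saturating each value into the
// destination range. Assumes the `i32x4` constructor plus `i16x8_extract_lane`
// and `u16x8_extract_lane` from earlier in this file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn i16x8_narrow_i32x4_sketch() {
    let a = i32x4(70000, -70000, 1, 2);
    let b = i32x4(3, 4, 5, 6);
    let signed = i16x8_narrow_i32x4(a, b);
    // 70000 saturates to `i16::MAX` and -70000 to `i16::MIN`.
    assert_eq!(i16x8_extract_lane::<0>(signed), i16::MAX);
    assert_eq!(i16x8_extract_lane::<1>(signed), i16::MIN);
    let unsigned = u16x8_narrow_i32x4(a, b);
    // In the unsigned variant, out-of-range values clamp to the `u16` bounds.
    assert_eq!(u16x8_extract_lane::<0>(unsigned), u16::MAX);
    assert_eq!(u16x8_extract_lane::<1>(unsigned), 0);
}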
2679 
2680 /// Converts low half of the smaller lane vector to a larger lane
2681 /// vector, sign extended.
2682 #[inline]
2683 #[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_s))]
2684 #[target_feature(enable = "simd128")]
2685 #[doc(alias("i16x8.extend_low_i8x16_s"))]
2686 #[stable(feature = "wasm_simd", since = "1.54.0")]
2687 pub fn i16x8_extend_low_i8x16(a: v128) -> v128 {
2688     unsafe {
2689         simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
2690             a.as_i8x16(),
2691             a.as_i8x16(),
2692             [0, 1, 2, 3, 4, 5, 6, 7],
2693         ))
2694         .v128()
2695     }
2696 }
2697 
2698 /// Converts high half of the smaller lane vector to a larger lane
2699 /// vector, sign extended.
2700 #[inline]
2701 #[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_s))]
2702 #[target_feature(enable = "simd128")]
2703 #[doc(alias("i16x8.extend_high_i8x16_s"))]
2704 #[stable(feature = "wasm_simd", since = "1.54.0")]
2705 pub fn i16x8_extend_high_i8x16(a: v128) -> v128 {
2706     unsafe {
2707         simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
2708             a.as_i8x16(),
2709             a.as_i8x16(),
2710             [8, 9, 10, 11, 12, 13, 14, 15],
2711         ))
2712         .v128()
2713     }
2714 }
2715 
2716 /// Converts low half of the smaller lane vector to a larger lane
2717 /// vector, zero extended.
2718 #[inline]
2719 #[cfg_attr(test, assert_instr(i16x8.extend_low_i8x16_u))]
2720 #[target_feature(enable = "simd128")]
2721 #[doc(alias("i16x8.extend_low_i8x16_u"))]
2722 #[stable(feature = "wasm_simd", since = "1.54.0")]
2723 pub fn i16x8_extend_low_u8x16(a: v128) -> v128 {
2724     unsafe {
2725         simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
2726             a.as_u8x16(),
2727             a.as_u8x16(),
2728             [0, 1, 2, 3, 4, 5, 6, 7],
2729         ))
2730         .v128()
2731     }
2732 }
2733 
2734 #[stable(feature = "wasm_simd", since = "1.54.0")]
2735 pub use i16x8_extend_low_u8x16 as u16x8_extend_low_u8x16;
2736 
2737 /// Converts high half of the smaller lane vector to a larger lane
2738 /// vector, zero extended.
2739 #[inline]
2740 #[cfg_attr(test, assert_instr(i16x8.extend_high_i8x16_u))]
2741 #[target_feature(enable = "simd128")]
2742 #[doc(alias("i16x8.extend_high_i8x16_u"))]
2743 #[stable(feature = "wasm_simd", since = "1.54.0")]
2744 pub fn i16x8_extend_high_u8x16(a: v128) -> v128 {
2745     unsafe {
2746         simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
2747             a.as_u8x16(),
2748             a.as_u8x16(),
2749             [8, 9, 10, 11, 12, 13, 14, 15],
2750         ))
2751         .v128()
2752     }
2753 }
2754 
2755 #[stable(feature = "wasm_simd", since = "1.54.0")]
2756 pub use i16x8_extend_high_u8x16 as u16x8_extend_high_u8x16;
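
// A test-only sketch (not part of the upstream module): the low/high variants
// select which eight bytes are widened, and the `_u8x16` conversions zero-extend
// while the `_i8x16` ones sign-extend. Assumes `u8x16_splat`,
// `i16x8_extract_lane`, and `u16x8_extract_lane` from earlier in this file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn i16x8_extend_sketch() {
    let v = u8x16_splat(0xff);
    // Zero extension: the byte 0xff becomes 255 in every widened lane.
    assert_eq!(u16x8_extract_lane::<0>(u16x8_extend_low_u8x16(v)), 255);
    // Sign extension: the same byte becomes -1.
    assert_eq!(i16x8_extract_lane::<0>(i16x8_extend_low_i8x16(v)), -1);
}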
2757 
2758 /// Shifts each lane to the left by the specified number of bits.
2759 ///
2760 /// Only the low bits of the shift amount are used if the shift amount is
2761 /// greater than the lane width.
2762 #[inline]
2763 #[cfg_attr(test, assert_instr(i16x8.shl))]
2764 #[target_feature(enable = "simd128")]
2765 #[doc(alias("i16x8.shl"))]
2766 #[stable(feature = "wasm_simd", since = "1.54.0")]
2767 pub fn i16x8_shl(a: v128, amt: u32) -> v128 {
2768     unsafe { simd_shl(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
2769 }
2770 
2771 #[stable(feature = "wasm_simd", since = "1.54.0")]
2772 pub use i16x8_shl as u16x8_shl;
2773 
2774 /// Shifts each lane to the right by the specified number of bits, sign
2775 /// extending.
2776 ///
2777 /// Only the low bits of the shift amount are used if the shift amount is
2778 /// greater than the lane width.
2779 #[inline]
2780 #[cfg_attr(test, assert_instr(i16x8.shr_s))]
2781 #[target_feature(enable = "simd128")]
2782 #[doc(alias("i16x8.shr_s"))]
2783 #[stable(feature = "wasm_simd", since = "1.54.0")]
2784 pub fn i16x8_shr(a: v128, amt: u32) -> v128 {
2785     unsafe { simd_shr(a.as_i16x8(), simd::i16x8::splat(amt as i16)).v128() }
2786 }
2787 
2788 /// Shifts each lane to the right by the specified number of bits, shifting in
2789 /// zeros.
2790 ///
2791 /// Only the low bits of the shift amount are used if the shift amount is
2792 /// greater than the lane width.
2793 #[inline]
2794 #[cfg_attr(test, assert_instr(i16x8.shr_u))]
2795 #[target_feature(enable = "simd128")]
2796 #[doc(alias("i16x8.shr_u"))]
2797 #[stable(feature = "wasm_simd", since = "1.54.0")]
2798 pub fn u16x8_shr(a: v128, amt: u32) -> v128 {
2799     unsafe { simd_shr(a.as_u16x8(), simd::u16x8::splat(amt as u16)).v128() }
2800 }
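
// A test-only sketch (not part of the upstream module): the signed right shift
// propagates the sign bit while the unsigned one shifts in zeros. Assumes
// `i16x8_splat`, `i16x8_extract_lane`, and `u16x8_extract_lane` from earlier in
// this file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn i16x8_shr_sketch() {
    let v = i16x8_splat(-8); // bit pattern 0xfff8 in every lane
    // Arithmetic shift: -8 >> 1 == -4.
    assert_eq!(i16x8_extract_lane::<0>(i16x8_shr(v, 1)), -4);
    // Logical shift: 0xfff8 >> 1 == 0x7ffc.
    assert_eq!(u16x8_extract_lane::<0>(u16x8_shr(v, 1)), 0x7ffc);
}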
2801 
2802 /// Adds two 128-bit vectors as if they were two packed eight 16-bit integers.
2803 #[inline]
2804 #[cfg_attr(test, assert_instr(i16x8.add))]
2805 #[target_feature(enable = "simd128")]
2806 #[doc(alias("i16x8.add"))]
2807 #[stable(feature = "wasm_simd", since = "1.54.0")]
2808 pub fn i16x8_add(a: v128, b: v128) -> v128 {
2809     unsafe { simd_add(a.as_i16x8(), b.as_i16x8()).v128() }
2810 }
2811 
2812 #[stable(feature = "wasm_simd", since = "1.54.0")]
2813 pub use i16x8_add as u16x8_add;
2814 
2815 /// Adds two 128-bit vectors as if they were two packed eight 16-bit signed
2816 /// integers, saturating on overflow to `i16::MAX`.
2817 #[inline]
2818 #[cfg_attr(test, assert_instr(i16x8.add_sat_s))]
2819 #[target_feature(enable = "simd128")]
2820 #[doc(alias("i16x8.add_sat_s"))]
2821 #[stable(feature = "wasm_simd", since = "1.54.0")]
2822 pub fn i16x8_add_sat(a: v128, b: v128) -> v128 {
2823     unsafe { llvm_i16x8_add_sat_s(a.as_i16x8(), b.as_i16x8()).v128() }
2824 }
2825 
2826 /// Adds two 128-bit vectors as if they were two packed eight 16-bit unsigned
2827 /// integers, saturating on overflow to `u16::MAX`.
2828 #[inline]
2829 #[cfg_attr(test, assert_instr(i16x8.add_sat_u))]
2830 #[target_feature(enable = "simd128")]
2831 #[doc(alias("i16x8.add_sat_u"))]
2832 #[stable(feature = "wasm_simd", since = "1.54.0")]
2833 pub fn u16x8_add_sat(a: v128, b: v128) -> v128 {
2834     unsafe { llvm_i16x8_add_sat_u(a.as_i16x8(), b.as_i16x8()).v128() }
2835 }
2836 
2837 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit integers.
2838 #[inline]
2839 #[cfg_attr(test, assert_instr(i16x8.sub))]
2840 #[target_feature(enable = "simd128")]
2841 #[doc(alias("i16x8.sub"))]
2842 #[stable(feature = "wasm_simd", since = "1.54.0")]
2843 pub fn i16x8_sub(a: v128, b: v128) -> v128 {
2844     unsafe { simd_sub(a.as_i16x8(), b.as_i16x8()).v128() }
2845 }
2846 
2847 #[stable(feature = "wasm_simd", since = "1.54.0")]
2848 pub use i16x8_sub as u16x8_sub;
2849 
2850 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
2851 /// signed integers, saturating on overflow to `i16::MIN`.
2852 #[inline]
2853 #[cfg_attr(test, assert_instr(i16x8.sub_sat_s))]
2854 #[target_feature(enable = "simd128")]
2855 #[doc(alias("i16x8.sub_sat_s"))]
2856 #[stable(feature = "wasm_simd", since = "1.54.0")]
2857 pub fn i16x8_sub_sat(a: v128, b: v128) -> v128 {
2858     unsafe { llvm_i16x8_sub_sat_s(a.as_i16x8(), b.as_i16x8()).v128() }
2859 }
2860 
2861 /// Subtracts two 128-bit vectors as if they were two packed eight 16-bit
2862 /// unsigned integers, saturating on overflow to 0.
2863 #[inline]
2864 #[cfg_attr(test, assert_instr(i16x8.sub_sat_u))]
2865 #[target_feature(enable = "simd128")]
2866 #[doc(alias("i16x8.sub_sat_u"))]
2867 #[stable(feature = "wasm_simd", since = "1.54.0")]
2868 pub fn u16x8_sub_sat(a: v128, b: v128) -> v128 {
2869     unsafe { llvm_i16x8_sub_sat_u(a.as_i16x8(), b.as_i16x8()).v128() }
2870 }
2871 
2872 /// Multiplies two 128-bit vectors as if they were two packed eight 16-bit
2873 /// signed integers.
2874 #[inline]
2875 #[cfg_attr(test, assert_instr(i16x8.mul))]
2876 #[target_feature(enable = "simd128")]
2877 #[doc(alias("i16x8.mul"))]
2878 #[stable(feature = "wasm_simd", since = "1.54.0")]
2879 pub fn i16x8_mul(a: v128, b: v128) -> v128 {
2880     unsafe { simd_mul(a.as_i16x8(), b.as_i16x8()).v128() }
2881 }
2882 
2883 #[stable(feature = "wasm_simd", since = "1.54.0")]
2884 pub use i16x8_mul as u16x8_mul;
2885 
2886 /// Compares lane-wise signed integers, and returns the minimum of
2887 /// each pair.
2888 #[inline]
2889 #[cfg_attr(test, assert_instr(i16x8.min_s))]
2890 #[target_feature(enable = "simd128")]
2891 #[doc(alias("i16x8.min_s"))]
2892 #[stable(feature = "wasm_simd", since = "1.54.0")]
2893 pub fn i16x8_min(a: v128, b: v128) -> v128 {
2894     let a = a.as_i16x8();
2895     let b = b.as_i16x8();
2896     unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
2897 }
2898 
2899 /// Compares lane-wise unsigned integers, and returns the minimum of
2900 /// each pair.
2901 #[inline]
2902 #[cfg_attr(test, assert_instr(i16x8.min_u))]
2903 #[target_feature(enable = "simd128")]
2904 #[doc(alias("i16x8.min_u"))]
2905 #[stable(feature = "wasm_simd", since = "1.54.0")]
2906 pub fn u16x8_min(a: v128, b: v128) -> v128 {
2907     let a = a.as_u16x8();
2908     let b = b.as_u16x8();
2909     unsafe { simd_select::<simd::i16x8, _>(simd_lt(a, b), a, b).v128() }
2910 }
2911 
2912 /// Compares lane-wise signed integers, and returns the maximum of
2913 /// each pair.
2914 #[inline]
2915 #[cfg_attr(test, assert_instr(i16x8.max_s))]
2916 #[target_feature(enable = "simd128")]
2917 #[doc(alias("i16x8.max_s"))]
2918 #[stable(feature = "wasm_simd", since = "1.54.0")]
2919 pub fn i16x8_max(a: v128, b: v128) -> v128 {
2920     let a = a.as_i16x8();
2921     let b = b.as_i16x8();
2922     unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
2923 }
2924 
2925 /// Compares lane-wise unsigned integers, and returns the maximum of
2926 /// each pair.
2927 #[inline]
2928 #[cfg_attr(test, assert_instr(i16x8.max_u))]
2929 #[target_feature(enable = "simd128")]
2930 #[doc(alias("i16x8.max_u"))]
2931 #[stable(feature = "wasm_simd", since = "1.54.0")]
2932 pub fn u16x8_max(a: v128, b: v128) -> v128 {
2933     let a = a.as_u16x8();
2934     let b = b.as_u16x8();
2935     unsafe { simd_select::<simd::i16x8, _>(simd_gt(a, b), a, b).v128() }
2936 }
2937 
2938 /// Lane-wise rounding average.
2939 #[inline]
2940 #[cfg_attr(test, assert_instr(i16x8.avgr_u))]
2941 #[target_feature(enable = "simd128")]
2942 #[doc(alias("i16x8.avgr_u"))]
2943 #[stable(feature = "wasm_simd", since = "1.54.0")]
2944 pub fn u16x8_avgr(a: v128, b: v128) -> v128 {
2945     unsafe { llvm_avgr_u_i16x8(a.as_i16x8(), b.as_i16x8()).v128() }
2946 }
2947 
2948 /// Lane-wise integer extended multiplication producing twice wider result than
2949 /// the inputs.
2950 ///
2951 /// Equivalent of `i16x8_mul(i16x8_extend_low_i8x16(a), i16x8_extend_low_i8x16(b))`
2952 #[inline]
2953 #[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_s))]
2954 #[target_feature(enable = "simd128")]
2955 #[doc(alias("i16x8.extmul_low_i8x16_s"))]
2956 #[stable(feature = "wasm_simd", since = "1.54.0")]
2957 pub fn i16x8_extmul_low_i8x16(a: v128, b: v128) -> v128 {
2958     unsafe {
2959         let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
2960             a.as_i8x16(),
2961             a.as_i8x16(),
2962             [0, 1, 2, 3, 4, 5, 6, 7],
2963         ));
2964         let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
2965             b.as_i8x16(),
2966             b.as_i8x16(),
2967             [0, 1, 2, 3, 4, 5, 6, 7],
2968         ));
2969         simd_mul(lhs, rhs).v128()
2970     }
2971 }
2972 
2973 /// Lane-wise integer extended multiplication producing twice wider result than
2974 /// the inputs.
2975 ///
2976 /// Equivalent of `i16x8_mul(i16x8_extend_high_i8x16(a), i16x8_extend_high_i8x16(b))`
2977 #[inline]
2978 #[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_s))]
2979 #[target_feature(enable = "simd128")]
2980 #[doc(alias("i16x8.extmul_high_i8x16_s"))]
2981 #[stable(feature = "wasm_simd", since = "1.54.0")]
2982 pub fn i16x8_extmul_high_i8x16(a: v128, b: v128) -> v128 {
2983     unsafe {
2984         let lhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
2985             a.as_i8x16(),
2986             a.as_i8x16(),
2987             [8, 9, 10, 11, 12, 13, 14, 15],
2988         ));
2989         let rhs = simd_cast::<simd::i8x8, simd::i16x8>(simd_shuffle8!(
2990             b.as_i8x16(),
2991             b.as_i8x16(),
2992             [8, 9, 10, 11, 12, 13, 14, 15],
2993         ));
2994         simd_mul(lhs, rhs).v128()
2995     }
2996 }
2997 
2998 /// Lane-wise integer extended multiplication producing twice wider result than
2999 /// the inputs.
3000 ///
3001 /// Equivalent of `i16x8_mul(i16x8_extend_low_u8x16(a), i16x8_extend_low_u8x16(b))`
3002 #[inline]
3003 #[cfg_attr(test, assert_instr(i16x8.extmul_low_i8x16_u))]
3004 #[target_feature(enable = "simd128")]
3005 #[doc(alias("i16x8.extmul_low_i8x16_u"))]
3006 #[stable(feature = "wasm_simd", since = "1.54.0")]
3007 pub fn i16x8_extmul_low_u8x16(a: v128, b: v128) -> v128 {
3008     unsafe {
3009         let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
3010             a.as_u8x16(),
3011             a.as_u8x16(),
3012             [0, 1, 2, 3, 4, 5, 6, 7],
3013         ));
3014         let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
3015             b.as_u8x16(),
3016             b.as_u8x16(),
3017             [0, 1, 2, 3, 4, 5, 6, 7],
3018         ));
3019         simd_mul(lhs, rhs).v128()
3020     }
3021 }
3022 
3023 #[stable(feature = "wasm_simd", since = "1.54.0")]
3024 pub use i16x8_extmul_low_u8x16 as u16x8_extmul_low_u8x16;
3025 
3026 /// Lane-wise integer extended multiplication producing twice wider result than
3027 /// the inputs.
3028 ///
3029 /// Equivalent of `i16x8_mul(i16x8_extend_high_u8x16(a), i16x8_extend_high_u8x16(b))`
3030 #[inline]
3031 #[cfg_attr(test, assert_instr(i16x8.extmul_high_i8x16_u))]
3032 #[target_feature(enable = "simd128")]
3033 #[doc(alias("i16x8.extmul_high_i8x16_u"))]
3034 #[stable(feature = "wasm_simd", since = "1.54.0")]
3035 pub fn i16x8_extmul_high_u8x16(a: v128, b: v128) -> v128 {
3036     unsafe {
3037         let lhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
3038             a.as_u8x16(),
3039             a.as_u8x16(),
3040             [8, 9, 10, 11, 12, 13, 14, 15],
3041         ));
3042         let rhs = simd_cast::<simd::u8x8, simd::u16x8>(simd_shuffle8!(
3043             b.as_u8x16(),
3044             b.as_u8x16(),
3045             [8, 9, 10, 11, 12, 13, 14, 15],
3046         ));
3047         simd_mul(lhs, rhs).v128()
3048     }
3049 }
3050 
3051 #[stable(feature = "wasm_simd", since = "1.54.0")]
3052 pub use i16x8_extmul_high_u8x16 as u16x8_extmul_high_u8x16;
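
// A test-only sketch (not part of the upstream module): `extmul` is the fused
// form of "extend, then multiply", so it agrees with composing the two steps by
// hand. Assumes `u8x16_splat` and `u16x8_extract_lane` from earlier in this
// file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn i16x8_extmul_sketch() {
    let a = u8x16_splat(200);
    let b = u8x16_splat(3);
    // 200 * 3 = 600 does not fit in a u8 lane but fits in the widened u16 lane.
    let fused = u16x8_extract_lane::<0>(u16x8_extmul_low_u8x16(a, b));
    let manual = u16x8_extract_lane::<0>(u16x8_mul(
        u16x8_extend_low_u8x16(a),
        u16x8_extend_low_u8x16(b),
    ));
    assert_eq!(fused, 600);
    assert_eq!(fused, manual);
}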
3053 
3054 /// Lane-wise integer extended pairwise addition producing extended results
3055 /// (twice wider results than the inputs).
3056 #[inline]
3057 #[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_s))]
3058 #[target_feature(enable = "simd128")]
3059 #[doc(alias("i32x4.extadd_pairwise_i16x8_s"))]
3060 #[stable(feature = "wasm_simd", since = "1.54.0")]
3061 pub fn i32x4_extadd_pairwise_i16x8(a: v128) -> v128 {
3062     unsafe { llvm_i32x4_extadd_pairwise_i16x8_s(a.as_i16x8()).v128() }
3063 }
3064 
3065 /// Lane-wise integer extended pairwise addition producing extended results
3066 /// (twice wider results than the inputs).
3067 #[inline]
3068 #[cfg_attr(test, assert_instr(i32x4.extadd_pairwise_i16x8_u))]
3069 #[doc(alias("i32x4.extadd_pairwise_i16x8_u"))]
3070 #[target_feature(enable = "simd128")]
3071 #[stable(feature = "wasm_simd", since = "1.54.0")]
3072 pub fn i32x4_extadd_pairwise_u16x8(a: v128) -> v128 {
3073     unsafe { llvm_i32x4_extadd_pairwise_i16x8_u(a.as_i16x8()).v128() }
3074 }
3075 
3076 #[stable(feature = "wasm_simd", since = "1.54.0")]
3077 pub use i32x4_extadd_pairwise_u16x8 as u32x4_extadd_pairwise_u16x8;
3078 
3079 /// Lane-wise wrapping absolute value.
3080 #[inline]
3081 #[cfg_attr(test, assert_instr(i32x4.abs))]
3082 #[target_feature(enable = "simd128")]
3083 #[doc(alias("i32x4.abs"))]
3084 #[stable(feature = "wasm_simd", since = "1.54.0")]
3085 pub fn i32x4_abs(a: v128) -> v128 {
3086     let a = a.as_i32x4();
3087     let zero = simd::i32x4::splat(0);
3088     unsafe {
3089         simd_select::<simd::m32x4, simd::i32x4>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
3090     }
3091 }
3092 
3093 /// Negates a 128-bit vector interpreted as four 32-bit signed integers.
3094 #[inline]
3095 #[cfg_attr(test, assert_instr(i32x4.neg))]
3096 #[target_feature(enable = "simd128")]
3097 #[doc(alias("i32x4.neg"))]
3098 #[stable(feature = "wasm_simd", since = "1.54.0")]
3099 pub fn i32x4_neg(a: v128) -> v128 {
3100     unsafe { simd_mul(a.as_i32x4(), simd::i32x4::splat(-1)).v128() }
3101 }
3102 
3103 /// Returns true if all lanes are non-zero, false otherwise.
3104 #[inline]
3105 #[cfg_attr(test, assert_instr(i32x4.all_true))]
3106 #[target_feature(enable = "simd128")]
3107 #[doc(alias("i32x4.all_true"))]
3108 #[stable(feature = "wasm_simd", since = "1.54.0")]
3109 pub fn i32x4_all_true(a: v128) -> bool {
3110     unsafe { llvm_i32x4_all_true(a.as_i32x4()) != 0 }
3111 }
3112 
3113 #[stable(feature = "wasm_simd", since = "1.54.0")]
3114 pub use i32x4_all_true as u32x4_all_true;
3115 
3116 /// Extracts the high bit for each lane in `a` and produces a scalar mask with
3117 /// all bits concatenated.
3118 #[inline]
3119 #[cfg_attr(test, assert_instr(i32x4.bitmask))]
3120 #[target_feature(enable = "simd128")]
3121 #[doc(alias("i32x4.bitmask"))]
3122 #[stable(feature = "wasm_simd", since = "1.54.0")]
3123 pub fn i32x4_bitmask(a: v128) -> u8 {
3124     unsafe { llvm_bitmask_i32x4(a.as_i32x4()) as u8 }
3125 }
3126 
3127 #[stable(feature = "wasm_simd", since = "1.54.0")]
3128 pub use i32x4_bitmask as u32x4_bitmask;
3129 
3130 /// Converts low half of the smaller lane vector to a larger lane
3131 /// vector, sign extended.
3132 #[inline]
3133 #[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_s))]
3134 #[target_feature(enable = "simd128")]
3135 #[doc(alias("i32x4.extend_low_i16x8_s"))]
3136 #[stable(feature = "wasm_simd", since = "1.54.0")]
3137 pub fn i32x4_extend_low_i16x8(a: v128) -> v128 {
3138     unsafe {
3139         simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
3140             a.as_i16x8(),
3141             a.as_i16x8(),
3142             [0, 1, 2, 3]
3143         ))
3144         .v128()
3145     }
3146 }
3147 
3148 /// Converts high half of the smaller lane vector to a larger lane
3149 /// vector, sign extended.
3150 #[inline]
3151 #[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_s))]
3152 #[target_feature(enable = "simd128")]
3153 #[doc(alias("i32x4.extend_high_i16x8_s"))]
3154 #[stable(feature = "wasm_simd", since = "1.54.0")]
3155 pub fn i32x4_extend_high_i16x8(a: v128) -> v128 {
3156     unsafe {
3157         simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
3158             a.as_i16x8(),
3159             a.as_i16x8(),
3160             [4, 5, 6, 7]
3161         ))
3162         .v128()
3163     }
3164 }
3165 
3166 /// Converts low half of the smaller lane vector to a larger lane
3167 /// vector, zero extended.
3168 #[inline]
3169 #[cfg_attr(test, assert_instr(i32x4.extend_low_i16x8_u))]
3170 #[target_feature(enable = "simd128")]
3171 #[doc(alias("i32x4.extend_low_i16x8_u"))]
3172 #[stable(feature = "wasm_simd", since = "1.54.0")]
3173 pub fn i32x4_extend_low_u16x8(a: v128) -> v128 {
3174     unsafe {
3175         simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
3176             a.as_u16x8(),
3177             a.as_u16x8(),
3178             [0, 1, 2, 3]
3179         ))
3180         .v128()
3181     }
3182 }
3183 
3184 #[stable(feature = "wasm_simd", since = "1.54.0")]
3185 pub use i32x4_extend_low_u16x8 as u32x4_extend_low_u16x8;
3186 
3187 /// Converts high half of the smaller lane vector to a larger lane
3188 /// vector, zero extended.
3189 #[inline]
3190 #[cfg_attr(test, assert_instr(i32x4.extend_high_i16x8_u))]
3191 #[target_feature(enable = "simd128")]
3192 #[doc(alias("i32x4.extend_high_i16x8_u"))]
3193 #[stable(feature = "wasm_simd", since = "1.54.0")]
3194 pub fn i32x4_extend_high_u16x8(a: v128) -> v128 {
3195     unsafe {
3196         simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
3197             a.as_u16x8(),
3198             a.as_u16x8(),
3199             [4, 5, 6, 7]
3200         ))
3201         .v128()
3202     }
3203 }
3204 
3205 #[stable(feature = "wasm_simd", since = "1.54.0")]
3206 pub use i32x4_extend_high_u16x8 as u32x4_extend_high_u16x8;
3207 
3208 /// Shifts each lane to the left by the specified number of bits.
3209 ///
3210 /// Only the low bits of the shift amount are used if the shift amount is
3211 /// greater than the lane width.
3212 #[inline]
3213 #[cfg_attr(test, assert_instr(i32x4.shl))]
3214 #[target_feature(enable = "simd128")]
3215 #[doc(alias("i32x4.shl"))]
3216 #[stable(feature = "wasm_simd", since = "1.54.0")]
3217 pub fn i32x4_shl(a: v128, amt: u32) -> v128 {
3218     unsafe { simd_shl(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
3219 }
3220 
3221 #[stable(feature = "wasm_simd", since = "1.54.0")]
3222 pub use i32x4_shl as u32x4_shl;
3223 
3224 /// Shifts each lane to the right by the specified number of bits, sign
3225 /// extending.
3226 ///
3227 /// Only the low bits of the shift amount are used if the shift amount is
3228 /// greater than the lane width.
3229 #[inline]
3230 #[cfg_attr(test, assert_instr(i32x4.shr_s))]
3231 #[target_feature(enable = "simd128")]
3232 #[doc(alias("i32x4.shr_s"))]
3233 #[stable(feature = "wasm_simd", since = "1.54.0")]
3234 pub fn i32x4_shr(a: v128, amt: u32) -> v128 {
3235     unsafe { simd_shr(a.as_i32x4(), simd::i32x4::splat(amt as i32)).v128() }
3236 }
3237 
3238 /// Shifts each lane to the right by the specified number of bits, shifting in
3239 /// zeros.
3240 ///
3241 /// Only the low bits of the shift amount are used if the shift amount is
3242 /// greater than the lane width.
3243 #[inline]
3244 #[cfg_attr(test, assert_instr(i32x4.shr_u))]
3245 #[target_feature(enable = "simd128")]
3246 #[doc(alias("i32x4.shr_u"))]
3247 #[stable(feature = "wasm_simd", since = "1.54.0")]
3248 pub fn u32x4_shr(a: v128, amt: u32) -> v128 {
3249     unsafe { simd_shr(a.as_u32x4(), simd::u32x4::splat(amt as u32)).v128() }
3250 }
3251 
3252 /// Adds two 128-bit vectors as if they were two packed four 32-bit integers.
3253 #[inline]
3254 #[cfg_attr(test, assert_instr(i32x4.add))]
3255 #[target_feature(enable = "simd128")]
3256 #[doc(alias("i32x4.add"))]
3257 #[stable(feature = "wasm_simd", since = "1.54.0")]
3258 pub fn i32x4_add(a: v128, b: v128) -> v128 {
3259     unsafe { simd_add(a.as_i32x4(), b.as_i32x4()).v128() }
3260 }
3261 
3262 #[stable(feature = "wasm_simd", since = "1.54.0")]
3263 pub use i32x4_add as u32x4_add;
3264 
3265 /// Subtracts two 128-bit vectors as if they were two packed four 32-bit integers.
3266 #[inline]
3267 #[cfg_attr(test, assert_instr(i32x4.sub))]
3268 #[target_feature(enable = "simd128")]
3269 #[doc(alias("i32x4.sub"))]
3270 #[stable(feature = "wasm_simd", since = "1.54.0")]
3271 pub fn i32x4_sub(a: v128, b: v128) -> v128 {
3272     unsafe { simd_sub(a.as_i32x4(), b.as_i32x4()).v128() }
3273 }
3274 
3275 #[stable(feature = "wasm_simd", since = "1.54.0")]
3276 pub use i32x4_sub as u32x4_sub;
3277 
3278 /// Multiplies two 128-bit vectors as if they were two packed four 32-bit
3279 /// signed integers.
3280 #[inline]
3281 #[cfg_attr(test, assert_instr(i32x4.mul))]
3282 #[target_feature(enable = "simd128")]
3283 #[doc(alias("i32x4.mul"))]
3284 #[stable(feature = "wasm_simd", since = "1.54.0")]
3285 pub fn i32x4_mul(a: v128, b: v128) -> v128 {
3286     unsafe { simd_mul(a.as_i32x4(), b.as_i32x4()).v128() }
3287 }
3288 
3289 #[stable(feature = "wasm_simd", since = "1.54.0")]
3290 pub use i32x4_mul as u32x4_mul;
3291 
3292 /// Compares lane-wise signed integers, and returns the minimum of
3293 /// each pair.
3294 #[inline]
3295 #[cfg_attr(test, assert_instr(i32x4.min_s))]
3296 #[target_feature(enable = "simd128")]
3297 #[doc(alias("i32x4.min_s"))]
3298 #[stable(feature = "wasm_simd", since = "1.54.0")]
3299 pub fn i32x4_min(a: v128, b: v128) -> v128 {
3300     let a = a.as_i32x4();
3301     let b = b.as_i32x4();
3302     unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
3303 }
3304 
3305 /// Compares lane-wise unsigned integers, and returns the minimum of
3306 /// each pair.
3307 #[inline]
3308 #[cfg_attr(test, assert_instr(i32x4.min_u))]
3309 #[target_feature(enable = "simd128")]
3310 #[doc(alias("i32x4.min_u"))]
3311 #[stable(feature = "wasm_simd", since = "1.54.0")]
3312 pub fn u32x4_min(a: v128, b: v128) -> v128 {
3313     let a = a.as_u32x4();
3314     let b = b.as_u32x4();
3315     unsafe { simd_select::<simd::i32x4, _>(simd_lt(a, b), a, b).v128() }
3316 }
3317 
3318 /// Compares lane-wise signed integers, and returns the maximum of
3319 /// each pair.
3320 #[inline]
3321 #[cfg_attr(test, assert_instr(i32x4.max_s))]
3322 #[target_feature(enable = "simd128")]
3323 #[doc(alias("i32x4.max_s"))]
3324 #[stable(feature = "wasm_simd", since = "1.54.0")]
3325 pub fn i32x4_max(a: v128, b: v128) -> v128 {
3326     let a = a.as_i32x4();
3327     let b = b.as_i32x4();
3328     unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
3329 }
3330 
3331 /// Compares lane-wise unsigned integers, and returns the maximum of
3332 /// each pair.
3333 #[inline]
3334 #[cfg_attr(test, assert_instr(i32x4.max_u))]
3335 #[target_feature(enable = "simd128")]
3336 #[doc(alias("i32x4.max_u"))]
3337 #[stable(feature = "wasm_simd", since = "1.54.0")]
3338 pub fn u32x4_max(a: v128, b: v128) -> v128 {
3339     let a = a.as_u32x4();
3340     let b = b.as_u32x4();
3341     unsafe { simd_select::<simd::i32x4, _>(simd_gt(a, b), a, b).v128() }
3342 }
3343 
3344 /// Lane-wise multiplies the signed 16-bit integers in the two input vectors and
3345 /// adds adjacent pairs of the full 32-bit results.
3346 #[inline]
3347 #[cfg_attr(test, assert_instr(i32x4.dot_i16x8_s))]
3348 #[target_feature(enable = "simd128")]
3349 #[doc(alias("i32x4.dot_i16x8_s"))]
3350 #[stable(feature = "wasm_simd", since = "1.54.0")]
3351 pub fn i32x4_dot_i16x8(a: v128, b: v128) -> v128 {
3352     unsafe { llvm_i32x4_dot_i16x8_s(a.as_i16x8(), b.as_i16x8()).v128() }
3353 }
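
// A test-only sketch (not part of the upstream module): output lane `i` is
// `a[2*i] * b[2*i] + a[2*i + 1] * b[2*i + 1]`, computed in full 32-bit
// precision. Assumes the `i16x8` constructor and `i32x4_extract_lane` from
// earlier in this file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn i32x4_dot_i16x8_sketch() {
    let a = i16x8(1, 2, 3, 4, 5, 6, 7, 8);
    let b = i16x8(10, 20, 30, 40, 50, 60, 70, 80);
    let d = i32x4_dot_i16x8(a, b);
    // Lane 0 pairs input lanes 0 and 1: 1*10 + 2*20 = 50.
    assert_eq!(i32x4_extract_lane::<0>(d), 50);
    // Lane 3 pairs input lanes 6 and 7: 7*70 + 8*80 = 1130.
    assert_eq!(i32x4_extract_lane::<3>(d), 1130);
}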
3354 
3355 /// Lane-wise integer extended multiplication producing twice wider result than
3356 /// the inputs.
3357 ///
3358 /// Equivalent of `i32x4_mul(i32x4_extend_low_i16x8_s(a), i32x4_extend_low_i16x8_s(b))`
3359 #[inline]
3360 #[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_s))]
3361 #[target_feature(enable = "simd128")]
3362 #[doc(alias("i32x4.extmul_low_i16x8_s"))]
3363 #[stable(feature = "wasm_simd", since = "1.54.0")]
3364 pub fn i32x4_extmul_low_i16x8(a: v128, b: v128) -> v128 {
3365     unsafe {
3366         let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
3367             a.as_i16x8(),
3368             a.as_i16x8(),
3369             [0, 1, 2, 3]
3370         ));
3371         let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
3372             b.as_i16x8(),
3373             b.as_i16x8(),
3374             [0, 1, 2, 3]
3375         ));
3376         simd_mul(lhs, rhs).v128()
3377     }
3378 }
3379 
3380 /// Lane-wise integer extended multiplication producing twice wider result than
3381 /// the inputs.
3382 ///
3383 /// Equivalent of `i32x4_mul(i32x4_extend_high_i16x8_s(a), i32x4_extend_high_i16x8_s(b))`
3384 #[inline]
3385 #[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_s))]
3386 #[target_feature(enable = "simd128")]
3387 #[doc(alias("i32x4.extmul_high_i16x8_s"))]
3388 #[stable(feature = "wasm_simd", since = "1.54.0")]
3389 pub fn i32x4_extmul_high_i16x8(a: v128, b: v128) -> v128 {
3390     unsafe {
3391         let lhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
3392             a.as_i16x8(),
3393             a.as_i16x8(),
3394             [4, 5, 6, 7]
3395         ));
3396         let rhs = simd_cast::<simd::i16x4, simd::i32x4>(simd_shuffle4!(
3397             b.as_i16x8(),
3398             b.as_i16x8(),
3399             [4, 5, 6, 7]
3400         ));
3401         simd_mul(lhs, rhs).v128()
3402     }
3403 }
3404 
3405 /// Lane-wise integer extended multiplication producing twice wider result than
3406 /// the inputs.
3407 ///
3408 /// Equivalent of `i32x4_mul(i32x4_extend_low_u16x8(a), i32x4_extend_low_u16x8(b))`
3409 #[inline]
3410 #[cfg_attr(test, assert_instr(i32x4.extmul_low_i16x8_u))]
3411 #[target_feature(enable = "simd128")]
3412 #[doc(alias("i32x4.extmul_low_i16x8_u"))]
3413 #[stable(feature = "wasm_simd", since = "1.54.0")]
3414 pub fn i32x4_extmul_low_u16x8(a: v128, b: v128) -> v128 {
3415     unsafe {
3416         let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
3417             a.as_u16x8(),
3418             a.as_u16x8(),
3419             [0, 1, 2, 3]
3420         ));
3421         let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
3422             b.as_u16x8(),
3423             b.as_u16x8(),
3424             [0, 1, 2, 3]
3425         ));
3426         simd_mul(lhs, rhs).v128()
3427     }
3428 }
3429 
3430 #[stable(feature = "wasm_simd", since = "1.54.0")]
3431 pub use i32x4_extmul_low_u16x8 as u32x4_extmul_low_u16x8;
3432 
3433 /// Lane-wise integer extended multiplication producing twice wider result than
3434 /// the inputs.
3435 ///
3436 /// Equivalent of `i32x4_mul(i32x4_extend_high_u16x8(a), i32x4_extend_high_u16x8(b))`
3437 #[inline]
3438 #[cfg_attr(test, assert_instr(i32x4.extmul_high_i16x8_u))]
3439 #[target_feature(enable = "simd128")]
3440 #[doc(alias("i32x4.extmul_high_i16x8_u"))]
3441 #[stable(feature = "wasm_simd", since = "1.54.0")]
3442 pub fn i32x4_extmul_high_u16x8(a: v128, b: v128) -> v128 {
3443     unsafe {
3444         let lhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
3445             a.as_u16x8(),
3446             a.as_u16x8(),
3447             [4, 5, 6, 7]
3448         ));
3449         let rhs = simd_cast::<simd::u16x4, simd::u32x4>(simd_shuffle4!(
3450             b.as_u16x8(),
3451             b.as_u16x8(),
3452             [4, 5, 6, 7]
3453         ));
3454         simd_mul(lhs, rhs).v128()
3455     }
3456 }
3457 
3458 #[stable(feature = "wasm_simd", since = "1.54.0")]
3459 pub use i32x4_extmul_high_u16x8 as u32x4_extmul_high_u16x8;
3460 
3461 /// Lane-wise wrapping absolute value.
3462 #[inline]
3463 // #[cfg_attr(test, assert_instr(i64x2.abs))] // FIXME llvm
3464 #[target_feature(enable = "simd128")]
3465 #[doc(alias("i64x2.abs"))]
3466 #[stable(feature = "wasm_simd", since = "1.54.0")]
3467 pub fn i64x2_abs(a: v128) -> v128 {
3468     let a = a.as_i64x2();
3469     let zero = simd::i64x2::splat(0);
3470     unsafe {
3471         simd_select::<simd::m64x2, simd::i64x2>(simd_lt(a, zero), simd_sub(zero, a), a).v128()
3472     }
3473 }
3474 
3475 /// Negates a 128-bit vector interpreted as two 64-bit signed integers.
3476 #[inline]
3477 #[cfg_attr(test, assert_instr(i64x2.neg))]
3478 #[target_feature(enable = "simd128")]
3479 #[doc(alias("i64x2.neg"))]
3480 #[stable(feature = "wasm_simd", since = "1.54.0")]
3481 pub fn i64x2_neg(a: v128) -> v128 {
3482     unsafe { simd_mul(a.as_i64x2(), simd::i64x2::splat(-1)).v128() }
3483 }
3484 
3485 /// Returns true if all lanes are non-zero, false otherwise.
3486 #[inline]
3487 #[cfg_attr(test, assert_instr(i64x2.all_true))]
3488 #[target_feature(enable = "simd128")]
3489 #[doc(alias("i64x2.all_true"))]
3490 #[stable(feature = "wasm_simd", since = "1.54.0")]
3491 pub fn i64x2_all_true(a: v128) -> bool {
3492     unsafe { llvm_i64x2_all_true(a.as_i64x2()) != 0 }
3493 }
3494 
3495 #[stable(feature = "wasm_simd", since = "1.54.0")]
3496 pub use i64x2_all_true as u64x2_all_true;
3497 
3498 /// Extracts the high bit for each lane in `a` and produces a scalar mask with
3499 /// all bits concatenated.
3500 #[inline]
3501 #[cfg_attr(test, assert_instr(i64x2.bitmask))]
3502 #[target_feature(enable = "simd128")]
3503 #[doc(alias("i64x2.bitmask"))]
3504 #[stable(feature = "wasm_simd", since = "1.54.0")]
3505 pub fn i64x2_bitmask(a: v128) -> u8 {
3506     unsafe { llvm_bitmask_i64x2(a.as_i64x2()) as u8 }
3507 }
3508 
3509 #[stable(feature = "wasm_simd", since = "1.54.0")]
3510 pub use i64x2_bitmask as u64x2_bitmask;
3511 
3512 /// Converts low half of the smaller lane vector to a larger lane
3513 /// vector, sign extended.
3514 #[inline]
3515 #[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_s))]
3516 #[target_feature(enable = "simd128")]
3517 #[doc(alias("i64x2.extend_low_i32x4_s"))]
3518 #[stable(feature = "wasm_simd", since = "1.54.0")]
3519 pub fn i64x2_extend_low_i32x4(a: v128) -> v128 {
3520     unsafe {
3521         simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1]))
3522             .v128()
3523     }
3524 }
3525 
3526 /// Converts high half of the smaller lane vector to a larger lane
3527 /// vector, sign extended.
3528 #[inline]
3529 #[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_s))]
3530 #[target_feature(enable = "simd128")]
3531 #[doc(alias("i64x2.extend_high_i32x4_s"))]
3532 #[stable(feature = "wasm_simd", since = "1.54.0")]
3533 pub fn i64x2_extend_high_i32x4(a: v128) -> v128 {
3534     unsafe {
3535         simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [2, 3]))
3536             .v128()
3537     }
3538 }
3539 
3540 /// Converts low half of the smaller lane vector to a larger lane
3541 /// vector, zero extended.
3542 #[inline]
3543 #[cfg_attr(test, assert_instr(i64x2.extend_low_i32x4_u))]
3544 #[target_feature(enable = "simd128")]
3545 #[doc(alias("i64x2.extend_low_i32x4_u"))]
3546 #[stable(feature = "wasm_simd", since = "1.54.0")]
3547 pub fn i64x2_extend_low_u32x4(a: v128) -> v128 {
3548     unsafe {
3549         simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1]))
3550             .v128()
3551     }
3552 }
3553 
3554 #[stable(feature = "wasm_simd", since = "1.54.0")]
3555 pub use i64x2_extend_low_u32x4 as u64x2_extend_low_u32x4;
3556 
3557 /// Converts high half of the smaller lane vector to a larger lane
3558 /// vector, zero extended.
3559 #[inline]
3560 #[cfg_attr(test, assert_instr(i64x2.extend_high_i32x4_u))]
3561 #[target_feature(enable = "simd128")]
3562 #[doc(alias("i64x2.extend_high_i32x4_u"))]
3563 #[stable(feature = "wasm_simd", since = "1.54.0")]
3564 pub fn i64x2_extend_high_u32x4(a: v128) -> v128 {
3565     unsafe {
3566         simd_cast::<simd::u32x2, simd::i64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [2, 3]))
3567             .v128()
3568     }
3569 }
3570 
3571 #[stable(feature = "wasm_simd", since = "1.54.0")]
3572 pub use i64x2_extend_high_u32x4 as u64x2_extend_high_u32x4;
3573 
3574 /// Shifts each lane to the left by the specified number of bits.
3575 ///
3576 /// Only the low bits of the shift amount are used if the shift amount is
3577 /// greater than the lane width.
3578 #[inline]
3579 #[cfg_attr(test, assert_instr(i64x2.shl))]
3580 #[target_feature(enable = "simd128")]
3581 #[doc(alias("i64x2.shl"))]
3582 #[stable(feature = "wasm_simd", since = "1.54.0")]
3583 pub fn i64x2_shl(a: v128, amt: u32) -> v128 {
3584     unsafe { simd_shl(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
3585 }
3586 
3587 #[stable(feature = "wasm_simd", since = "1.54.0")]
3588 pub use i64x2_shl as u64x2_shl;
3589 
3590 /// Shifts each lane to the right by the specified number of bits, sign
3591 /// extending.
3592 ///
3593 /// Only the low bits of the shift amount are used if the shift amount is
3594 /// greater than the lane width.
3595 #[inline]
3596 #[cfg_attr(test, assert_instr(i64x2.shr_s))]
3597 #[target_feature(enable = "simd128")]
3598 #[doc(alias("i64x2.shr_s"))]
3599 #[stable(feature = "wasm_simd", since = "1.54.0")]
3600 pub fn i64x2_shr(a: v128, amt: u32) -> v128 {
3601     unsafe { simd_shr(a.as_i64x2(), simd::i64x2::splat(amt as i64)).v128() }
3602 }
3603 
3604 /// Shifts each lane to the right by the specified number of bits, shifting in
3605 /// zeros.
3606 ///
3607 /// Only the low bits of the shift amount are used if the shift amount is
3608 /// greater than the lane width.
3609 #[inline]
3610 #[cfg_attr(test, assert_instr(i64x2.shr_u))]
3611 #[target_feature(enable = "simd128")]
3612 #[doc(alias("i64x2.shr_u"))]
3613 #[stable(feature = "wasm_simd", since = "1.54.0")]
3614 pub fn u64x2_shr(a: v128, amt: u32) -> v128 {
3615     unsafe { simd_shr(a.as_u64x2(), simd::u64x2::splat(amt as u64)).v128() }
3616 }
3617 
3618 /// Adds two 128-bit vectors as if they were two packed two 64-bit integers.
3619 #[inline]
3620 #[cfg_attr(test, assert_instr(i64x2.add))]
3621 #[target_feature(enable = "simd128")]
3622 #[doc(alias("i64x2.add"))]
3623 #[stable(feature = "wasm_simd", since = "1.54.0")]
3624 pub fn i64x2_add(a: v128, b: v128) -> v128 {
3625     unsafe { simd_add(a.as_i64x2(), b.as_i64x2()).v128() }
3626 }
3627 
3628 #[stable(feature = "wasm_simd", since = "1.54.0")]
3629 pub use i64x2_add as u64x2_add;
3630 
3631 /// Subtracts two 128-bit vectors as if they were two packed two 64-bit integers.
3632 #[inline]
3633 #[cfg_attr(test, assert_instr(i64x2.sub))]
3634 #[target_feature(enable = "simd128")]
3635 #[doc(alias("i64x2.sub"))]
3636 #[stable(feature = "wasm_simd", since = "1.54.0")]
3637 pub fn i64x2_sub(a: v128, b: v128) -> v128 {
3638     unsafe { simd_sub(a.as_i64x2(), b.as_i64x2()).v128() }
3639 }
3640 
3641 #[stable(feature = "wasm_simd", since = "1.54.0")]
3642 pub use i64x2_sub as u64x2_sub;
3643 
3644 /// Multiplies two 128-bit vectors as if they were two packed two 64-bit integers.
3645 #[inline]
3646 #[cfg_attr(test, assert_instr(i64x2.mul))]
3647 #[target_feature(enable = "simd128")]
3648 #[doc(alias("i64x2.mul"))]
3649 #[stable(feature = "wasm_simd", since = "1.54.0")]
3650 pub fn i64x2_mul(a: v128, b: v128) -> v128 {
3651     unsafe { simd_mul(a.as_i64x2(), b.as_i64x2()).v128() }
3652 }
3653 
3654 #[stable(feature = "wasm_simd", since = "1.54.0")]
3655 pub use i64x2_mul as u64x2_mul;
3656 
3657 /// Lane-wise integer extended multiplication producing twice wider result than
3658 /// the inputs.
3659 ///
3660 /// Equivalent of `i64x2_mul(i64x2_extend_low_i32x4_s(a), i64x2_extend_low_i32x4_s(b))`
3661 #[inline]
3662 #[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_s))]
3663 #[target_feature(enable = "simd128")]
3664 #[doc(alias("i64x2.extmul_low_i32x4_s"))]
3665 #[stable(feature = "wasm_simd", since = "1.54.0")]
3666 pub fn i64x2_extmul_low_i32x4(a: v128, b: v128) -> v128 {
3667     unsafe {
3668         let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(
3669             a.as_i32x4(),
3670             a.as_i32x4(),
3671             [0, 1]
3672         ));
3673         let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(
3674             b.as_i32x4(),
3675             b.as_i32x4(),
3676             [0, 1]
3677         ));
3678         simd_mul(lhs, rhs).v128()
3679     }
3680 }
3681 
3682 /// Lane-wise integer extended multiplication producing twice wider result than
3683 /// the inputs.
3684 ///
3685 /// Equivalent of `i64x2_mul(i64x2_extend_high_i32x4_s(a), i64x2_extend_high_i32x4_s(b))`
3686 #[inline]
3687 #[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_s))]
3688 #[target_feature(enable = "simd128")]
3689 #[doc(alias("i64x2.extmul_high_i32x4_s"))]
3690 #[stable(feature = "wasm_simd", since = "1.54.0")]
3691 pub fn i64x2_extmul_high_i32x4(a: v128, b: v128) -> v128 {
3692     unsafe {
3693         let lhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(
3694             a.as_i32x4(),
3695             a.as_i32x4(),
3696             [2, 3]
3697         ));
3698         let rhs = simd_cast::<simd::i32x2, simd::i64x2>(simd_shuffle2!(
3699             b.as_i32x4(),
3700             b.as_i32x4(),
3701             [2, 3]
3702         ));
3703         simd_mul(lhs, rhs).v128()
3704     }
3705 }
3706 
3707 /// Lane-wise integer extended multiplication producing twice wider result than
3708 /// the inputs.
3709 ///
3710 /// Equivalent of `i64x2_mul(i64x2_extend_low_i32x4_u(a), i64x2_extend_low_i32x4_u(b))`
3711 #[inline]
3712 #[cfg_attr(test, assert_instr(i64x2.extmul_low_i32x4_u))]
3713 #[target_feature(enable = "simd128")]
3714 #[doc(alias("i64x2.extmul_low_i32x4_u"))]
3715 #[stable(feature = "wasm_simd", since = "1.54.0")]
3716 pub fn i64x2_extmul_low_u32x4(a: v128, b: v128) -> v128 {
3717     unsafe {
3718         let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(
3719             a.as_u32x4(),
3720             a.as_u32x4(),
3721             [0, 1]
3722         ));
3723         let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(
3724             b.as_u32x4(),
3725             b.as_u32x4(),
3726             [0, 1]
3727         ));
3728         simd_mul(lhs, rhs).v128()
3729     }
3730 }
3731 
3732 #[stable(feature = "wasm_simd", since = "1.54.0")]
3733 pub use i64x2_extmul_low_u32x4 as u64x2_extmul_low_u32x4;
3734 
3735 /// Lane-wise integer extended multiplication producing twice wider result than
3736 /// the inputs.
3737 ///
3738 /// Equivalent of `i64x2_mul(i64x2_extend_high_i32x4_u(a), i64x2_extend_high_i32x4_u(b))`
3739 #[inline]
3740 #[cfg_attr(test, assert_instr(i64x2.extmul_high_i32x4_u))]
3741 #[target_feature(enable = "simd128")]
3742 #[doc(alias("i64x2.extmul_high_i32x4_u"))]
3743 #[stable(feature = "wasm_simd", since = "1.54.0")]
3744 pub fn i64x2_extmul_high_u32x4(a: v128, b: v128) -> v128 {
3745     unsafe {
3746         let lhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(
3747             a.as_u32x4(),
3748             a.as_u32x4(),
3749             [2, 3]
3750         ));
3751         let rhs = simd_cast::<simd::u32x2, simd::u64x2>(simd_shuffle2!(
3752             b.as_u32x4(),
3753             b.as_u32x4(),
3754             [2, 3]
3755         ));
3756         simd_mul(lhs, rhs).v128()
3757     }
3758 }
3759 
3760 #[stable(feature = "wasm_simd", since = "1.54.0")]
3761 pub use i64x2_extmul_high_u32x4 as u64x2_extmul_high_u32x4;
3762 
3763 /// Lane-wise rounding to the nearest integral value not smaller than the input.
3764 #[inline]
3765 #[cfg_attr(test, assert_instr(f32x4.ceil))]
3766 #[target_feature(enable = "simd128")]
3767 #[doc(alias("f32x4.ceil"))]
3768 #[stable(feature = "wasm_simd", since = "1.54.0")]
3769 pub fn f32x4_ceil(a: v128) -> v128 {
3770     unsafe { llvm_f32x4_ceil(a.as_f32x4()).v128() }
3771 }
3772 
3773 /// Lane-wise rounding to the nearest integral value not greater than the input.
3774 #[inline]
3775 #[cfg_attr(test, assert_instr(f32x4.floor))]
3776 #[target_feature(enable = "simd128")]
3777 #[doc(alias("f32x4.floor"))]
3778 #[stable(feature = "wasm_simd", since = "1.54.0")]
3779 pub fn f32x4_floor(a: v128) -> v128 {
3780     unsafe { llvm_f32x4_floor(a.as_f32x4()).v128() }
3781 }
3782 
3783 /// Lane-wise rounding to the nearest integral value with the magnitude not
3784 /// larger than the input.
3785 #[inline]
3786 #[cfg_attr(test, assert_instr(f32x4.trunc))]
3787 #[target_feature(enable = "simd128")]
3788 #[doc(alias("f32x4.trunc"))]
3789 #[stable(feature = "wasm_simd", since = "1.54.0")]
3790 pub fn f32x4_trunc(a: v128) -> v128 {
3791     unsafe { llvm_f32x4_trunc(a.as_f32x4()).v128() }
3792 }
3793 
3794 /// Lane-wise rounding to the nearest integral value; if two values are equally
3795 /// near, rounds to the even one.
3796 #[inline]
3797 #[cfg_attr(test, assert_instr(f32x4.nearest))]
3798 #[target_feature(enable = "simd128")]
3799 #[doc(alias("f32x4.nearest"))]
3800 #[stable(feature = "wasm_simd", since = "1.54.0")]
3801 pub fn f32x4_nearest(a: v128) -> v128 {
3802     unsafe { llvm_f32x4_nearest(a.as_f32x4()).v128() }
3803 }
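
// A test-only sketch (not part of the upstream module): `nearest` rounds ties to
// the even neighbour rather than away from zero. Assumes `f32x4_splat` and
// `f32x4_extract_lane` from earlier in this file.
#[cfg(test)]
#[allow(dead_code)]
#[target_feature(enable = "simd128")]
fn f32x4_nearest_sketch() {
    // 2.5 is exactly halfway, so it rounds to the even value 2.0.
    assert_eq!(f32x4_extract_lane::<0>(f32x4_nearest(f32x4_splat(2.5))), 2.0);
    // 3.5 likewise rounds to 4.0, the even neighbour on the other side.
    assert_eq!(f32x4_extract_lane::<0>(f32x4_nearest(f32x4_splat(3.5))), 4.0);
}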
3804 
3805 /// Calculates the absolute value of each lane of a 128-bit vector interpreted
3806 /// as four 32-bit floating point numbers.
3807 #[inline]
3808 #[cfg_attr(test, assert_instr(f32x4.abs))]
3809 #[target_feature(enable = "simd128")]
3810 #[doc(alias("f32x4.abs"))]
3811 #[stable(feature = "wasm_simd", since = "1.54.0")]
3812 pub fn f32x4_abs(a: v128) -> v128 {
3813     unsafe { llvm_f32x4_abs(a.as_f32x4()).v128() }
3814 }
3815 
3816 /// Negates each lane of a 128-bit vector interpreted as four 32-bit floating
3817 /// point numbers.
3818 #[inline]
3819 #[cfg_attr(test, assert_instr(f32x4.neg))]
3820 #[target_feature(enable = "simd128")]
3821 #[doc(alias("f32x4.neg"))]
3822 #[stable(feature = "wasm_simd", since = "1.54.0")]
3823 pub fn f32x4_neg(a: v128) -> v128 {
3824     f32x4_mul(a, f32x4_splat(-1.))
3825 }
3826 
3827 /// Calculates the square root of each lane of a 128-bit vector interpreted as
3828 /// four 32-bit floating point numbers.
3829 #[inline]
3830 #[cfg_attr(test, assert_instr(f32x4.sqrt))]
3831 #[target_feature(enable = "simd128")]
3832 #[doc(alias("f32x4.sqrt"))]
3833 #[stable(feature = "wasm_simd", since = "1.54.0")]
3834 pub fn f32x4_sqrt(a: v128) -> v128 {
3835     unsafe { llvm_f32x4_sqrt(a.as_f32x4()).v128() }
3836 }
3837 
3838 /// Adds pairwise lanes of two 128-bit vectors interpreted as four 32-bit
3839 /// floating point numbers.
3840 #[inline]
3841 #[cfg_attr(test, assert_instr(f32x4.add))]
3842 #[target_feature(enable = "simd128")]
3843 #[doc(alias("f32x4.add"))]
3844 #[stable(feature = "wasm_simd", since = "1.54.0")]
3845 pub fn f32x4_add(a: v128, b: v128) -> v128 {
3846     unsafe { simd_add(a.as_f32x4(), b.as_f32x4()).v128() }
3847 }
3848 
3849 /// Subtracts pairwise lanes of two 128-bit vectors interpreted as four 32-bit
3850 /// floating point numbers.
3851 #[inline]
3852 #[cfg_attr(test, assert_instr(f32x4.sub))]
3853 #[target_feature(enable = "simd128")]
3854 #[doc(alias("f32x4.sub"))]
3855 #[stable(feature = "wasm_simd", since = "1.54.0")]
3856 pub fn f32x4_sub(a: v128, b: v128) -> v128 {
3857     unsafe { simd_sub(a.as_f32x4(), b.as_f32x4()).v128() }
3858 }
3859 
3860 /// Multiplies pairwise lanes of two 128-bit vectors interpreted as four 32-bit
3861 /// floating point numbers.
3862 #[inline]
3863 #[cfg_attr(test, assert_instr(f32x4.mul))]
3864 #[target_feature(enable = "simd128")]
3865 #[doc(alias("f32x4.mul"))]
3866 #[stable(feature = "wasm_simd", since = "1.54.0")]
3867 pub fn f32x4_mul(a: v128, b: v128) -> v128 {
3868     unsafe { simd_mul(a.as_f32x4(), b.as_f32x4()).v128() }
3869 }
3870 
3871 /// Divides pairwise lanes of two 128-bit vectors interpreted as four 32-bit
3872 /// floating point numbers.
3873 #[inline]
3874 #[cfg_attr(test, assert_instr(f32x4.div))]
3875 #[target_feature(enable = "simd128")]
3876 #[doc(alias("f32x4.div"))]
3877 #[stable(feature = "wasm_simd", since = "1.54.0")]
3878 pub fn f32x4_div(a: v128, b: v128) -> v128 {
3879     unsafe { simd_div(a.as_f32x4(), b.as_f32x4()).v128() }
3880 }
3881 
3882 /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted
3883 /// as four 32-bit floating point numbers.
3884 #[inline]
3885 #[cfg_attr(test, assert_instr(f32x4.min))]
3886 #[target_feature(enable = "simd128")]
3887 #[doc(alias("f32x4.min"))]
3888 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_min(a: v128, b: v128) -> v128 {
3890     unsafe { llvm_f32x4_min(a.as_f32x4(), b.as_f32x4()).v128() }
3891 }
3892 
3893 /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted
3894 /// as four 32-bit floating point numbers.
3895 #[inline]
3896 #[cfg_attr(test, assert_instr(f32x4.max))]
3897 #[target_feature(enable = "simd128")]
3898 #[doc(alias("f32x4.max"))]
3899 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_max(a: v128, b: v128) -> v128 {
3901     unsafe { llvm_f32x4_max(a.as_f32x4(), b.as_f32x4()).v128() }
3902 }
3903 
3904 /// Lane-wise minimum value, defined as `b < a ? b : a`
3905 #[inline]
3906 #[cfg_attr(test, assert_instr(f32x4.pmin))]
3907 #[target_feature(enable = "simd128")]
3908 #[doc(alias("f32x4.pmin"))]
3909 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmin(a: v128, b: v128) -> v128 {
3911     unsafe {
3912         simd_select::<simd::m32x4, simd::f32x4>(
3913             simd_lt(b.as_f32x4(), a.as_f32x4()),
3914             b.as_f32x4(),
3915             a.as_f32x4(),
3916         )
3917         .v128()
3918     }
3919 }
3920 
3921 /// Lane-wise maximum value, defined as `a < b ? b : a`
3922 #[inline]
3923 #[cfg_attr(test, assert_instr(f32x4.pmax))]
3924 #[target_feature(enable = "simd128")]
3925 #[doc(alias("f32x4.pmax"))]
3926 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_pmax(a: v128, b: v128) -> v128 {
3928     unsafe {
3929         simd_select::<simd::m32x4, simd::f32x4>(
3930             simd_lt(a.as_f32x4(), b.as_f32x4()),
3931             b.as_f32x4(),
3932             a.as_f32x4(),
3933         )
3934         .v128()
3935     }
3936 }
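
// Unlike `f32x4_min`/`f32x4_max`, which follow WebAssembly's `f32x4.min`/`f32x4.max`
// semantics (NaN-propagating, and treating `-0.0` as less than `+0.0`), the
// pseudo-minimum/maximum above are plain comparison selects. For example,
// `f32x4_pmin(f32x4_splat(0.0), f32x4_splat(-0.0))` keeps the first operand (`0.0`
// in every lane) because `-0.0 < 0.0` is false, whereas `f32x4_min` would return
// `-0.0`. The same distinction applies to the `f64x2` variants below.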
3937 
3938 /// Lane-wise rounding to the nearest integral value not smaller than the input.
3939 #[inline]
3940 #[cfg_attr(test, assert_instr(f64x2.ceil))]
3941 #[target_feature(enable = "simd128")]
3942 #[doc(alias("f64x2.ceil"))]
3943 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_ceil(a: v128) -> v128 {
3945     unsafe { llvm_f64x2_ceil(a.as_f64x2()).v128() }
3946 }
3947 
3948 /// Lane-wise rounding to the nearest integral value not greater than the input.
3949 #[inline]
3950 #[cfg_attr(test, assert_instr(f64x2.floor))]
3951 #[target_feature(enable = "simd128")]
3952 #[doc(alias("f64x2.floor"))]
3953 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_floor(a: v128) -> v128 {
3955     unsafe { llvm_f64x2_floor(a.as_f64x2()).v128() }
3956 }
3957 
3958 /// Lane-wise rounding to the nearest integral value with the magnitude not
3959 /// larger than the input.
3960 #[inline]
3961 #[cfg_attr(test, assert_instr(f64x2.trunc))]
3962 #[target_feature(enable = "simd128")]
3963 #[doc(alias("f64x2.trunc"))]
3964 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_trunc(a: v128) -> v128 {
3966     unsafe { llvm_f64x2_trunc(a.as_f64x2()).v128() }
3967 }
3968 
3969 /// Lane-wise rounding to the nearest integral value; if two values are equally
3970 /// near, rounds to the even one.
3971 #[inline]
3972 #[cfg_attr(test, assert_instr(f64x2.nearest))]
3973 #[target_feature(enable = "simd128")]
3974 #[doc(alias("f64x2.nearest"))]
3975 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_nearest(a: v128) -> v128 {
3977     unsafe { llvm_f64x2_nearest(a.as_f64x2()).v128() }
3978 }
3979 
3980 /// Calculates the absolute value of each lane of a 128-bit vector interpreted
3981 /// as two 64-bit floating point numbers.
3982 #[inline]
3983 #[cfg_attr(test, assert_instr(f64x2.abs))]
3984 #[target_feature(enable = "simd128")]
3985 #[doc(alias("f64x2.abs"))]
3986 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_abs(a: v128) -> v128 {
3988     unsafe { llvm_f64x2_abs(a.as_f64x2()).v128() }
3989 }
3990 
3991 /// Negates each lane of a 128-bit vector interpreted as two 64-bit floating
3992 /// point numbers.
3993 #[inline]
3994 #[cfg_attr(test, assert_instr(f64x2.neg))]
3995 #[target_feature(enable = "simd128")]
3996 #[doc(alias("f64x2.neg"))]
3997 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_neg(a: v128) -> v128 {
3999     f64x2_mul(a, f64x2_splat(-1.0))
4000 }
4001 
4002 /// Calculates the square root of each lane of a 128-bit vector interpreted as
4003 /// two 64-bit floating point numbers.
4004 #[inline]
4005 #[cfg_attr(test, assert_instr(f64x2.sqrt))]
4006 #[target_feature(enable = "simd128")]
4007 #[doc(alias("f64x2.sqrt"))]
4008 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sqrt(a: v128) -> v128 {
4010     unsafe { llvm_f64x2_sqrt(a.as_f64x2()).v128() }
4011 }
4012 
4013 /// Adds pairwise lanes of two 128-bit vectors interpreted as two 64-bit
4014 /// floating point numbers.
4015 #[inline]
4016 #[cfg_attr(test, assert_instr(f64x2.add))]
4017 #[target_feature(enable = "simd128")]
4018 #[doc(alias("f64x2.add"))]
4019 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_add(a: v128, b: v128) -> v128 {
4021     unsafe { simd_add(a.as_f64x2(), b.as_f64x2()).v128() }
4022 }
4023 
4024 /// Subtracts pairwise lanes of two 128-bit vectors interpreted as two 64-bit
4025 /// floating point numbers.
4026 #[inline]
4027 #[cfg_attr(test, assert_instr(f64x2.sub))]
4028 #[target_feature(enable = "simd128")]
4029 #[doc(alias("f64x2.sub"))]
4030 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_sub(a: v128, b: v128) -> v128 {
4032     unsafe { simd_sub(a.as_f64x2(), b.as_f64x2()).v128() }
4033 }
4034 
4035 /// Multiplies pairwise lanes of two 128-bit vectors interpreted as two 64-bit
4036 /// floating point numbers.
4037 #[inline]
4038 #[cfg_attr(test, assert_instr(f64x2.mul))]
4039 #[target_feature(enable = "simd128")]
4040 #[doc(alias("f64x2.mul"))]
4041 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_mul(a: v128, b: v128) -> v128 {
4043     unsafe { simd_mul(a.as_f64x2(), b.as_f64x2()).v128() }
4044 }
4045 
4046 /// Divides pairwise lanes of two 128-bit vectors interpreted as two 64-bit
4047 /// floating point numbers.
4048 #[inline]
4049 #[cfg_attr(test, assert_instr(f64x2.div))]
4050 #[target_feature(enable = "simd128")]
4051 #[doc(alias("f64x2.div"))]
4052 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_div(a: v128, b: v128) -> v128 {
4054     unsafe { simd_div(a.as_f64x2(), b.as_f64x2()).v128() }
4055 }
4056 
4057 /// Calculates the minimum of pairwise lanes of two 128-bit vectors interpreted
4058 /// as two 64-bit floating point numbers.
4059 #[inline]
4060 #[cfg_attr(test, assert_instr(f64x2.min))]
4061 #[target_feature(enable = "simd128")]
4062 #[doc(alias("f64x2.min"))]
4063 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_min(a: v128, b: v128) -> v128 {
4065     unsafe { llvm_f64x2_min(a.as_f64x2(), b.as_f64x2()).v128() }
4066 }
4067 
4068 /// Calculates the maximum of pairwise lanes of two 128-bit vectors interpreted
4069 /// as two 64-bit floating point numbers.
4070 #[inline]
4071 #[cfg_attr(test, assert_instr(f64x2.max))]
4072 #[target_feature(enable = "simd128")]
4073 #[doc(alias("f64x2.max"))]
4074 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_max(a: v128, b: v128) -> v128 {
4076     unsafe { llvm_f64x2_max(a.as_f64x2(), b.as_f64x2()).v128() }
4077 }
4078 
4079 /// Lane-wise minimum value, defined as `b < a ? b : a`
4080 #[inline]
4081 #[cfg_attr(test, assert_instr(f64x2.pmin))]
4082 #[target_feature(enable = "simd128")]
4083 #[doc(alias("f64x2.pmin"))]
4084 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmin(a: v128, b: v128) -> v128 {
4086     unsafe {
4087         simd_select::<simd::m64x2, simd::f64x2>(
4088             simd_lt(b.as_f64x2(), a.as_f64x2()),
4089             b.as_f64x2(),
4090             a.as_f64x2(),
4091         )
4092         .v128()
4093     }
4094 }
4095 
4096 /// Lane-wise maximum value, defined as `a < b ? b : a`
4097 #[inline]
4098 #[cfg_attr(test, assert_instr(f64x2.pmax))]
4099 #[target_feature(enable = "simd128")]
4100 #[doc(alias("f64x2.pmax"))]
4101 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_pmax(a: v128, b: v128) -> v128 {
4103     unsafe {
4104         simd_select::<simd::m64x2, simd::f64x2>(
4105             simd_lt(a.as_f64x2(), b.as_f64x2()),
4106             b.as_f64x2(),
4107             a.as_f64x2(),
4108         )
4109         .v128()
4110     }
4111 }
4112 
4113 /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
4114 /// into a 128-bit vector of four 32-bit signed integers.
4115 ///
/// NaN lanes are converted to 0. Lanes whose truncated value is out of bounds
/// saturate to the nearest representable integer.
4118 #[inline]
4119 #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_s))]
4120 #[target_feature(enable = "simd128")]
4121 #[doc(alias("i32x4.trunc_sat_f32x4_s"))]
4122 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f32x4(a: v128) -> v128 {
4124     unsafe { llvm_i32x4_trunc_sat_f32x4_s(a.as_f32x4()).v128() }
4125 }
4126 
4127 /// Converts a 128-bit vector interpreted as four 32-bit floating point numbers
4128 /// into a 128-bit vector of four 32-bit unsigned integers.
4129 ///
/// NaN lanes are converted to 0. Lanes whose truncated value is out of bounds
/// saturate to the nearest representable integer.
4132 #[inline]
4133 #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f32x4_u))]
4134 #[target_feature(enable = "simd128")]
4135 #[doc(alias("i32x4.trunc_sat_f32x4_u"))]
4136 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f32x4(a: v128) -> v128 {
4138     unsafe { llvm_i32x4_trunc_sat_f32x4_u(a.as_f32x4()).v128() }
4139 }
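
// As a concrete illustration of the saturating behaviour documented above,
// `i32x4_trunc_sat_f32x4(f32x4(1.9, -2.9, f32::NAN, 3.0e9))` produces the lanes
// `[1, -2, 0, i32::MAX]`, while the unsigned variant maps the negative and NaN
// lanes to `0` and keeps `3.0e9` (which fits in a `u32`) as `3_000_000_000`.
// A small runnable sketch is in the test module at the bottom of this file.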
4140 
4141 /// Converts a 128-bit vector interpreted as four 32-bit signed integers into a
4142 /// 128-bit vector of four 32-bit floating point numbers.
4143 #[inline]
4144 #[cfg_attr(test, assert_instr(f32x4.convert_i32x4_s))]
4145 #[target_feature(enable = "simd128")]
4146 #[doc(alias("f32x4.convert_i32x4_s"))]
4147 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_i32x4(a: v128) -> v128 {
4149     unsafe { simd_cast::<_, simd::f32x4>(a.as_i32x4()).v128() }
4150 }
4151 
4152 /// Converts a 128-bit vector interpreted as four 32-bit unsigned integers into a
4153 /// 128-bit vector of four 32-bit floating point numbers.
4154 #[inline]
4155 #[cfg_attr(test, assert_instr(f32x4.convert_i32x4_u))]
4156 #[target_feature(enable = "simd128")]
4157 #[doc(alias("f32x4.convert_i32x4_u"))]
4158 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_convert_u32x4(a: v128) -> v128 {
4160     unsafe { simd_cast::<_, simd::f32x4>(a.as_u32x4()).v128() }
4161 }
4162 
4163 /// Saturating conversion of the two double-precision floating point lanes to
4164 /// two lower integer lanes using the IEEE `convertToIntegerTowardZero`
4165 /// function.
4166 ///
4167 /// The two higher lanes of the result are initialized to zero. If any input
4168 /// lane is a NaN, the resulting lane is 0. If the rounded integer value of a
4169 /// lane is outside the range of the destination type, the result is saturated
4170 /// to the nearest representable integer value.
4171 #[inline]
4172 #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_s_zero))]
4173 #[target_feature(enable = "simd128")]
4174 #[doc(alias("i32x4.trunc_sat_f64x2_s_zero"))]
4175 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn i32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
4177     let ret: simd::i32x4 = unsafe {
4178         simd_shuffle4!(
4179             llvm_i32x2_trunc_sat_f64x2_s(a.as_f64x2()),
4180             simd::i32x2::splat(0),
4181             [0, 1, 2, 3],
4182         )
4183     };
4184     ret.v128()
4185 }
4186 
4187 /// Saturating conversion of the two double-precision floating point lanes to
4188 /// two lower integer lanes using the IEEE `convertToIntegerTowardZero`
4189 /// function.
4190 ///
4191 /// The two higher lanes of the result are initialized to zero. If any input
4192 /// lane is a NaN, the resulting lane is 0. If the rounded integer value of a
4193 /// lane is outside the range of the destination type, the result is saturated
4194 /// to the nearest representable integer value.
4195 #[inline]
4196 #[cfg_attr(test, assert_instr(i32x4.trunc_sat_f64x2_u_zero))]
4197 #[target_feature(enable = "simd128")]
4198 #[doc(alias("i32x4.trunc_sat_f64x2_u_zero"))]
4199 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn u32x4_trunc_sat_f64x2_zero(a: v128) -> v128 {
4201     let ret: simd::i32x4 = unsafe {
4202         simd_shuffle4!(
4203             llvm_i32x2_trunc_sat_f64x2_u(a.as_f64x2()),
4204             simd::i32x2::splat(0),
4205             [0, 1, 2, 3],
4206         )
4207     };
4208     ret.v128()
4209 }
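
// For example, `i32x4_trunc_sat_f64x2_zero(f64x2(f64::MAX, -1.5))` yields the
// lanes `[i32::MAX, -1, 0, 0]`: the out-of-range lane saturates, `-1.5` truncates
// toward zero, and the two upper lanes are always zero.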
4210 
4211 /// Lane-wise conversion from integer to floating point.
4212 #[inline]
4213 #[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_s))]
4214 #[target_feature(enable = "simd128")]
4215 #[doc(alias("f64x2.convert_low_i32x4_s"))]
4216 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_i32x4(a: v128) -> v128 {
4218     unsafe {
4219         simd_cast::<simd::i32x2, simd::f64x2>(simd_shuffle2!(a.as_i32x4(), a.as_i32x4(), [0, 1],))
4220             .v128()
4221     }
4222 }
4223 
4224 /// Lane-wise conversion from integer to floating point.
4225 #[inline]
4226 #[cfg_attr(test, assert_instr(f64x2.convert_low_i32x4_u))]
4227 #[target_feature(enable = "simd128")]
4228 #[doc(alias("f64x2.convert_low_i32x4_u"))]
4229 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_convert_low_u32x4(a: v128) -> v128 {
4231     unsafe {
4232         simd_cast::<simd::u32x2, simd::f64x2>(simd_shuffle2!(a.as_u32x4(), a.as_u32x4(), [0, 1],))
4233             .v128()
4234     }
4235 }
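
// Only the two low lanes of the input participate in these conversions: for
// example, `f64x2_convert_low_i32x4(i32x4(1, 2, 3, 4))` yields `f64x2(1.0, 2.0)`.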
4236 
4237 /// Conversion of the two double-precision floating point lanes to two lower
4238 /// single-precision lanes of the result. The two higher lanes of the result are
4239 /// initialized to zero. If the conversion result is not representable as a
4240 /// single-precision floating point number, it is rounded to the nearest-even
4241 /// representable number.
4242 #[inline]
4243 #[cfg_attr(test, assert_instr(f32x4.demote_f64x2_zero))]
4244 #[target_feature(enable = "simd128")]
4245 #[doc(alias("f32x4.demote_f64x2_zero"))]
4246 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f32x4_demote_f64x2_zero(a: v128) -> v128 {
4248     unsafe {
4249         simd_cast::<simd::f64x4, simd::f32x4>(simd_shuffle4!(
4250             a.as_f64x2(),
4251             simd::f64x2::splat(0.0),
4252             [0, 1, 2, 3]
4253         ))
4254         .v128()
4255     }
4256 }
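
// For example, `f32x4_demote_f64x2_zero(f64x2(1.0, 2.0))` yields
// `f32x4(1.0, 2.0, 0.0, 0.0)`.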
4257 
4258 /// Conversion of the two lower single-precision floating point lanes to the two
4259 /// double-precision lanes of the result.
4260 #[inline]
4261 #[cfg_attr(test, assert_instr(f64x2.promote_low_f32x4))]
4262 #[target_feature(enable = "simd128")]
#[doc(alias("f64x2.promote_low_f32x4"))]
4264 #[stable(feature = "wasm_simd", since = "1.54.0")]
pub fn f64x2_promote_low_f32x4(a: v128) -> v128 {
4266     unsafe {
4267         simd_cast::<simd::f32x2, simd::f64x2>(simd_shuffle2!(a.as_f32x4(), a.as_f32x4(), [0, 1]))
4268             .v128()
4269     }
4270 }
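
// For example, `f64x2_promote_low_f32x4(f32x4(1.0, 2.0, 3.0, 4.0))` yields
// `f64x2(1.0, 2.0)`; the two upper input lanes are ignored.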
4271 
4272 #[cfg(test)]
4273 pub mod tests {
4274     use super::*;
4275     use core::ops::{Add, Div, Mul, Neg, Sub};
4276     use std;
4277     use std::fmt::Debug;
4278     use std::mem::transmute;
4279     use std::num::Wrapping;
4280     use std::prelude::v1::*;
4281 
    fn compare_bytes(a: v128, b: v128) {
4283         let a: [u8; 16] = unsafe { transmute(a) };
4284         let b: [u8; 16] = unsafe { transmute(b) };
4285         assert_eq!(a, b);
4286     }
4287 
4288     #[test]
    fn test_load() {
4290         unsafe {
4291             let arr: [i32; 4] = [0, 1, 2, 3];
4292             let vec = v128_load(arr.as_ptr() as *const v128);
4293             compare_bytes(vec, i32x4(0, 1, 2, 3));
4294         }
4295     }
4296 
4297     #[test]
    fn test_load_extend() {
4299         unsafe {
4300             let arr: [i8; 8] = [-3, -2, -1, 0, 1, 2, 3, 4];
4301             let vec = i16x8_load_extend_i8x8(arr.as_ptr());
4302             compare_bytes(vec, i16x8(-3, -2, -1, 0, 1, 2, 3, 4));
4303             let vec = i16x8_load_extend_u8x8(arr.as_ptr() as *const u8);
4304             compare_bytes(vec, i16x8(253, 254, 255, 0, 1, 2, 3, 4));
4305 
4306             let arr: [i16; 4] = [-1, 0, 1, 2];
4307             let vec = i32x4_load_extend_i16x4(arr.as_ptr());
4308             compare_bytes(vec, i32x4(-1, 0, 1, 2));
4309             let vec = i32x4_load_extend_u16x4(arr.as_ptr() as *const u16);
4310             compare_bytes(vec, i32x4(65535, 0, 1, 2));
4311 
4312             let arr: [i32; 2] = [-1, 1];
4313             let vec = i64x2_load_extend_i32x2(arr.as_ptr());
4314             compare_bytes(vec, i64x2(-1, 1));
4315             let vec = i64x2_load_extend_u32x2(arr.as_ptr() as *const u32);
4316             compare_bytes(vec, i64x2(u32::max_value().into(), 1));
4317         }
4318     }
4319 
4320     #[test]
    fn test_load_splat() {
4322         unsafe {
4323             compare_bytes(v128_load8_splat(&8), i8x16_splat(8));
4324             compare_bytes(v128_load16_splat(&9), i16x8_splat(9));
4325             compare_bytes(v128_load32_splat(&10), i32x4_splat(10));
4326             compare_bytes(v128_load64_splat(&11), i64x2_splat(11));
4327         }
4328     }
4329 
4330     #[test]
    fn test_load_zero() {
4332         unsafe {
4333             compare_bytes(v128_load32_zero(&10), i32x4(10, 0, 0, 0));
4334             compare_bytes(v128_load64_zero(&11), i64x2(11, 0));
4335         }
4336     }
4337 
4338     #[test]
    fn test_store() {
4340         unsafe {
4341             let mut spot = i8x16_splat(0);
4342             v128_store(&mut spot, i8x16_splat(1));
4343             compare_bytes(spot, i8x16_splat(1));
4344         }
4345     }
4346 
4347     #[test]
    fn test_load_lane() {
4349         unsafe {
4350             let zero = i8x16_splat(0);
4351             compare_bytes(
4352                 v128_load8_lane::<2>(zero, &1),
4353                 i8x16_replace_lane::<2>(zero, 1),
4354             );
4355 
4356             compare_bytes(
4357                 v128_load16_lane::<2>(zero, &1),
4358                 i16x8_replace_lane::<2>(zero, 1),
4359             );
4360 
4361             compare_bytes(
4362                 v128_load32_lane::<2>(zero, &1),
4363                 i32x4_replace_lane::<2>(zero, 1),
4364             );
4365 
4366             compare_bytes(
4367                 v128_load64_lane::<1>(zero, &1),
4368                 i64x2_replace_lane::<1>(zero, 1),
4369             );
4370         }
4371     }
4372 
4373     #[test]
    fn test_store_lane() {
4375         unsafe {
4376             let mut spot = 0;
4377             let zero = i8x16_splat(0);
4378             v128_store8_lane::<5>(i8x16_replace_lane::<5>(zero, 7), &mut spot);
4379             assert_eq!(spot, 7);
4380 
4381             let mut spot = 0;
4382             v128_store16_lane::<5>(i16x8_replace_lane::<5>(zero, 7), &mut spot);
4383             assert_eq!(spot, 7);
4384 
4385             let mut spot = 0;
4386             v128_store32_lane::<3>(i32x4_replace_lane::<3>(zero, 7), &mut spot);
4387             assert_eq!(spot, 7);
4388 
4389             let mut spot = 0;
4390             v128_store64_lane::<0>(i64x2_replace_lane::<0>(zero, 7), &mut spot);
4391             assert_eq!(spot, 7);
4392         }
4393     }
4394 
4395     #[test]
    fn test_i8x16() {
4397         const A: v128 = super::i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4398         compare_bytes(A, A);
4399 
4400         const _: v128 = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
4401         const _: v128 = i32x4(0, 1, 2, 3);
4402         const _: v128 = i64x2(0, 1);
4403         const _: v128 = f32x4(0., 1., 2., 3.);
4404         const _: v128 = f64x2(0., 1.);
4405 
4406         let bytes: [i16; 8] = unsafe { mem::transmute(i16x8(-1, -2, -3, -4, -5, -6, -7, -8)) };
4407         assert_eq!(bytes, [-1, -2, -3, -4, -5, -6, -7, -8]);
4408         let bytes: [i8; 16] = unsafe {
4409             mem::transmute(i8x16(
4410                 -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16,
4411             ))
4412         };
4413         assert_eq!(
4414             bytes,
4415             [-1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16]
4416         );
4417     }
4418 
4419     #[test]
    fn test_shuffle() {
4421         let vec_a = i8x16(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
4422         let vec_b = i8x16(
4423             16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
4424         );
4425 
4426         let vec_r = i8x16_shuffle::<0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30>(
4427             vec_a, vec_b,
4428         );
4429         let vec_e = i8x16(0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30);
4430         compare_bytes(vec_r, vec_e);
4431 
4432         let vec_a = i16x8(0, 1, 2, 3, 4, 5, 6, 7);
4433         let vec_b = i16x8(8, 9, 10, 11, 12, 13, 14, 15);
4434         let vec_r = i16x8_shuffle::<0, 8, 2, 10, 4, 12, 6, 14>(vec_a, vec_b);
4435         let vec_e = i16x8(0, 8, 2, 10, 4, 12, 6, 14);
4436         compare_bytes(vec_r, vec_e);
4437 
4438         let vec_a = i32x4(0, 1, 2, 3);
4439         let vec_b = i32x4(4, 5, 6, 7);
4440         let vec_r = i32x4_shuffle::<0, 4, 2, 6>(vec_a, vec_b);
4441         let vec_e = i32x4(0, 4, 2, 6);
4442         compare_bytes(vec_r, vec_e);
4443 
4444         let vec_a = i64x2(0, 1);
4445         let vec_b = i64x2(2, 3);
4446         let vec_r = i64x2_shuffle::<0, 2>(vec_a, vec_b);
4447         let vec_e = i64x2(0, 2);
4448         compare_bytes(vec_r, vec_e);
4449     }
4450 
4451     // tests extract and replace lanes
4452     macro_rules! test_extract {
4453         (
4454             name: $test_id:ident,
4455             extract: $extract:ident,
4456             replace: $replace:ident,
4457             elem: $elem:ty,
4458             count: $count:expr,
4459             indices: [$($idx:expr),*],
4460         ) => {
4461             #[test]
4462             fn $test_id() {
4463                 unsafe {
4464                     let arr: [$elem; $count] = [123 as $elem; $count];
4465                     let vec: v128 = transmute(arr);
4466                     $(
4467                         assert_eq!($extract::<$idx>(vec), 123 as $elem);
4468                     )*
4469 
4470                     // create a vector from array and check that the indices contain
4471                     // the same values as in the array:
4472                     let arr: [$elem; $count] = [$($idx as $elem),*];
4473                     let vec: v128 = transmute(arr);
4474                     $(
4475                         assert_eq!($extract::<$idx>(vec), $idx as $elem);
4476 
4477                         let tmp = $replace::<$idx>(vec, 124 as $elem);
4478                         assert_eq!($extract::<$idx>(tmp), 124 as $elem);
4479                     )*
4480                 }
4481             }
4482         }
4483     }
4484 
4485     test_extract! {
4486         name: test_i8x16_extract_replace,
4487         extract: i8x16_extract_lane,
4488         replace: i8x16_replace_lane,
4489         elem: i8,
4490         count: 16,
4491         indices: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
4492     }
4493     test_extract! {
4494         name: test_i16x8_extract_replace,
4495         extract: i16x8_extract_lane,
4496         replace: i16x8_replace_lane,
4497         elem: i16,
4498         count: 8,
4499         indices: [0, 1, 2, 3, 4, 5, 6, 7],
4500     }
4501     test_extract! {
4502         name: test_i32x4_extract_replace,
4503         extract: i32x4_extract_lane,
4504         replace: i32x4_replace_lane,
4505         elem: i32,
4506         count: 4,
4507         indices: [0, 1, 2, 3],
4508     }
4509     test_extract! {
4510         name: test_i64x2_extract_replace,
4511         extract: i64x2_extract_lane,
4512         replace: i64x2_replace_lane,
4513         elem: i64,
4514         count: 2,
4515         indices: [0, 1],
4516     }
4517     test_extract! {
4518         name: test_f32x4_extract_replace,
4519         extract: f32x4_extract_lane,
4520         replace: f32x4_replace_lane,
4521         elem: f32,
4522         count: 4,
4523         indices: [0, 1, 2, 3],
4524     }
4525     test_extract! {
4526         name: test_f64x2_extract_replace,
4527         extract: f64x2_extract_lane,
4528         replace: f64x2_replace_lane,
4529         elem: f64,
4530         count: 2,
4531         indices: [0, 1],
4532     }
4533 
4534     #[test]
4535     #[rustfmt::skip]
    fn test_swizzle() {
4537         compare_bytes(
4538             i8x16_swizzle(
4539                 i32x4(1, 2, 3, 4),
4540                 i8x16(
4541                     32, 31, 30, 29,
4542                     0, 1, 2, 3,
4543                     12, 13, 14, 15,
4544                     0, 4, 8, 12),
4545             ),
4546             i32x4(0, 1, 4, 0x04030201),
4547         );
4548     }
4549 
4550     macro_rules! test_splat {
4551         ($test_id:ident: $val:expr => $($vals:expr),*) => {
4552             #[test]
4553             fn $test_id() {
4554                 let a = super::$test_id($val);
4555                 let b = u8x16($($vals as u8),*);
4556                 compare_bytes(a, b);
4557             }
4558         }
4559     }
4560 
4561     mod splats {
4562         use super::*;
4563         test_splat!(i8x16_splat: 42 => 42,42,42,42,42,42,42,42,42,42,42,42,42,42,42,42);
4564         test_splat!(i16x8_splat: 42 => 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0, 42, 0);
4565         test_splat!(i32x4_splat: 42 => 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0, 42, 0, 0, 0);
4566         test_splat!(i64x2_splat: 42 => 42, 0, 0, 0, 0, 0, 0, 0, 42, 0, 0, 0, 0, 0, 0, 0);
4567         test_splat!(f32x4_splat: 42. => 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66, 0, 0, 40, 66);
4568         test_splat!(f64x2_splat: 42. => 0, 0, 0, 0, 0, 0, 69, 64, 0, 0, 0, 0, 0, 0, 69, 64);
4569     }
4570 
4571     #[test]
    fn test_bitmasks() {
4573         let zero = i8x16_splat(0);
4574         let ones = i8x16_splat(!0);
4575 
4576         assert_eq!(i8x16_bitmask(zero), 0);
4577         assert_eq!(i8x16_bitmask(ones), 0xffff);
4578         assert_eq!(i8x16_bitmask(i8x16_splat(i8::MAX)), 0);
4579         assert_eq!(i8x16_bitmask(i8x16_splat(i8::MIN)), 0xffff);
4580         assert_eq!(i8x16_bitmask(i8x16_replace_lane::<1>(zero, -1)), 0b10);
4581 
4582         assert_eq!(i16x8_bitmask(zero), 0);
4583         assert_eq!(i16x8_bitmask(ones), 0xff);
4584         assert_eq!(i16x8_bitmask(i16x8_splat(i16::MAX)), 0);
4585         assert_eq!(i16x8_bitmask(i16x8_splat(i16::MIN)), 0xff);
4586         assert_eq!(i16x8_bitmask(i16x8_replace_lane::<1>(zero, -1)), 0b10);
4587 
4588         assert_eq!(i32x4_bitmask(zero), 0);
4589         assert_eq!(i32x4_bitmask(ones), 0b1111);
4590         assert_eq!(i32x4_bitmask(i32x4_splat(i32::MAX)), 0);
4591         assert_eq!(i32x4_bitmask(i32x4_splat(i32::MIN)), 0b1111);
4592         assert_eq!(i32x4_bitmask(i32x4_replace_lane::<1>(zero, -1)), 0b10);
4593 
4594         assert_eq!(i64x2_bitmask(zero), 0);
4595         assert_eq!(i64x2_bitmask(ones), 0b11);
4596         assert_eq!(i64x2_bitmask(i64x2_splat(i64::MAX)), 0);
4597         assert_eq!(i64x2_bitmask(i64x2_splat(i64::MIN)), 0b11);
4598         assert_eq!(i64x2_bitmask(i64x2_replace_lane::<1>(zero, -1)), 0b10);
4599     }
4600 
4601     #[test]
    fn test_narrow() {
4603         let zero = i8x16_splat(0);
4604         let ones = i8x16_splat(!0);
4605 
4606         compare_bytes(i8x16_narrow_i16x8(zero, zero), zero);
4607         compare_bytes(u8x16_narrow_i16x8(zero, zero), zero);
4608         compare_bytes(i8x16_narrow_i16x8(ones, ones), ones);
4609         compare_bytes(u8x16_narrow_i16x8(ones, ones), zero);
4610 
4611         compare_bytes(
4612             i8x16_narrow_i16x8(
4613                 i16x8(
4614                     0,
4615                     1,
4616                     2,
4617                     -1,
4618                     i8::MIN.into(),
4619                     i8::MAX.into(),
4620                     u8::MIN.into(),
4621                     u8::MAX.into(),
4622                 ),
4623                 i16x8(
4624                     i16::MIN.into(),
4625                     i16::MAX.into(),
4626                     u16::MIN as i16,
4627                     u16::MAX as i16,
4628                     0,
4629                     0,
4630                     0,
4631                     0,
4632                 ),
4633             ),
4634             i8x16(0, 1, 2, -1, -128, 127, 0, 127, -128, 127, 0, -1, 0, 0, 0, 0),
4635         );
4636 
4637         compare_bytes(
4638             u8x16_narrow_i16x8(
4639                 i16x8(
4640                     0,
4641                     1,
4642                     2,
4643                     -1,
4644                     i8::MIN.into(),
4645                     i8::MAX.into(),
4646                     u8::MIN.into(),
4647                     u8::MAX.into(),
4648                 ),
4649                 i16x8(
4650                     i16::MIN.into(),
4651                     i16::MAX.into(),
4652                     u16::MIN as i16,
4653                     u16::MAX as i16,
4654                     0,
4655                     0,
4656                     0,
4657                     0,
4658                 ),
4659             ),
4660             i8x16(0, 1, 2, 0, 0, 127, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0),
4661         );
4662 
4663         compare_bytes(i16x8_narrow_i32x4(zero, zero), zero);
4664         compare_bytes(u16x8_narrow_i32x4(zero, zero), zero);
4665         compare_bytes(i16x8_narrow_i32x4(ones, ones), ones);
4666         compare_bytes(u16x8_narrow_i32x4(ones, ones), zero);
4667 
4668         compare_bytes(
4669             i16x8_narrow_i32x4(
4670                 i32x4(0, -1, i16::MIN.into(), i16::MAX.into()),
4671                 i32x4(
4672                     i32::MIN.into(),
4673                     i32::MAX.into(),
4674                     u32::MIN as i32,
4675                     u32::MAX as i32,
4676                 ),
4677             ),
4678             i16x8(0, -1, i16::MIN, i16::MAX, i16::MIN, i16::MAX, 0, -1),
4679         );
4680 
4681         compare_bytes(
4682             u16x8_narrow_i32x4(
4683                 i32x4(u16::MAX.into(), -1, i16::MIN.into(), i16::MAX.into()),
4684                 i32x4(
4685                     i32::MIN.into(),
4686                     i32::MAX.into(),
4687                     u32::MIN as i32,
4688                     u32::MAX as i32,
4689                 ),
4690             ),
4691             i16x8(-1, 0, 0, i16::MAX, 0, -1, 0, 0),
4692         );
4693     }
4694 
4695     #[test]
    fn test_extend() {
4697         let zero = i8x16_splat(0);
4698         let ones = i8x16_splat(!0);
4699 
4700         compare_bytes(i16x8_extend_low_i8x16(zero), zero);
4701         compare_bytes(i16x8_extend_high_i8x16(zero), zero);
4702         compare_bytes(i16x8_extend_low_u8x16(zero), zero);
4703         compare_bytes(i16x8_extend_high_u8x16(zero), zero);
4704         compare_bytes(i16x8_extend_low_i8x16(ones), ones);
4705         compare_bytes(i16x8_extend_high_i8x16(ones), ones);
4706         let halves = u16x8_splat(u8::MAX.into());
4707         compare_bytes(i16x8_extend_low_u8x16(ones), halves);
4708         compare_bytes(i16x8_extend_high_u8x16(ones), halves);
4709 
4710         compare_bytes(i32x4_extend_low_i16x8(zero), zero);
4711         compare_bytes(i32x4_extend_high_i16x8(zero), zero);
4712         compare_bytes(i32x4_extend_low_u16x8(zero), zero);
4713         compare_bytes(i32x4_extend_high_u16x8(zero), zero);
4714         compare_bytes(i32x4_extend_low_i16x8(ones), ones);
4715         compare_bytes(i32x4_extend_high_i16x8(ones), ones);
4716         let halves = u32x4_splat(u16::MAX.into());
4717         compare_bytes(i32x4_extend_low_u16x8(ones), halves);
4718         compare_bytes(i32x4_extend_high_u16x8(ones), halves);
4719 
4720         compare_bytes(i64x2_extend_low_i32x4(zero), zero);
4721         compare_bytes(i64x2_extend_high_i32x4(zero), zero);
4722         compare_bytes(i64x2_extend_low_u32x4(zero), zero);
4723         compare_bytes(i64x2_extend_high_u32x4(zero), zero);
4724         compare_bytes(i64x2_extend_low_i32x4(ones), ones);
4725         compare_bytes(i64x2_extend_high_i32x4(ones), ones);
4726         let halves = i64x2_splat(u32::MAX.into());
4727         compare_bytes(u64x2_extend_low_u32x4(ones), halves);
4728         compare_bytes(u64x2_extend_high_u32x4(ones), halves);
4729     }
4730 
4731     #[test]
    fn test_dot() {
4733         let zero = i8x16_splat(0);
4734         let ones = i8x16_splat(!0);
4735         let two = i32x4_splat(2);
4736         compare_bytes(i32x4_dot_i16x8(zero, zero), zero);
4737         compare_bytes(i32x4_dot_i16x8(ones, ones), two);
4738     }
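
    // Illustrative sketch of the distinction between the IEEE-style `min`/`max`
    // operations and the pseudo-`pmin`/`pmax` operations, observable with zeros of
    // opposite signs; the test name and sample values are arbitrary.
    #[test]
    fn test_pmin_pmax_signed_zero() {
        let pos = f32x4_splat(0.0);
        let neg = f32x4_splat(-0.0);
        // `f32x4.min`/`f32x4.max` treat `-0.0` as smaller than `+0.0`...
        compare_bytes(f32x4_min(pos, neg), neg);
        compare_bytes(f32x4_max(neg, pos), pos);
        // ...while the pseudo-operations are plain comparison selects and keep the
        // first operand when the lanes compare equal.
        compare_bytes(f32x4_pmin(pos, neg), pos);
        compare_bytes(f32x4_pmax(neg, pos), neg);
    }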
4739 
4740     macro_rules! test_binop {
4741         (
4742             $($name:ident => {
4743                 $([$($vec1:tt)*] ($op:ident | $f:ident) [$($vec2:tt)*],)*
4744             })*
4745         ) => ($(
4746             #[test]
4747             fn $name() {
4748                 unsafe {
4749                     $(
4750                         let v1 = [$($vec1)*];
4751                         let v2 = [$($vec2)*];
4752                         let v1_v128: v128 = mem::transmute(v1);
4753                         let v2_v128: v128 = mem::transmute(v2);
4754                         let v3_v128 = super::$f(v1_v128, v2_v128);
4755                         let mut v3 = [$($vec1)*];
4756                         drop(v3);
4757                         v3 = mem::transmute(v3_v128);
4758 
4759                         for (i, actual) in v3.iter().enumerate() {
4760                             let expected = v1[i].$op(v2[i]);
4761                             assert_eq!(*actual, expected);
4762                         }
4763                     )*
4764                 }
4765             }
4766         )*)
4767     }
4768 
4769     macro_rules! test_unop {
4770         (
4771             $($name:ident => {
4772                 $(($op:ident | $f:ident) [$($vec1:tt)*],)*
4773             })*
4774         ) => ($(
4775             #[test]
4776             fn $name() {
4777                 unsafe {
4778                     $(
4779                         let v1 = [$($vec1)*];
4780                         let v1_v128: v128 = mem::transmute(v1);
4781                         let v2_v128 = super::$f(v1_v128);
4782                         let mut v2 = [$($vec1)*];
4783                         drop(v2);
4784                         v2 = mem::transmute(v2_v128);
4785 
4786                         for (i, actual) in v2.iter().enumerate() {
4787                             let expected = v1[i].$op();
4788                             assert_eq!(*actual, expected);
4789                         }
4790                     )*
4791                 }
4792             }
4793         )*)
4794     }
4795 
4796     trait Avgr: Sized {
        fn avgr(self, other: Self) -> Self;
4798     }
4799 
4800     macro_rules! impl_avgr {
4801         ($($i:ident)*) => ($(impl Avgr for $i {
4802             fn avgr(self, other: Self) -> Self {
4803                 ((self as u64 + other as u64 + 1) / 2) as $i
4804             }
4805         })*)
4806     }
4807 
4808     impl_avgr!(u8 u16);
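
    // For reference, this rounding average rounds halves up: e.g. `1u8.avgr(2)`
    // is `(1 + 2 + 1) / 2 == 2`.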
4809 
4810     test_binop! {
4811         test_i8x16_add => {
4812             [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4813                 (wrapping_add | i8x16_add)
4814             [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4815 
4816             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4817                 (wrapping_add | i8x16_add)
4818             [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4819 
4820             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4821                 (wrapping_add | i8x16_add)
4822             [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
4823         }
4824 
4825         test_i8x16_add_sat_s => {
4826             [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4827                 (saturating_add | i8x16_add_sat)
4828             [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4829 
4830             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4831                 (saturating_add | i8x16_add_sat)
4832             [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4833 
4834             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4835                 (saturating_add | i8x16_add_sat)
4836             [127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 9, -24],
4837         }
4838 
4839         test_i8x16_add_sat_u => {
4840             [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4841                 (saturating_add | u8x16_add_sat)
4842             [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4843 
4844             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4845                 (saturating_add | u8x16_add_sat)
4846             [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4847 
4848             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4849                 (saturating_add | u8x16_add_sat)
4850             [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4851         }
4852 
4853         test_i8x16_sub => {
4854             [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4855                 (wrapping_sub | i8x16_sub)
4856             [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4857 
4858             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4859                 (wrapping_sub | i8x16_sub)
4860             [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4861 
4862             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4863                 (wrapping_sub | i8x16_sub)
4864             [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4865         }
4866 
4867         test_i8x16_sub_sat_s => {
4868             [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4869                 (saturating_sub | i8x16_sub_sat)
4870             [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4871 
4872             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4873                 (saturating_sub | i8x16_sub_sat)
4874             [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4875 
4876             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4877                 (saturating_sub | i8x16_sub_sat)
4878             [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4879         }
4880 
4881         test_i8x16_sub_sat_u => {
4882             [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4883                 (saturating_sub | u8x16_sub_sat)
4884             [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4885 
4886             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4887                 (saturating_sub | u8x16_sub_sat)
4888             [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4889 
4890             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4891                 (saturating_sub | u8x16_sub_sat)
4892             [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4893         }
4894 
4895         test_i8x16_min_s => {
4896             [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4897                 (min | i8x16_min)
4898             [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4899 
4900             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4901                 (min | i8x16_min)
4902             [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4903 
4904             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4905                 (min | i8x16_min)
4906             [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4907         }
4908 
4909         test_i8x16_min_u => {
4910             [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4911                 (min | u8x16_min)
4912             [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4913 
4914             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4915                 (min | u8x16_min)
4916             [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4917 
4918             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4919                 (min | u8x16_min)
4920             [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4921         }
4922 
4923         test_i8x16_max_s => {
4924             [0i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4925                 (max | i8x16_max)
4926             [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4927 
4928             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4929                 (max | i8x16_max)
4930             [-2, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],
4931 
4932             [1i8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4933                 (max | i8x16_max)
4934             [-127, -44, 43, 126, 4, 2, 9, -3, -59, -43, 39, -69, 79, -3, 4, 8],
4935         }
4936 
4937         test_i8x16_max_u => {
4938             [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4939                 (max | u8x16_max)
4940             [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4941 
4942             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4943                 (max | u8x16_max)
4944             [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4945 
4946             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4947                 (max | u8x16_max)
4948             [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4949         }
4950 
4951         test_i8x16_avgr_u => {
4952             [0u8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
4953                 (avgr | u8x16_avgr)
4954             [1u8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
4955 
4956             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4957                 (avgr | u8x16_avgr)
4958             [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
4959 
4960             [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
4961                 (avgr | u8x16_avgr)
4962             [127, -44i8 as u8, 43, 126, 4, 2, 9, -3i8 as u8, -59i8 as u8, -43i8 as u8, 39, -69i8 as u8, 79, -3i8 as u8, 9, -24i8 as u8],
4963         }
4964 
4965         test_i16x8_add => {
4966             [0i16, 0, 0, 0, 0, 0, 0, 0]
4967                 (wrapping_add | i16x8_add)
4968             [1i16, 1, 1, 1, 1, 1, 1, 1],
4969 
4970             [1i16, 2, 3, 4, 5, 6, 7, 8]
4971                 (wrapping_add | i16x8_add)
4972             [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4973         }
4974 
4975         test_i16x8_add_sat_s => {
4976             [0i16, 0, 0, 0, 0, 0, 0, 0]
4977                 (saturating_add | i16x8_add_sat)
4978             [1i16, 1, 1, 1, 1, 1, 1, 1],
4979 
4980             [1i16, 2, 3, 4, 5, 6, 7, 8]
4981                 (saturating_add | i16x8_add_sat)
4982             [32767, 8, -2494,-4, 4882, -4, 848, 3830],
4983         }
4984 
4985         test_i16x8_add_sat_u => {
4986             [0u16, 0, 0, 0, 0, 0, 0, 0]
4987                 (saturating_add | u16x8_add_sat)
4988             [1u16, 1, 1, 1, 1, 1, 1, 1],
4989 
4990             [1u16, 2, 3, 4, 5, 6, 7, 8]
4991                 (saturating_add | u16x8_add_sat)
4992             [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
4993         }
4994 
4995         test_i16x8_sub => {
4996             [0i16, 0, 0, 0, 0, 0, 0, 0]
4997                 (wrapping_sub | i16x8_sub)
4998             [1i16, 1, 1, 1, 1, 1, 1, 1],
4999 
5000             [1i16, 2, 3, 4, 5, 6, 7, 8]
5001                 (wrapping_sub | i16x8_sub)
5002             [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5003         }
5004 
5005         test_i16x8_sub_sat_s => {
5006             [0i16, 0, 0, 0, 0, 0, 0, 0]
5007                 (saturating_sub | i16x8_sub_sat)
5008             [1i16, 1, 1, 1, 1, 1, 1, 1],
5009 
5010             [1i16, 2, 3, 4, 5, 6, 7, 8]
5011                 (saturating_sub | i16x8_sub_sat)
5012             [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5013         }
5014 
5015         test_i16x8_sub_sat_u => {
5016             [0u16, 0, 0, 0, 0, 0, 0, 0]
5017                 (saturating_sub | u16x8_sub_sat)
5018             [1u16, 1, 1, 1, 1, 1, 1, 1],
5019 
5020             [1u16, 2, 3, 4, 5, 6, 7, 8]
5021                 (saturating_sub | u16x8_sub_sat)
5022             [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5023         }
5024 
5025         test_i16x8_mul => {
5026             [0i16, 0, 0, 0, 0, 0, 0, 0]
5027                 (wrapping_mul | i16x8_mul)
5028             [1i16, 1, 1, 1, 1, 1, 1, 1],
5029 
5030             [1i16, 2, 3, 4, 5, 6, 7, 8]
5031                 (wrapping_mul | i16x8_mul)
5032             [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5033         }
5034 
5035         test_i16x8_min_s => {
5036             [0i16, 0, 0, 0, 0, 0, 0, 0]
5037                 (min | i16x8_min)
5038             [1i16, 1, 1, 1, 1, 1, 1, 1],
5039 
5040             [1i16, 2, 3, 4, 5, 6, 7, 8]
5041                 (min | i16x8_min)
5042             [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5043         }
5044 
5045         test_i16x8_min_u => {
5046             [0u16, 0, 0, 0, 0, 0, 0, 0]
5047                 (min | u16x8_min)
5048             [1u16, 1, 1, 1, 1, 1, 1, 1],
5049 
5050             [1u16, 2, 3, 4, 5, 6, 7, 8]
5051                 (min | u16x8_min)
5052             [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5053         }
5054 
5055         test_i16x8_max_s => {
5056             [0i16, 0, 0, 0, 0, 0, 0, 0]
5057                 (max | i16x8_max)
5058             [1i16, 1, 1, 1, 1, 1, 1, 1],
5059 
5060             [1i16, 2, 3, 4, 5, 6, 7, 8]
5061                 (max | i16x8_max)
5062             [32767, 8, -2494,-4, 4882, -4, 848, 3830],
5063         }
5064 
5065         test_i16x8_max_u => {
5066             [0u16, 0, 0, 0, 0, 0, 0, 0]
5067                 (max | u16x8_max)
5068             [1u16, 1, 1, 1, 1, 1, 1, 1],
5069 
5070             [1u16, 2, 3, 4, 5, 6, 7, 8]
5071                 (max | u16x8_max)
5072             [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5073         }
5074 
5075         test_i16x8_avgr_u => {
5076             [0u16, 0, 0, 0, 0, 0, 0, 0]
5077                 (avgr | u16x8_avgr)
5078             [1u16, 1, 1, 1, 1, 1, 1, 1],
5079 
5080             [1u16, 2, 3, 4, 5, 6, 7, 8]
5081                 (avgr | u16x8_avgr)
5082             [32767, 8, -2494i16 as u16,-4i16 as u16, 4882, -4i16 as u16, 848, 3830],
5083         }
5084 
5085         test_i32x4_add => {
5086             [0i32, 0, 0, 0] (wrapping_add | i32x4_add) [1, 2, 3, 4],
5087             [1i32, 1283, i32::MAX, i32::MIN]
5088                 (wrapping_add | i32x4_add)
5089             [i32::MAX; 4],
5090         }
5091 
5092         test_i32x4_sub => {
5093             [0i32, 0, 0, 0] (wrapping_sub | i32x4_sub) [1, 2, 3, 4],
5094             [1i32, 1283, i32::MAX, i32::MIN]
5095                 (wrapping_sub | i32x4_sub)
5096             [i32::MAX; 4],
5097         }
5098 
5099         test_i32x4_mul => {
5100             [0i32, 0, 0, 0] (wrapping_mul | i32x4_mul) [1, 2, 3, 4],
5101             [1i32, 1283, i32::MAX, i32::MIN]
5102                 (wrapping_mul | i32x4_mul)
5103             [i32::MAX; 4],
5104         }
5105 
5106         test_i32x4_min_s => {
5107             [0i32, 0, 0, 0] (min | i32x4_min) [1, 2, 3, 4],
5108             [1i32, 1283, i32::MAX, i32::MIN]
5109                 (min | i32x4_min)
5110             [i32::MAX; 4],
5111         }
5112 
5113         test_i32x4_min_u => {
5114             [0u32, 0, 0, 0] (min | u32x4_min) [1, 2, 3, 4],
5115             [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
5116                 (min | u32x4_min)
5117             [i32::MAX as u32; 4],
5118         }
5119 
5120         test_i32x4_max_s => {
5121             [0i32, 0, 0, 0] (max | i32x4_max) [1, 2, 3, 4],
5122             [1i32, 1283, i32::MAX, i32::MIN]
5123                 (max | i32x4_max)
5124             [i32::MAX; 4],
5125         }
5126 
5127         test_i32x4_max_u => {
5128             [0u32, 0, 0, 0] (max | u32x4_max) [1, 2, 3, 4],
5129             [1u32, 1283, i32::MAX as u32, i32::MIN as u32]
5130                 (max | u32x4_max)
5131             [i32::MAX as u32; 4],
5132         }
5133 
5134         test_i64x2_add => {
5135             [0i64, 0] (wrapping_add | i64x2_add) [1, 2],
5136             [i64::MIN, i64::MAX] (wrapping_add | i64x2_add) [i64::MAX, i64::MIN],
5137             [i64::MAX; 2] (wrapping_add | i64x2_add) [i64::MAX; 2],
5138             [-4i64, -4] (wrapping_add | i64x2_add) [800, 939],
5139         }
5140 
5141         test_i64x2_sub => {
5142             [0i64, 0] (wrapping_sub | i64x2_sub) [1, 2],
5143             [i64::MIN, i64::MAX] (wrapping_sub | i64x2_sub) [i64::MAX, i64::MIN],
5144             [i64::MAX; 2] (wrapping_sub | i64x2_sub) [i64::MAX; 2],
5145             [-4i64, -4] (wrapping_sub | i64x2_sub) [800, 939],
5146         }
5147 
5148         test_i64x2_mul => {
5149             [0i64, 0] (wrapping_mul | i64x2_mul) [1, 2],
5150             [i64::MIN, i64::MAX] (wrapping_mul | i64x2_mul) [i64::MAX, i64::MIN],
5151             [i64::MAX; 2] (wrapping_mul | i64x2_mul) [i64::MAX; 2],
5152             [-4i64, -4] (wrapping_mul | i64x2_mul) [800, 939],
5153         }
5154 
5155         test_f32x4_add => {
5156             [-1.0f32, 2.0, 3.0, 4.0] (add | f32x4_add) [1., 2., 0., 0.],
5157             [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5158                 (add | f32x4_add)
5159             [1., 2., 0., 0.],
5160         }
5161 
5162         test_f32x4_sub => {
5163             [-1.0f32, 2.0, 3.0, 4.0] (sub | f32x4_sub) [1., 2., 0., 0.],
5164             [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5165                 (sub | f32x4_sub)
5166             [1., 2., 0., 0.],
5167         }
5168 
5169         test_f32x4_mul => {
5170             [-1.0f32, 2.0, 3.0, 4.0] (mul | f32x4_mul) [1., 2., 0., 0.],
5171             [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5172                 (mul | f32x4_mul)
5173             [1., 2., 1., 0.],
5174         }
5175 
5176         test_f32x4_div => {
5177             [-1.0f32, 2.0, 3.0, 4.0] (div | f32x4_div) [1., 2., 0., 0.],
5178             [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
5179                 (div | f32x4_div)
5180             [1., 2., 0., 0.],
5181         }
5182 
5183         test_f32x4_min => {
5184             [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_min) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (min | f32x4_min)
            [1., 2., 0., 0.],
        }

        test_f32x4_max => {
            [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_max) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (max | f32x4_max)
            [1., 2., 0., 0.],
        }

        test_f32x4_pmin => {
            [-1.0f32, 2.0, 3.0, 4.0] (min | f32x4_pmin) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (min | f32x4_pmin)
            [1., 2., 0., 0.],
        }

        test_f32x4_pmax => {
            [-1.0f32, 2.0, 3.0, 4.0] (max | f32x4_pmax) [1., 2., 0., 0.],
            [f32::INFINITY, -0.0, f32::NEG_INFINITY, 3.0]
                (max | f32x4_pmax)
            [1., 2., 0., 0.],
        }

        test_f64x2_add => {
            [-1.0f64, 2.0] (add | f64x2_add) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (add | f64x2_add) [1., 2.],
        }

        test_f64x2_sub => {
            [-1.0f64, 2.0] (sub | f64x2_sub) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (sub | f64x2_sub) [1., 2.],
        }

        test_f64x2_mul => {
            [-1.0f64, 2.0] (mul | f64x2_mul) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (mul | f64x2_mul) [1., 2.],
        }

        test_f64x2_div => {
            [-1.0f64, 2.0] (div | f64x2_div) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (div | f64x2_div) [1., 2.],
        }

        test_f64x2_min => {
            [-1.0f64, 2.0] (min | f64x2_min) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_min) [1., 2.],
        }

        test_f64x2_max => {
            [-1.0f64, 2.0] (max | f64x2_max) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_max) [1., 2.],
        }

        test_f64x2_pmin => {
            [-1.0f64, 2.0] (min | f64x2_pmin) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (min | f64x2_pmin) [1., 2.],
        }

        test_f64x2_pmax => {
            [-1.0f64, 2.0] (max | f64x2_pmax) [1., 2.],
            [f64::INFINITY, f64::NEG_INFINITY] (max | f64x2_pmax) [1., 2.],
        }
    }

    test_unop! {
        test_i8x16_abs => {
            (wrapping_abs | i8x16_abs)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            (wrapping_abs | i8x16_abs)
            [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            (wrapping_abs | i8x16_abs)
            [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
        }

        test_i8x16_neg => {
            (wrapping_neg | i8x16_neg)
            [1i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],

            (wrapping_neg | i8x16_neg)
            [-2i8, -3, -4, -5, -6, -7, -8, -9, -10, -11, -12, -13, -14, -15, -16, -18],

            (wrapping_neg | i8x16_neg)
            [-127i8, -44, 43, 126, 4, -128, 127, -59, -43, 39, -69, 79, -3, 35, 83, 13],
        }

        test_i16x8_abs => {
            (wrapping_abs | i16x8_abs) [1i16, 1, 1, 1, 1, 1, 1, 1],
            (wrapping_abs | i16x8_abs) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
        }

        test_i16x8_neg => {
            (wrapping_neg | i16x8_neg) [1i16, 1, 1, 1, 1, 1, 1, 1],
            (wrapping_neg | i16x8_neg) [2i16, 0x7fff, !0, 4, 42, -5, 33, -4847],
        }

        test_i32x4_abs => {
            (wrapping_abs | i32x4_abs) [1i32, 2, 3, 4],
            (wrapping_abs | i32x4_abs) [i32::MIN, i32::MAX, 0, 4],
        }

        test_i32x4_neg => {
            (wrapping_neg | i32x4_neg) [1i32, 2, 3, 4],
            (wrapping_neg | i32x4_neg) [i32::MIN, i32::MAX, 0, 4],
        }

        test_i64x2_abs => {
            (wrapping_abs | i64x2_abs) [1i64, 2],
            (wrapping_abs | i64x2_abs) [i64::MIN, i64::MAX],
        }

        test_i64x2_neg => {
            (wrapping_neg | i64x2_neg) [1i64, 2],
            (wrapping_neg | i64x2_neg) [i64::MIN, i64::MAX],
        }

        test_f32x4_ceil => {
            (ceil | f32x4_ceil) [1.0f32, 2., 2.5, 3.3],
            (ceil | f32x4_ceil) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_floor => {
            (floor | f32x4_floor) [1.0f32, 2., 2.5, 3.3],
            (floor | f32x4_floor) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_trunc => {
            (trunc | f32x4_trunc) [1.0f32, 2., 2.5, 3.3],
            (trunc | f32x4_trunc) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_nearest => {
            (round | f32x4_nearest) [1.0f32, 2., 2.6, 3.3],
            (round | f32x4_nearest) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_abs => {
            (abs | f32x4_abs) [1.0f32, 2., 2.6, 3.3],
            (abs | f32x4_abs) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_neg => {
            (neg | f32x4_neg) [1.0f32, 2., 2.6, 3.3],
            (neg | f32x4_neg) [0.0, -0.3, f32::INFINITY, -0.0],
        }

        test_f32x4_sqrt => {
            (sqrt | f32x4_sqrt) [1.0f32, 2., 2.6, 3.3],
            (sqrt | f32x4_sqrt) [0.0, 0.3, f32::INFINITY, 0.1],
        }

        test_f64x2_ceil => {
            (ceil | f64x2_ceil) [1.0f64, 2.3],
            (ceil | f64x2_ceil) [f64::INFINITY, -0.1],
        }

        test_f64x2_floor => {
            (floor | f64x2_floor) [1.0f64, 2.3],
            (floor | f64x2_floor) [f64::INFINITY, -0.1],
        }

        test_f64x2_trunc => {
            (trunc | f64x2_trunc) [1.0f64, 2.3],
            (trunc | f64x2_trunc) [f64::INFINITY, -0.1],
        }

        test_f64x2_nearest => {
            (round | f64x2_nearest) [1.0f64, 2.3],
            (round | f64x2_nearest) [f64::INFINITY, -0.1],
        }

        test_f64x2_abs => {
            (abs | f64x2_abs) [1.0f64, 2.3],
            (abs | f64x2_abs) [f64::INFINITY, -0.1],
        }

        test_f64x2_neg => {
            (neg | f64x2_neg) [1.0f64, 2.3],
            (neg | f64x2_neg) [f64::INFINITY, -0.1],
        }

        test_f64x2_sqrt => {
            (sqrt | f64x2_sqrt) [1.0f64, 2.3],
            (sqrt | f64x2_sqrt) [f64::INFINITY, 0.1],
        }
    }

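    // Helper for `test_bop!` below: expands to `true` only for float element
    // types, so NaN lanes can be compared by NaN-ness instead of equality.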
    macro_rules! floating_point {
        (f32) => {
            true
        };
        (f64) => {
            true
        };
        ($id:ident) => {
            false
        };
    }

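    // Integer lanes are never NaN; a default `is_nan` returning `false` lets
    // `test_bop!` call it uniformly on every element type.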
    trait IsNan: Sized {
        fn is_nan(self) -> bool {
            false
        }
    }
    impl IsNan for i8 {}
    impl IsNan for i16 {}
    impl IsNan for i32 {}
    impl IsNan for i64 {}

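    // Tests a lane-wise binary operation: the intrinsic is applied to two input
    // vectors and the lanes of the result are compared with the expected lanes.
    // For float element types, NaN lanes are compared via `is_nan` rather than
    // by equality.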
    macro_rules! test_bop {
         ($id:ident[$ety:ident; $ecount:expr] |
          $binary_op:ident [$op_test_id:ident] :
          ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
             test_bop!(
                 $id[$ety; $ecount] => $ety | $binary_op [ $op_test_id ]:
                 ([$($in_a),*], [$($in_b),*]) => [$($out),*]
             );

         };
         ($id:ident[$ety:ident; $ecount:expr] => $oty:ident |
          $binary_op:ident [$op_test_id:ident] :
          ([$($in_a:expr),*], [$($in_b:expr),*]) => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let b_input: [$ety; $ecount] = [$($in_b),*];
                     let output: [$oty; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let b_vec_in: v128 = transmute(b_input);
                     let vec_res: v128 = $binary_op(a_vec_in, b_vec_in);

                     let res: [$oty; $ecount] = transmute(vec_res);

                     if !floating_point!($ety) {
                         assert_eq!(res, output);
                     } else {
                         for i in 0..$ecount {
                             let r = res[i];
                             let o = output[i];
                             assert_eq!(r.is_nan(), o.is_nan());
                             if !r.is_nan() {
                                 assert_eq!(r, o);
                             }
                         }
                     }
                 }
             }
         }
     }

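    // Like `test_bop!`, but the second operand is a plain scalar (e.g. a shift
    // amount) rather than a vector.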
    macro_rules! test_bops {
         ($id:ident[$ety:ident; $ecount:expr] |
          $binary_op:ident [$op_test_id:ident]:
          ([$($in_a:expr),*], $in_b:expr) => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let output: [$ety; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let vec_res: v128 = $binary_op(a_vec_in, $in_b);

                     let res: [$ety; $ecount] = transmute(vec_res);
                     assert_eq!(res, output);
                 }
             }
         }
     }

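    // Tests a lane-wise unary operation against the expected output lanes.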
    macro_rules! test_uop {
         ($id:ident[$ety:ident; $ecount:expr] |
          $unary_op:ident [$op_test_id:ident]: [$($in_a:expr),*] => [$($out:expr),*]) => {
             #[test]
             fn $op_test_id() {
                 unsafe {
                     let a_input: [$ety; $ecount] = [$($in_a),*];
                     let output: [$ety; $ecount] = [$($out),*];

                     let a_vec_in: v128 = transmute(a_input);
                     let vec_res: v128 = $unary_op(a_vec_in);

                     let res: [$ety; $ecount] = transmute(vec_res);
                     assert_eq!(res, output);
                 }
             }
         }
     }

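    // Lane-wise shifts by a scalar amount: `i*_shr` is an arithmetic
    // (sign-extending) right shift, while `u*_shr` is a logical (zero-filling)
    // right shift, so they differ only on negative lanes.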
    test_bops!(i8x16[i8; 16] | i8x16_shl[i8x16_shl_test]:
               ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
               [0, -2, 4, 6, 8, 10, 12, -2, 2, 2, 2, 2, 2, 2, 2, 2]);
    test_bops!(i16x8[i16; 8] | i16x8_shl[i16x8_shl_test]:
                ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
                [0, -2, 4, 6, 8, 10, 12, -2]);
    test_bops!(i32x4[i32; 4] | i32x4_shl[i32x4_shl_test]:
                ([0, -1, 2, 3], 1) => [0, -2, 4, 6]);
    test_bops!(i64x2[i64; 2] | i64x2_shl[i64x2_shl_test]:
                ([0, -1], 1) => [0, -2]);

    test_bops!(i8x16[i8; 16] | i8x16_shr[i8x16_shr_s_test]:
               ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
               [0, -1, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
    test_bops!(i16x8[i16; 8] | i16x8_shr[i16x8_shr_s_test]:
               ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
               [0, -1, 1, 1, 2, 2, 3, i16::MAX / 2]);
    test_bops!(i32x4[i32; 4] | i32x4_shr[i32x4_shr_s_test]:
               ([0, -1, 2, 3], 1) => [0, -1, 1, 1]);
    test_bops!(i64x2[i64; 2] | i64x2_shr[i64x2_shr_s_test]:
               ([0, -1], 1) => [0, -1]);

    test_bops!(i8x16[i8; 16] | u8x16_shr[i8x16_shr_u_test]:
                ([0, -1, 2, 3, 4, 5, 6, i8::MAX, 1, 1, 1, 1, 1, 1, 1, 1], 1) =>
                [0, i8::MAX, 1, 1, 2, 2, 3, 63, 0, 0, 0, 0, 0, 0, 0, 0]);
    test_bops!(i16x8[i16; 8] | u16x8_shr[i16x8_shr_u_test]:
                ([0, -1, 2, 3, 4, 5, 6, i16::MAX], 1) =>
                [0, i16::MAX, 1, 1, 2, 2, 3, i16::MAX / 2]);
    test_bops!(i32x4[i32; 4] | u32x4_shr[i32x4_shr_u_test]:
                ([0, -1, 2, 3], 1) => [0, i32::MAX, 1, 1]);
    test_bops!(i64x2[i64; 2] | u64x2_shr[i64x2_shr_u_test]:
                ([0, -1], 1) => [0, i64::MAX]);

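    // The v128 bitwise ops and `v128_bitselect` operate on all 128 bits at once;
    // the lane interpretation of the inputs is irrelevant.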
    #[test]
    fn v128_bitwise_logical_ops() {
        unsafe {
            let a: [u32; 4] = [u32::MAX, 0, u32::MAX, 0];
            let b: [u32; 4] = [u32::MAX; 4];
            let c: [u32; 4] = [0; 4];

            let vec_a: v128 = transmute(a);
            let vec_b: v128 = transmute(b);
            let vec_c: v128 = transmute(c);

            let r: v128 = v128_and(vec_a, vec_a);
            compare_bytes(r, vec_a);
            let r: v128 = v128_and(vec_a, vec_b);
            compare_bytes(r, vec_a);
            let r: v128 = v128_andnot(vec_a, vec_b);
            compare_bytes(r, vec_c);
            let r: v128 = v128_andnot(vec_a, vec_a);
            compare_bytes(r, vec_c);
            let r: v128 = v128_andnot(vec_a, vec_c);
            compare_bytes(r, vec_a);
            let r: v128 = v128_or(vec_a, vec_b);
            compare_bytes(r, vec_b);
            let r: v128 = v128_not(vec_b);
            compare_bytes(r, vec_c);
            let r: v128 = v128_xor(vec_a, vec_c);
            compare_bytes(r, vec_a);

            let r: v128 = v128_bitselect(vec_b, vec_c, vec_b);
            compare_bytes(r, vec_b);
            let r: v128 = v128_bitselect(vec_b, vec_c, vec_c);
            compare_bytes(r, vec_c);
            let r: v128 = v128_bitselect(vec_b, vec_c, vec_a);
            compare_bytes(r, vec_a);
        }
    }

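    // Exercises a boolean reduction with an all-non-zero vector, an all-zero
    // vector, and an alternating vector; the `$any` assertions are still TODO
    // in the macro body below.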
    macro_rules! test_bool_red {
         ([$test_id:ident, $any:ident, $all:ident] | [$($true:expr),*] | [$($false:expr),*] | [$($alt:expr),*]) => {
             #[test]
             fn $test_id() {
                 unsafe {
                     let vec_a: v128 = transmute([$($true),*]); // true
                     let vec_b: v128 = transmute([$($false),*]); // false
                     let vec_c: v128 = transmute([$($alt),*]); // alternating

                     // TODO
                     // assert_eq!($any(vec_a), true);
                     // assert_eq!($any(vec_b), false);
                     // assert_eq!($any(vec_c), true);

                     assert_eq!($all(vec_a), true);
                     assert_eq!($all(vec_b), false);
                     assert_eq!($all(vec_c), false);
                 }
             }
         }
     }

    test_bool_red!(
        [i8x16_boolean_reductions, v128_any_true, i8x16_all_true]
            | [1_i8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
            | [0_i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            | [1_i8, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
    );
    test_bool_red!(
        [i16x8_boolean_reductions, v128_any_true, i16x8_all_true]
            | [1_i16, 1, 1, 1, 1, 1, 1, 1]
            | [0_i16, 0, 0, 0, 0, 0, 0, 0]
            | [1_i16, 0, 1, 0, 1, 0, 1, 0]
    );
    test_bool_red!(
        [i32x4_boolean_reductions, v128_any_true, i32x4_all_true]
            | [1_i32, 1, 1, 1]
            | [0_i32, 0, 0, 0]
            | [1_i32, 0, 1, 0]
    );
    test_bool_red!(
        [i64x2_boolean_reductions, v128_any_true, i64x2_all_true]
            | [1_i64, 1]
            | [0_i64, 0]
            | [1_i64, 0]
    );

    test_bop!(i8x16[i8; 16] | i8x16_eq[i8x16_eq_test]:
              ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
               [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
              [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_eq[i16x8_eq_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_eq[i32x4_eq_test]:
               ([0, 1, 2, 3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
    test_bop!(i64x2[i64; 2] | i64x2_eq[i64x2_eq_test]:
               ([0, 1], [0, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_eq[f32x4_eq_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_eq[f64x2_eq_test]: ([0., 1.], [0., 2.]) => [-1, 0]);

    test_bop!(i8x16[i8; 16] | i8x16_ne[i8x16_ne_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_ne[i16x8_ne_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_ne[i32x4_ne_test]:
               ([0, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_ne[i64x2_ne_test]:
               ([0, 1], [0, 2]) => [0, -1]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_ne[f32x4_ne_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_ne[f64x2_ne_test]: ([0., 1.], [0., 2.]) => [0, -1]);

    test_bop!(i8x16[i8; 16] | i8x16_lt[i8x16_lt_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1, -1, -1, 0, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_lt[i8x16_lt_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, -12, 13, 14, 15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_lt[i16x8_lt_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i16x8[i16; 8] | u16x8_lt[i16x8_lt_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_lt[i32x4_lt_s_test]:
               ([-1, 1, 2, 3], [0, 2, 2, 4]) => [-1, -1, 0, -1]);
    test_bop!(i32x4[i32; 4] | u32x4_lt[i32x4_lt_u_test]:
               ([-1, 1, 2, 3], [0, 2, 2, 4]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_lt[i64x2_lt_s_test]:
               ([-1, 3], [0, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_lt[f32x4_lt_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_lt[f64x2_lt_test]: ([0., 1.], [0., 2.]) => [0, -1]);

    test_bop!(i8x16[i8; 16] | i8x16_gt[i8x16_gt_s_test]:
           ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_gt[i8x16_gt_u_test]:
           ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
            [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0, 0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_gt[i16x8_gt_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, 0]);
    test_bop!(i16x8[i16; 8] | u16x8_gt[i16x8_gt_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [0, -1, 0, -1 ,0, -1, 0, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_gt[i32x4_gt_s_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, 0]);
    test_bop!(i32x4[i32; 4] | u32x4_gt[i32x4_gt_u_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [0, -1, 0, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_gt[i64x2_gt_s_test]:
               ([-1, 2], [0, 1]) => [0, -1]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_gt[f32x4_gt_test]:
               ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [0, -1, 0, -1]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_gt[f64x2_gt_test]: ([0., 2.], [0., 1.]) => [0, -1]);

    test_bop!(i8x16[i8; 16] | i8x16_ge[i8x16_ge_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i8x16[i8; 16] | u8x16_ge[i8x16_ge_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, -15],
                [0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, 15]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | i16x8_ge[i16x8_ge_s_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i16x8[i16; 8] | u16x8_ge[i16x8_ge_u_test]:
               ([0, 1, 2, 3, 4, 5, 6, -7], [0, 2, 2, 4, 4, 6, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | i32x4_ge[i32x4_ge_s_test]:
               ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, 0]);
    test_bop!(i32x4[i32; 4] | u32x4_ge[i32x4_ge_u_test]:
               ([0, 1, 2, -3], [0, 2, 2, 4]) => [-1, 0, -1, -1]);
    test_bop!(i64x2[i64; 2] | i64x2_ge[i64x2_ge_s_test]:
               ([0, 1], [-1, 2]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_ge[f32x4_ge_test]:
               ([0., 1., 2., 3.], [0., 2., 2., 4.]) => [-1, 0, -1, 0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_ge[f64x2_ge_test]: ([0., 1.], [0., 2.]) => [-1, 0]);

    test_bop!(i8x16[i8; 16] | i8x16_le[i8x16_le_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
               ) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i8x16[i8; 16] | u8x16_le[i8x16_le_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, 7, 8, 10, 10, 12, 12, 14, 14, -15],
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
               ) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1, -1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i16x8[i16; 8] | i16x8_le[i16x8_le_s_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, -1]);
    test_bop!(i16x8[i16; 8] | u16x8_le[i16x8_le_u_test]:
               ([0, 2, 2, 4, 4, 6, 6, -7], [0, 1, 2, 3, 4, 5, 6, 7]) =>
               [-1, 0, -1, 0 ,-1, 0, -1, 0]);
    test_bop!(i32x4[i32; 4] | i32x4_le[i32x4_le_s_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, -1]);
    test_bop!(i32x4[i32; 4] | u32x4_le[i32x4_le_u_test]:
               ([0, 2, 2, -4], [0, 1, 2, 3]) => [-1, 0, -1, 0]);
    test_bop!(i64x2[i64; 2] | i64x2_le[i64x2_le_s_test]:
               ([0, 2], [0, 1]) => [-1, 0]);
    test_bop!(f32x4[f32; 4] => i32 | f32x4_le[f32x4_le_test]:
               ([0., 2., 2., 4.], [0., 1., 2., 3.]) => [-1, 0, -1, 0]);
    test_bop!(f64x2[f64; 2] => i64 | f64x2_le[f64x2_le_test]: ([0., 2.], [0., 1.]) => [-1, 0]);

    test_uop!(f32x4[f32; 4] | f32x4_neg[f32x4_neg_test]: [0., 1., 2., 3.] => [ 0., -1., -2., -3.]);
    test_uop!(f32x4[f32; 4] | f32x4_abs[f32x4_abs_test]: [0., -1., 2., -3.] => [ 0., 1., 2., 3.]);
    test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., -3., -4., 8.]);
    test_bop!(f32x4[f32; 4] | f32x4_min[f32x4_min_test_nan]:
              ([0., -1., 7., 8.], [1., -3., -4., std::f32::NAN])
              => [0., -3., -4., std::f32::NAN]);
    test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -1., 7., 10.]);
    test_bop!(f32x4[f32; 4] | f32x4_max[f32x4_max_test_nan]:
              ([0., -1., 7., 8.], [1., -3., -4., std::f32::NAN])
              => [1., -1., 7., std::f32::NAN]);
    test_bop!(f32x4[f32; 4] | f32x4_add[f32x4_add_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [1., -4., 3., 18.]);
    test_bop!(f32x4[f32; 4] | f32x4_sub[f32x4_sub_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [-1., 2., 11., -2.]);
    test_bop!(f32x4[f32; 4] | f32x4_mul[f32x4_mul_test]:
              ([0., -1., 7., 8.], [1., -3., -4., 10.]) => [0., 3., -28., 80.]);
    test_bop!(f32x4[f32; 4] | f32x4_div[f32x4_div_test]:
              ([0., -8., 70., 8.], [1., 4., 10., 2.]) => [0., -2., 7., 4.]);

    test_uop!(f64x2[f64; 2] | f64x2_neg[f64x2_neg_test]: [0., 1.] => [ 0., -1.]);
    test_uop!(f64x2[f64; 2] | f64x2_abs[f64x2_abs_test]: [0., -1.] => [ 0., 1.]);
    test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test]:
               ([0., -1.], [1., -3.]) => [0., -3.]);
    test_bop!(f64x2[f64; 2] | f64x2_min[f64x2_min_test_nan]:
               ([7., 8.], [-4., std::f64::NAN])
               => [ -4., std::f64::NAN]);
    test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test]:
               ([0., -1.], [1., -3.]) => [1., -1.]);
    test_bop!(f64x2[f64; 2] | f64x2_max[f64x2_max_test_nan]:
               ([7., 8.], [ -4., std::f64::NAN])
               => [7., std::f64::NAN]);
    test_bop!(f64x2[f64; 2] | f64x2_add[f64x2_add_test]:
               ([0., -1.], [1., -3.]) => [1., -4.]);
    test_bop!(f64x2[f64; 2] | f64x2_sub[f64x2_sub_test]:
               ([0., -1.], [1., -3.]) => [-1., 2.]);
    test_bop!(f64x2[f64; 2] | f64x2_mul[f64x2_mul_test]:
               ([0., -1.], [1., -3.]) => [0., 3.]);
    test_bop!(f64x2[f64; 2] | f64x2_div[f64x2_div_test]:
               ([0., -8.], [1., 4.]) => [0., -2.]);

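    // Tests a conversion intrinsic by comparing the raw bytes of its result
    // against an explicitly constructed expected vector.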
    macro_rules! test_conv {
        ($test_id:ident | $conv_id:ident | $to_ty:ident | $from:expr,  $to:expr) => {
            #[test]
            fn $test_id() {
                unsafe {
                    let from: v128 = transmute($from);
                    let to: v128 = transmute($to);

                    let r: v128 = $conv_id(from);

                    compare_bytes(r, to);
                }
            }
        };
    }

    test_conv!(
        f32x4_convert_s_i32x4 | f32x4_convert_i32x4 | f32x4 | [1_i32, 2, 3, 4],
        [1_f32, 2., 3., 4.]
    );
    test_conv!(
        f32x4_convert_u_i32x4 | f32x4_convert_u32x4 | f32x4 | [u32::MAX, 2, 3, 4],
        [u32::MAX as f32, 2., 3., 4.]
    );

    #[test]
    fn test_conversions() {
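        // Saturating truncation clamps out-of-range values to the target type's
        // minimum/maximum and maps NaN to zero; the `_zero` variants place their
        // results in the low lanes and zero the rest.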
        compare_bytes(
            i32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
            i32x4(1, i32::MIN, i32::MAX, 0),
        );
        compare_bytes(
            u32x4_trunc_sat_f32x4(f32x4(1., f32::NEG_INFINITY, f32::INFINITY, f32::NAN)),
            u32x4(1, 0, u32::MAX, 0),
        );
        compare_bytes(f64x2_convert_low_i32x4(i32x4(1, 2, 3, 4)), f64x2(1., 2.));
        compare_bytes(
            f64x2_convert_low_i32x4(i32x4(i32::MIN, i32::MAX, 3, 4)),
            f64x2(f64::from(i32::MIN), f64::from(i32::MAX)),
        );
        compare_bytes(f64x2_convert_low_u32x4(u32x4(1, 2, 3, 4)), f64x2(1., 2.));
        compare_bytes(
            f64x2_convert_low_u32x4(u32x4(u32::MIN, u32::MAX, 3, 4)),
            f64x2(f64::from(u32::MIN), f64::from(u32::MAX)),
        );

        compare_bytes(
            i32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
            i32x4(1, i32::MIN, 0, 0),
        );
        compare_bytes(
            i32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
            i32x4(0, i32::MAX, 0, 0),
        );
        compare_bytes(
            u32x4_trunc_sat_f64x2_zero(f64x2(1., f64::NEG_INFINITY)),
            u32x4(1, 0, 0, 0),
        );
        compare_bytes(
            u32x4_trunc_sat_f64x2_zero(f64x2(f64::NAN, f64::INFINITY)),
            u32x4(0, u32::MAX, 0, 0),
        );
    }

    #[test]
    fn test_popcnt() {
        unsafe {
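            // Exhaustive check over every byte value: a splatted byte must map
            // to its population count splatted across all 16 lanes.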
            for i in 0..=255 {
                compare_bytes(
                    i8x16_popcnt(u8x16_splat(i)),
                    u8x16_splat(i.count_ones() as u8),
                )
            }

            let vectors = [
                [0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
                [
                    100, 200, 50, 0, 10, 7, 38, 185, 192, 3, 34, 85, 93, 7, 31, 99,
                ],
            ];

            for vector in vectors.iter() {
                compare_bytes(
                    i8x16_popcnt(transmute(*vector)),
                    i8x16(
                        vector[0].count_ones() as i8,
                        vector[1].count_ones() as i8,
                        vector[2].count_ones() as i8,
                        vector[3].count_ones() as i8,
                        vector[4].count_ones() as i8,
                        vector[5].count_ones() as i8,
                        vector[6].count_ones() as i8,
                        vector[7].count_ones() as i8,
                        vector[8].count_ones() as i8,
                        vector[9].count_ones() as i8,
                        vector[10].count_ones() as i8,
                        vector[11].count_ones() as i8,
                        vector[12].count_ones() as i8,
                        vector[13].count_ones() as i8,
                        vector[14].count_ones() as i8,
                        vector[15].count_ones() as i8,
                    ),
                )
            }
        }
    }

    #[test]
    fn test_promote_demote() {
        let tests = [
            [1., 2.],
            [f64::NAN, f64::INFINITY],
            [100., 201.],
            [0., -0.],
            [f64::NEG_INFINITY, 0.],
        ];

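        // Demotion narrows each f64 lane to f32 and zeroes the upper two lanes;
        // promotion widens the low two f32 lanes back to f64.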
        for [a, b] in tests {
            compare_bytes(
                f32x4_demote_f64x2_zero(f64x2(a, b)),
                f32x4(a as f32, b as f32, 0., 0.),
            );
            compare_bytes(
                f64x2_promote_low_f32x4(f32x4(a as f32, b as f32, 0., 0.)),
                f64x2(a, b),
            );
        }
    }

    #[test]
    fn test_extmul() {
        macro_rules! test {
            ($(
                $ctor:ident {
                    from: $from:ident,
                    to: $to:ident,
                    low: $low:ident,
                    high: $high:ident,
                } => {
                    $(([$($a:tt)*] * [$($b:tt)*]))*
                }
            )*) => ($(
                $(unsafe {
                    let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                    let b: [$from; 16 / mem::size_of::<$from>()] = [$($b)*];
                    let low = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($low($ctor($($a)*), $ctor($($b)*)));
                    let high = mem::transmute::<_, [$to; 16 / mem::size_of::<$to>()]>($high($ctor($($a)*), $ctor($($b)*)));

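                    // `$low` multiplies the lower half of the input lanes and
                    // `$high` the upper half; each lane is widened to `$to`
                    // before the multiply, matching the scalar reference below.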
                    let half = a.len() / 2;
                    for i in 0..half {
                        assert_eq!(
                            (a[i] as $to).wrapping_mul((b[i] as $to)),
                            low[i],
                            "expected {} * {}", a[i] as $to, b[i] as $to,
                        );
                        assert_eq!(
                            (a[half + i] as $to).wrapping_mul((b[half + i] as $to)),
                            high[i],
                            "expected {} * {}", a[half + i] as $to, b[half + i] as $to,
                        );
                    }
                })*
            )*)
        }
        test! {
            i8x16 {
                from: i8,
                to: i16,
                low: i16x8_extmul_low_i8x16,
                high: i16x8_extmul_high_i8x16,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                        *
                    [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
                )
            }
            u8x16 {
                from: u8,
                to: u16,
                low: u16x8_extmul_low_u8x16,
                high: u16x8_extmul_high_u8x16,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                        *
                    [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
                )
            }
            i16x8 {
                from: i16,
                to: i32,
                low: i32x4_extmul_low_i16x8,
                high: i32x4_extmul_high_i16x8,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                        *
                    [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
                )
            }
            u16x8 {
                from: u16,
                to: u32,
                low: u32x4_extmul_low_u16x8,
                high: u32x4_extmul_high_u16x8,
            } => {
                (
                    [0, 0, 0, 0, 0, 0, 0, 0]
                        *
                    [0, 0, 0, 0, 0, 0, 0, 0]
                )
                (
                    [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                        *
                    [1, 1, 3, 29391, 105, 2, 100, 2]
                )
            }
            i32x4 {
                from: i32,
                to: i64,
                low: i64x2_extmul_low_i32x4,
                high: i64x2_extmul_high_i32x4,
            } => {
                (
                    [0, 0, 0, 0]
                        *
                    [0, 0, 0, 0]
                )
                (
                    [-1, 0, i32::MAX, 19931]
                        *
                    [1, 1, i32::MIN, 29391]
                )
                (
                    [i32::MAX, 3003183, 3 << 20, 0xffffff]
                        *
                    [i32::MAX, i32::MIN, -40042, 300]
                )
            }
            u32x4 {
                from: u32,
                to: u64,
                low: u64x2_extmul_low_u32x4,
                high: u64x2_extmul_high_u32x4,
            } => {
                (
                    [0, 0, 0, 0]
                        *
                    [0, 0, 0, 0]
                )
                (
                    [1, 0, u32::MAX, 19931]
                        *
                    [1, 1, 3, 29391]
                )
                (
                    [u32::MAX, 3003183, 3 << 20, 0xffffff]
                        *
                    [u32::MAX, 3000, 40042, 300]
                )
            }
        }
    }

    #[test]
    fn test_q15mulr_sat_s() {
        fn test(a: [i16; 8], b: [i16; 8]) {
            let a_v = i16x8(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7]);
            let b_v = i16x8(b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
            let result = i16x8_q15mulr_sat(a_v, b_v);
            let result = unsafe { mem::transmute::<v128, [i16; 8]>(result) };

            for (i, (a, b)) in a.iter().zip(&b).enumerate() {
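                // Q15 rounding multiply: widen to i32, multiply, add the
                // rounding constant 0x4000, then shift right by 15. The inputs
                // below avoid i16::MIN * i16::MIN, so saturation never triggers.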
                assert_eq!(
                    result[i],
                    (((*a as i32) * (*b as i32) + 0x4000) >> 15) as i16
                );
            }
        }

        test([0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0]);
        test([1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1]);
        test(
            [-1, 100, 2003, -29494, 12, 128, 994, 1],
            [-4049, 8494, -10483, 0, 5, 2222, 883, -9],
        );
    }

    #[test]
    fn test_extadd() {
        macro_rules! test {
            ($(
                $func:ident {
                    from: $from:ident,
                    to: $to:ident,
                } => {
                    $([$($a:tt)*])*
                }
            )*) => ($(
                $(unsafe {
                    let a: [$from; 16 / mem::size_of::<$from>()] = [$($a)*];
                    let a_v = mem::transmute::<_, v128>(a);
                    let r = mem::transmute::<v128, [$to; 16 / mem::size_of::<$to>()]>($func(a_v));

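                    // Pairwise extended addition: output lane `i` is the sum of
                    // input lanes `2 * i` and `2 * i + 1`, widened to `$to`.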
                    let half = a.len() / 2;
                    for i in 0..half {
                        assert_eq!(
                            (a[2 * i] as $to).wrapping_add((a[2 * i + 1] as $to)),
                            r[i],
                            "failed {} + {} != {}",
                            a[2 * i] as $to,
                            a[2 * i + 1] as $to,
                            r[i],
                        );
                    }
                })*
            )*)
        }
        test! {
            i16x8_extadd_pairwise_i8x16 {
                from: i8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [-1, -2, 3, 100, 124, -38, 33, 87, 92, 108, 22, 8, -43, -128, 22, 0]
                [-5, -2, 6, 10, 45, -4, 4, -2, 0, 88, 92, -102, -98, 83, 73, 54]
            }
            i16x8_extadd_pairwise_u8x16 {
                from: u8,
                to: i16,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                [1, 2, 3, 100, 124, 38, 33, 87, 92, 198, 22, 8, 43, 128, 22, 0]
                [5, 200, 6, 10, 45, 248, 4, 2, 0, 2, 92, 102, 234, 83, 73, 54]
            }
            i32x4_extadd_pairwise_i16x8 {
                from: i16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [-1, 0, i16::MAX, 19931, -2259, 64, 200, 87]
                [1, 1, i16::MIN, 29391, 105, 2, 100, -2]
            }
            i32x4_extadd_pairwise_u16x8 {
                from: u16,
                to: i32,
            } => {
                [0, 0, 0, 0, 0, 0, 0, 0]
                [1, 0, u16::MAX, 19931, 2259, 64, 200, 87]
                [1, 1, 3, 29391, 105, 2, 100, 2]
            }
        }
    }
}