/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef _V128_INTRINSICS_H
#define _V128_INTRINSICS_H

#include <arm_neon.h>
#include "./v64_intrinsics_arm.h"

typedef int64x2_t v128;

SIMD_INLINE uint32_t v128_low_u32(v128 a) {
  return v64_low_u32(vget_low_s64(a));
}

SIMD_INLINE v64 v128_low_v64(v128 a) { return vget_low_s64(a); }

SIMD_INLINE v64 v128_high_v64(v128 a) { return vget_high_s64(a); }

SIMD_INLINE v128 v128_from_v64(v64 a, v64 b) { return vcombine_s64(b, a); }

SIMD_INLINE v128 v128_from_64(uint64_t a, uint64_t b) {
  return vcombine_s64((uint64x1_t)b, (uint64x1_t)a);
}

SIMD_INLINE v128 v128_from_32(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
  return vcombine_s64(v64_from_32(c, d), v64_from_32(a, b));
}

SIMD_INLINE v128 v128_load_aligned(const void *p) {
  return vreinterpretq_s64_u8(vld1q_u8((const uint8_t *)p));
}

SIMD_INLINE v128 v128_load_unaligned(const void *p) {
  return v128_load_aligned(p);
}

SIMD_INLINE void v128_store_aligned(void *p, v128 r) {
  vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
}

SIMD_INLINE void v128_store_unaligned(void *p, v128 r) {
  vst1q_u8((uint8_t *)p, vreinterpretq_u8_s64(r));
}

SIMD_INLINE v128 v128_align(v128 a, v128 b, unsigned int c) {
// The following functions require an immediate.
// Some compilers will check this during optimisation, others won't.
// (See the usage sketch after this function.)
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
  return c ? vreinterpretq_s64_s8(
                 vextq_s8(vreinterpretq_s8_s64(b), vreinterpretq_s8_s64(a), c))
           : b;
#else
  return c < 8 ? v128_from_v64(v64_align(v128_low_v64(a), v128_high_v64(b), c),
                               v64_align(v128_high_v64(b), v128_low_v64(b), c))
               : v128_from_v64(
                     v64_align(v128_high_v64(a), v128_low_v64(a), c - 8),
                     v64_align(v128_low_v64(a), v128_high_v64(b), c - 8));
#endif
}
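
/* A minimal usage sketch (illustrative only, not part of this API): because
   vextq_s8 needs a compile-time lane count, the shift amount passed to
   v128_align() should be a literal or otherwise constant-foldable value.
   The example name and buffer are hypothetical. */
SIMD_INLINE v128 example_align_bytes_4_to_19(const uint8_t *buf) {
  v128 lo = v128_load_unaligned(buf);       /* bytes 0..15 */
  v128 hi = v128_load_unaligned(buf + 16);  /* bytes 16..31 */
  return v128_align(hi, lo, 4);             /* bytes 4..19 of buf */
}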

SIMD_INLINE v128 v128_zero() { return vreinterpretq_s64_u8(vdupq_n_u8(0)); }

SIMD_INLINE v128 v128_ones() { return vreinterpretq_s64_u8(vdupq_n_u8(-1)); }

SIMD_INLINE v128 v128_dup_8(uint8_t x) {
  return vreinterpretq_s64_u8(vdupq_n_u8(x));
}

SIMD_INLINE v128 v128_dup_16(uint16_t x) {
  return vreinterpretq_s64_u16(vdupq_n_u16(x));
}

SIMD_INLINE v128 v128_dup_32(uint32_t x) {
  return vreinterpretq_s64_u32(vdupq_n_u32(x));
}

SIMD_INLINE int64_t v128_dotp_s16(v128 a, v128 b) {
  return v64_dotp_s16(vget_high_s64(a), vget_high_s64(b)) +
         v64_dotp_s16(vget_low_s64(a), vget_low_s64(b));
}

SIMD_INLINE uint64_t v128_hadd_u8(v128 x) {
  uint64x2_t t = vpaddlq_u32(vpaddlq_u16(vpaddlq_u8(vreinterpretq_u8_s64(x))));
  return vget_lane_s32(
      vreinterpret_s32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0);
}

SIMD_INLINE v128 v128_padd_s16(v128 a) {
  return vreinterpretq_s64_s32(vpaddlq_s16(vreinterpretq_s16_s64(a)));
}

typedef struct { sad64_internal hi, lo; } sad128_internal;

SIMD_INLINE sad128_internal v128_sad_u8_init() {
  sad128_internal s;
  s.hi = s.lo = vdupq_n_u16(0);
  return s;
}

/* Implementation dependent return value.  Result must be finalised with
   v128_sad_u8_sum().
   The result for more than 32 v128_sad_u8() calls is undefined. */
SIMD_INLINE sad128_internal v128_sad_u8(sad128_internal s, v128 a, v128 b) {
  sad128_internal r;
  r.hi = v64_sad_u8(s.hi, vget_high_s64(a), vget_high_s64(b));
  r.lo = v64_sad_u8(s.lo, vget_low_s64(a), vget_low_s64(b));
  return r;
}

SIMD_INLINE uint32_t v128_sad_u8_sum(sad128_internal s) {
  return (uint32_t)(v64_sad_u8_sum(s.hi) + v64_sad_u8_sum(s.lo));
}
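
/* A minimal usage sketch (illustrative only, not part of this API): sum of
   absolute differences over a hypothetical 16x16 block of 8-bit pixels.
   Sixteen v128_sad_u8() accumulations stay well under the 32-call limit
   noted above, and the result is finalised with v128_sad_u8_sum(). */
SIMD_INLINE uint32_t example_sad_16x16(const uint8_t *src, int src_stride,
                                       const uint8_t *ref, int ref_stride) {
  sad128_internal s = v128_sad_u8_init();
  int i;
  for (i = 0; i < 16; i++)
    s = v128_sad_u8(s, v128_load_unaligned(src + i * src_stride),
                    v128_load_unaligned(ref + i * ref_stride));
  return v128_sad_u8_sum(s);
}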

typedef struct { ssd64_internal hi, lo; } ssd128_internal;

SIMD_INLINE ssd128_internal v128_ssd_u8_init() {
  ssd128_internal s;
  s.hi = s.lo = (ssd64_internal)(uint64_t)0;
  return s;
}

/* Implementation dependent return value.  Result must be finalised with
 * v128_ssd_u8_sum(). */
SIMD_INLINE ssd128_internal v128_ssd_u8(ssd128_internal s, v128 a, v128 b) {
  ssd128_internal r;
  r.hi = v64_ssd_u8(s.hi, vget_high_s64(a), vget_high_s64(b));
  r.lo = v64_ssd_u8(s.lo, vget_low_s64(a), vget_low_s64(b));
  return r;
}

SIMD_INLINE uint32_t v128_ssd_u8_sum(ssd128_internal s) {
  return (uint32_t)(v64_ssd_u8_sum(s.hi) + v64_ssd_u8_sum(s.lo));
}
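
/* A minimal usage sketch (illustrative only, not part of this API): sum of
   squared differences over the same hypothetical 16x16 block of 8-bit
   pixels, finalised with v128_ssd_u8_sum() as required above. */
SIMD_INLINE uint32_t example_ssd_16x16(const uint8_t *src, int src_stride,
                                       const uint8_t *ref, int ref_stride) {
  ssd128_internal s = v128_ssd_u8_init();
  int i;
  for (i = 0; i < 16; i++)
    s = v128_ssd_u8(s, v128_load_unaligned(src + i * src_stride),
                    v128_load_unaligned(ref + i * ref_stride));
  return v128_ssd_u8_sum(s);
}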

SIMD_INLINE v128 v128_or(v128 x, v128 y) { return vorrq_s64(x, y); }

SIMD_INLINE v128 v128_xor(v128 x, v128 y) { return veorq_s64(x, y); }

SIMD_INLINE v128 v128_and(v128 x, v128 y) { return vandq_s64(x, y); }

SIMD_INLINE v128 v128_andn(v128 x, v128 y) { return vbicq_s64(x, y); }

SIMD_INLINE v128 v128_add_8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_add_16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vaddq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_sadd_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vqaddq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_add_32(v128 x, v128 y) {
  return vreinterpretq_s64_u32(
      vaddq_u32(vreinterpretq_u32_s64(x), vreinterpretq_u32_s64(y)));
}

SIMD_INLINE v128 v128_sub_8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_sub_16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vsubq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_ssub_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vqsubq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_ssub_u16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vqsubq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
}

SIMD_INLINE v128 v128_ssub_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vqsubq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_ssub_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vqsubq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_sub_32(v128 x, v128 y) {
  return vreinterpretq_s64_s32(
      vsubq_s32(vreinterpretq_s32_s64(x), vreinterpretq_s32_s64(y)));
}

SIMD_INLINE v128 v128_abs_s16(v128 x) {
  return vreinterpretq_s64_s16(vabsq_s16(vreinterpretq_s16_s64(x)));
}

SIMD_INLINE v128 v128_abs_s8(v128 x) {
  return vreinterpretq_s64_s8(vabsq_s8(vreinterpretq_s8_s64(x)));
}

SIMD_INLINE v128 v128_mul_s16(v64 a, v64 b) {
  return vreinterpretq_s64_s32(
      vmull_s16(vreinterpret_s16_s64(a), vreinterpret_s16_s64(b)));
}

SIMD_INLINE v128 v128_mullo_s16(v128 a, v128 b) {
  return vreinterpretq_s64_s16(
      vmulq_s16(vreinterpretq_s16_s64(a), vreinterpretq_s16_s64(b)));
}

SIMD_INLINE v128 v128_mulhi_s16(v128 a, v128 b) {
  return v128_from_v64(v64_mulhi_s16(vget_high_s64(a), vget_high_s64(b)),
                       v64_mulhi_s16(vget_low_s64(a), vget_low_s64(b)));
}

SIMD_INLINE v128 v128_mullo_s32(v128 a, v128 b) {
  return vreinterpretq_s64_s32(
      vmulq_s32(vreinterpretq_s32_s64(a), vreinterpretq_s32_s64(b)));
}

SIMD_INLINE v128 v128_madd_s16(v128 a, v128 b) {
  return v128_from_v64(v64_madd_s16(vget_high_s64(a), vget_high_s64(b)),
                       v64_madd_s16(vget_low_s64(a), vget_low_s64(b)));
}

SIMD_INLINE v128 v128_madd_us8(v128 a, v128 b) {
  return v128_from_v64(v64_madd_us8(vget_high_s64(a), vget_high_s64(b)),
                       v64_madd_us8(vget_low_s64(a), vget_low_s64(b)));
}

SIMD_INLINE v128 v128_avg_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vrhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_rdavg_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vhaddq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_avg_u16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vrhaddq_u16(vreinterpretq_u16_s64(x), vreinterpretq_u16_s64(y)));
}

SIMD_INLINE v128 v128_min_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vminq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_max_u8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vmaxq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_min_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vminq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_max_s8(v128 x, v128 y) {
  return vreinterpretq_s64_s8(
      vmaxq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_min_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vminq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_max_s16(v128 x, v128 y) {
  return vreinterpretq_s64_s16(
      vmaxq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_ziplo_8(v128 x, v128 y) {
  uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[0]);
}

SIMD_INLINE v128 v128_ziphi_8(v128 x, v128 y) {
  uint8x16x2_t r = vzipq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[1]);
}

SIMD_INLINE v128 v128_zip_8(v64 x, v64 y) {
  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpretq_s64_u8(vcombine_u8(r.val[0], r.val[1]));
}

SIMD_INLINE v128 v128_ziplo_16(v128 x, v128 y) {
  int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
  return vreinterpretq_s64_s16(r.val[0]);
}

SIMD_INLINE v128 v128_ziphi_16(v128 x, v128 y) {
  int16x8x2_t r = vzipq_s16(vreinterpretq_s16_s64(y), vreinterpretq_s16_s64(x));
  return vreinterpretq_s64_s16(r.val[1]);
}

SIMD_INLINE v128 v128_zip_16(v64 x, v64 y) {
  uint16x4x2_t r = vzip_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
  return vreinterpretq_s64_u16(vcombine_u16(r.val[0], r.val[1]));
}

SIMD_INLINE v128 v128_ziplo_32(v128 x, v128 y) {
  int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
  return vreinterpretq_s64_s32(r.val[0]);
}

SIMD_INLINE v128 v128_ziphi_32(v128 x, v128 y) {
  int32x4x2_t r = vzipq_s32(vreinterpretq_s32_s64(y), vreinterpretq_s32_s64(x));
  return vreinterpretq_s64_s32(r.val[1]);
}

SIMD_INLINE v128 v128_zip_32(v64 x, v64 y) {
  uint32x2x2_t r = vzip_u32(vreinterpret_u32_s64(y), vreinterpret_u32_s64(x));
  return vreinterpretq_s64_u32(vcombine_u32(r.val[0], r.val[1]));
}

SIMD_INLINE v128 v128_ziplo_64(v128 a, v128 b) {
  return v128_from_v64(vget_low_u64((uint64x2_t)a),
                       vget_low_u64((uint64x2_t)b));
}

SIMD_INLINE v128 v128_ziphi_64(v128 a, v128 b) {
  return v128_from_v64(vget_high_u64((uint64x2_t)a),
                       vget_high_u64((uint64x2_t)b));
}

SIMD_INLINE v128 v128_unziplo_8(v128 x, v128 y) {
  uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[0]);
}

SIMD_INLINE v128 v128_unziphi_8(v128 x, v128 y) {
  uint8x16x2_t r = vuzpq_u8(vreinterpretq_u8_s64(y), vreinterpretq_u8_s64(x));
  return vreinterpretq_s64_u8(r.val[1]);
}

SIMD_INLINE v128 v128_unziplo_16(v128 x, v128 y) {
  uint16x8x2_t r =
      vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
  return vreinterpretq_s64_u16(r.val[0]);
}

SIMD_INLINE v128 v128_unziphi_16(v128 x, v128 y) {
  uint16x8x2_t r =
      vuzpq_u16(vreinterpretq_u16_s64(y), vreinterpretq_u16_s64(x));
  return vreinterpretq_s64_u16(r.val[1]);
}

SIMD_INLINE v128 v128_unziplo_32(v128 x, v128 y) {
  uint32x4x2_t r =
      vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
  return vreinterpretq_s64_u32(r.val[0]);
}

SIMD_INLINE v128 v128_unziphi_32(v128 x, v128 y) {
  uint32x4x2_t r =
      vuzpq_u32(vreinterpretq_u32_s64(y), vreinterpretq_u32_s64(x));
  return vreinterpretq_s64_u32(r.val[1]);
}

SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) {
  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(a)));
}

SIMD_INLINE v128 v128_unpacklo_u8_s16(v128 a) {
  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_u8_s16(v128 a) {
  return vreinterpretq_s64_u16(vmovl_u8(vreinterpret_u8_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_unpack_s8_s16(v64 a) {
  return vreinterpretq_s64_s16(vmovl_s8(vreinterpret_s8_s64(a)));
}

SIMD_INLINE v128 v128_unpacklo_s8_s16(v128 a) {
  return vreinterpretq_s64_s16(vmovl_s8(vreinterpret_s8_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_s8_s16(v128 a) {
  return vreinterpretq_s64_s16(vmovl_s8(vreinterpret_s8_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_pack_s32_s16(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_s16(vqmovn_s32(vreinterpretq_s32_s64(a))),
      vreinterpret_s64_s16(vqmovn_s32(vreinterpretq_s32_s64(b))));
}

SIMD_INLINE v128 v128_pack_s16_u8(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s64(a))),
      vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s64(b))));
}

SIMD_INLINE v128 v128_pack_s16_s8(v128 a, v128 b) {
  return v128_from_v64(
      vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(a))),
      vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s64(b))));
}

SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) {
  return vreinterpretq_s64_u32(vmovl_u16(vreinterpret_u16_s64(a)));
}

SIMD_INLINE v128 v128_unpack_s16_s32(v64 a) {
  return vreinterpretq_s64_s32(vmovl_s16(vreinterpret_s16_s64(a)));
}

SIMD_INLINE v128 v128_unpacklo_u16_s32(v128 a) {
  return vreinterpretq_s64_u32(
      vmovl_u16(vreinterpret_u16_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpacklo_s16_s32(v128 a) {
  return vreinterpretq_s64_s32(
      vmovl_s16(vreinterpret_s16_s64(vget_low_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_u16_s32(v128 a) {
  return vreinterpretq_s64_u32(
      vmovl_u16(vreinterpret_u16_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_unpackhi_s16_s32(v128 a) {
  return vreinterpretq_s64_s32(
      vmovl_s16(vreinterpret_s16_s64(vget_high_s64(a))));
}

SIMD_INLINE v128 v128_shuffle_8(v128 x, v128 pattern) {
  return v128_from_64(
      (uint64_t)vreinterpret_s64_u8(
          vtbl2_u8((uint8x8x2_t){ { vget_low_u8(vreinterpretq_u8_s64(x)),
                                    vget_high_u8(vreinterpretq_u8_s64(x)) } },
                   vreinterpret_u8_s64(vget_high_s64(pattern)))),
      (uint64_t)vreinterpret_s64_u8(
          vtbl2_u8((uint8x8x2_t){ { vget_low_u8(vreinterpretq_u8_s64(x)),
                                    vget_high_u8(vreinterpretq_u8_s64(x)) } },
                   vreinterpret_u8_s64(vget_low_s64(pattern)))));
}

SIMD_INLINE v128 v128_cmpgt_s8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vcgtq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_cmplt_s8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vcltq_s8(vreinterpretq_s8_s64(x), vreinterpretq_s8_s64(y)));
}

SIMD_INLINE v128 v128_cmpeq_8(v128 x, v128 y) {
  return vreinterpretq_s64_u8(
      vceqq_u8(vreinterpretq_u8_s64(x), vreinterpretq_u8_s64(y)));
}

SIMD_INLINE v128 v128_cmpgt_s16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vcgtq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_cmplt_s16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vcltq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_cmpeq_16(v128 x, v128 y) {
  return vreinterpretq_s64_u16(
      vceqq_s16(vreinterpretq_s16_s64(x), vreinterpretq_s16_s64(y)));
}

SIMD_INLINE v128 v128_shl_8(v128 a, unsigned int c) {
  return (c > 7) ? v128_zero() : vreinterpretq_s64_u8(vshlq_u8(
                                     vreinterpretq_u8_s64(a), vdupq_n_s8(c)));
}

SIMD_INLINE v128 v128_shr_u8(v128 a, unsigned int c) {
  return (c > 7) ? v128_zero() : vreinterpretq_s64_u8(vshlq_u8(
                                     vreinterpretq_u8_s64(a), vdupq_n_s8(-c)));
}

SIMD_INLINE v128 v128_shr_s8(v128 a, unsigned int c) {
  return (c > 7) ? v128_ones() : vreinterpretq_s64_s8(vshlq_s8(
                                     vreinterpretq_s8_s64(a), vdupq_n_s8(-c)));
}

SIMD_INLINE v128 v128_shl_16(v128 a, unsigned int c) {
  return (c > 15) ? v128_zero()
                  : vreinterpretq_s64_u16(
                        vshlq_u16(vreinterpretq_u16_s64(a), vdupq_n_s16(c)));
}

SIMD_INLINE v128 v128_shr_u16(v128 a, unsigned int c) {
  return (c > 15) ? v128_zero()
                  : vreinterpretq_s64_u16(
                        vshlq_u16(vreinterpretq_u16_s64(a), vdupq_n_s16(-c)));
}

SIMD_INLINE v128 v128_shr_s16(v128 a, unsigned int c) {
  return (c > 15) ? v128_ones()
                  : vreinterpretq_s64_s16(
                        vshlq_s16(vreinterpretq_s16_s64(a), vdupq_n_s16(-c)));
}

SIMD_INLINE v128 v128_shl_32(v128 a, unsigned int c) {
  return (c > 31) ? v128_zero()
                  : vreinterpretq_s64_u32(
                        vshlq_u32(vreinterpretq_u32_s64(a), vdupq_n_s32(c)));
}

SIMD_INLINE v128 v128_shr_u32(v128 a, unsigned int c) {
  return (c > 31) ? v128_zero()
                  : vreinterpretq_s64_u32(
                        vshlq_u32(vreinterpretq_u32_s64(a), vdupq_n_s32(-c)));
}

SIMD_INLINE v128 v128_shr_s32(v128 a, unsigned int c) {
  return (c > 31) ? v128_ones()
                  : vreinterpretq_s64_s32(
                        vshlq_s32(vreinterpretq_s32_s64(a), vdupq_n_s32(-c)));
}

#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)

SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
  return n < 8
             ? v128_from_64(
                   (uint64_t)vorr_u64(
                       vshl_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                  n * 8),
                       vshr_n_u64(vreinterpret_u64_s64(vget_low_s64(a)),
                                  (8 - n) * 8)),
                   (uint64_t)vshl_n_u64(vreinterpret_u64_s64(vget_low_s64(a)),
                                        n * 8))
             : (n == 8 ? v128_from_64(
                             (uint64_t)vreinterpret_u64_s64(vget_low_s64(a)), 0)
                       : v128_from_64((uint64_t)vshl_n_u64(
                                          vreinterpret_u64_s64(vget_low_s64(a)),
                                          (n - 8) * 8),
                                      0));
}

SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
  return n < 8
             ? v128_from_64(
                   vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)), n * 8),
                   vorr_u64(
                       vshr_n_u64(vreinterpret_u64_s64(vget_low_s64(a)), n * 8),
                       vshl_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                  (8 - n) * 8)))
             : (n == 8
                    ? v128_from_64(0, vreinterpret_u64_s64(vget_high_s64(a)))
                    : v128_from_64(
                          0, vshr_n_u64(vreinterpret_u64_s64(vget_high_s64(a)),
                                        (n - 8) * 8)));
}

SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
  return vreinterpretq_s64_u8(vshlq_n_u8(vreinterpretq_u8_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
  return vreinterpretq_s64_u8(vshrq_n_u8(vreinterpretq_u8_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
  return vreinterpretq_s64_s8(vshrq_n_s8(vreinterpretq_s8_s64(a), c));
}

SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
  return vreinterpretq_s64_u16(vshlq_n_u16(vreinterpretq_u16_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
  return vreinterpretq_s64_u16(vshrq_n_u16(vreinterpretq_u16_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
  return vreinterpretq_s64_s16(vshrq_n_s16(vreinterpretq_s16_s64(a), c));
}

SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
  return vreinterpretq_s64_u32(vshlq_n_u32(vreinterpretq_u32_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
  return vreinterpretq_s64_u32(vshrq_n_u32(vreinterpretq_u32_s64(a), c));
}

SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
  return vreinterpretq_s64_s32(vshrq_n_s32(vreinterpretq_s32_s64(a), c));
}

#else

SIMD_INLINE v128 v128_shl_n_byte(v128 a, unsigned int n) {
  if (n < 8)
    return v128_from_v64(v64_or(v64_shl_n_byte(v128_high_v64(a), n),
                                v64_shr_n_byte(v128_low_v64(a), 8 - n)),
                         v64_shl_n_byte(v128_low_v64(a), n));
  else
    return v128_from_v64(v64_shl_n_byte(v128_low_v64(a), n - 8), v64_zero());
}

SIMD_INLINE v128 v128_shr_n_byte(v128 a, unsigned int n) {
  if (n < 8)
    return v128_from_v64(v64_shr_n_byte(v128_high_v64(a), n),
                         v64_or(v64_shr_n_byte(v128_low_v64(a), n),
                                v64_shl_n_byte(v128_high_v64(a), 8 - n)));
  else
    return v128_from_v64(v64_zero(), v64_shr_n_byte(v128_high_v64(a), n - 8));
}

SIMD_INLINE v128 v128_shl_n_8(v128 a, unsigned int c) {
  return v128_shl_8(a, c);
}

SIMD_INLINE v128 v128_shr_n_u8(v128 a, unsigned int c) {
  return v128_shr_u8(a, c);
}

SIMD_INLINE v128 v128_shr_n_s8(v128 a, unsigned int c) {
  return v128_shr_s8(a, c);
}

SIMD_INLINE v128 v128_shl_n_16(v128 a, unsigned int c) {
  return v128_shl_16(a, c);
}

SIMD_INLINE v128 v128_shr_n_u16(v128 a, unsigned int c) {
  return v128_shr_u16(a, c);
}

SIMD_INLINE v128 v128_shr_n_s16(v128 a, unsigned int c) {
  return v128_shr_s16(a, c);
}

SIMD_INLINE v128 v128_shl_n_32(v128 a, unsigned int c) {
  return v128_shl_32(a, c);
}

SIMD_INLINE v128 v128_shr_n_u32(v128 a, unsigned int c) {
  return v128_shr_u32(a, c);
}

SIMD_INLINE v128 v128_shr_n_s32(v128 a, unsigned int c) {
  return v128_shr_s32(a, c);
}

#endif

#endif /* _V128_INTRINSICS_H */