/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_SIMD_V64_INTRINSICS_ARM_H_
#define AOM_AOM_DSP_SIMD_V64_INTRINSICS_ARM_H_

#include <arm_neon.h>

#include "aom_dsp/simd/v64_intrinsics_arm.h"
#include "aom_ports/arm.h"

#ifdef AOM_INCOMPATIBLE_GCC
#error Incompatible gcc
#endif

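// A v64 is a 64-bit SIMD register holding eight 8-bit, four 16-bit or two
// 32-bit lanes. On NEON it is represented as a single int64x1_t and
// reinterpreted to the required lane type inside each intrinsic.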
typedef int64x1_t v64;

SIMD_INLINE uint32_t v64_low_u32(v64 a) {
  return vget_lane_u32(vreinterpret_u32_s64(a), 0);
}

SIMD_INLINE uint32_t v64_high_u32(v64 a) {
  return vget_lane_u32(vreinterpret_u32_s64(a), 1);
}

SIMD_INLINE int32_t v64_low_s32(v64 a) {
  return vget_lane_s32(vreinterpret_s32_s64(a), 0);
}

SIMD_INLINE int32_t v64_high_s32(v64 a) {
  return vget_lane_s32(vreinterpret_s32_s64(a), 1);
}

SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
  return vcreate_s64((uint64_t)a << 48 | (uint64_t)b << 32 | (uint64_t)c << 16 |
                     d);
}

SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {
  return vcreate_s64((uint64_t)x << 32 | y);
}

SIMD_INLINE v64 v64_from_64(uint64_t x) { return vcreate_s64(x); }
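
// Note on argument order: the first argument lands in the most significant
// part of the 64-bit value, e.g. v64_from_16(a, b, c, d) packs a into bits
// 63..48 down to d in bits 15..0, and v64_from_32(x, y) puts x in the high
// half, so (on the little-endian targets this header supports)
// v64_high_u32() returns x and v64_low_u32() returns y.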

SIMD_INLINE uint64_t v64_u64(v64 x) { return (uint64_t)x; }

SIMD_INLINE uint32_t u32_load_aligned(const void *p) {
  return *((uint32_t *)p);
}

SIMD_INLINE uint32_t u32_load_unaligned(const void *p) {
  return vget_lane_u32(vreinterpret_u32_u8(vld1_u8((const uint8_t *)p)), 0);
}

SIMD_INLINE void u32_store_aligned(void *p, uint32_t a) {
  *((uint32_t *)p) = a;
}

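// Unaligned 32-bit stores are routed through compiler-specific constructs
// (a lane store, or a __packed / packed-attribute pointer) rather than a
// plain *(uint32_t *) assignment, which would let the compiler assume 4-byte
// alignment of p.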
SIMD_INLINE void u32_store_unaligned(void *p, uint32_t a) {
#if defined(__clang__)
  vst1_lane_u32((uint32_t *)p, vreinterpret_u32_s64((uint64x1_t)(uint64_t)a),
                0);
#elif defined(__CC_ARM)
  *(__packed uint32_t *)p = a;
#elif defined(__GNUC__)
  *((__attribute((packed)) uint32_t *)p) = a;
#else
  vst1_lane_u32((uint32_t *)p, vreinterpret_u32_s64((uint64x1_t)(uint64_t)a),
                0);
#endif
}

SIMD_INLINE v64 v64_load_aligned(const void *p) {
  return vreinterpret_s64_u8(vld1_u8((const uint8_t *)p));
}

SIMD_INLINE v64 v64_load_unaligned(const void *p) {
  return v64_load_aligned(p);
}

SIMD_INLINE void v64_store_aligned(void *p, v64 r) {
  vst1_u8((uint8_t *)p, vreinterpret_u8_s64(r));
}

SIMD_INLINE void v64_store_unaligned(void *p, v64 r) {
  vst1_u8((uint8_t *)p, vreinterpret_u8_s64(r));
}

// The following function requires an immediate.
// Some compilers will check this if it's optimising, others won't.
SIMD_INLINE v64 v64_align(v64 a, v64 b, unsigned int c) {
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
  return c ? vreinterpret_s64_s8(
                 vext_s8(vreinterpret_s8_s64(b), vreinterpret_s8_s64(a), c))
           : b;
#else
  return c ? v64_from_64(((uint64_t)b >> c * 8) | ((uint64_t)a << (8 - c) * 8))
           : b;
#endif
}
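
// Example: v64_align(a, b, 3) concatenates a:b and extracts the 64 bits
// starting at byte 3, i.e. the upper five bytes of b followed by the lower
// three bytes of a. The byte count c must be a compile-time constant in the
// range 0..7 so the vext_s8 immediate form can be used.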

SIMD_INLINE v64 v64_zero() { return vreinterpret_s64_u8(vdup_n_u8(0)); }

SIMD_INLINE v64 v64_dup_8(uint8_t x) {
  return vreinterpret_s64_u8(vdup_n_u8(x));
}

SIMD_INLINE v64 v64_dup_16(uint16_t x) {
  return vreinterpret_s64_u16(vdup_n_u16(x));
}

SIMD_INLINE v64 v64_dup_32(uint32_t x) {
  return vreinterpret_s64_u32(vdup_n_u32(x));
}

SIMD_INLINE int64_t v64_dotp_su8(v64 x, v64 y) {
  int16x8_t t =
      vmulq_s16(vmovl_s8(vreinterpret_s8_s64(x)),
                vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(y))));
#if defined(__aarch64__)
  return vaddlvq_s16(t);
#else
  int64x2_t r = vpaddlq_s32(vpaddlq_s16(t));
  return (int64_t)vadd_s64(vget_high_s64(r), vget_low_s64(r));
#endif
}

SIMD_INLINE int64_t v64_dotp_s16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vaddlvq_s32(
      vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
#else
  int64x2_t r =
      vpaddlq_s32(vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
  return (int64_t)(vget_high_s64(r) + vget_low_s64(r));
#endif
}

SIMD_INLINE uint64_t v64_hadd_u8(v64 x) {
#if defined(__aarch64__)
  return vaddlv_u8(vreinterpret_u8_s64(x));
#else
  return (uint64_t)vpaddl_u32(vpaddl_u16(vpaddl_u8(vreinterpret_u8_s64(x))));
#endif
}

SIMD_INLINE int64_t v64_hadd_s16(v64 a) {
  return (int64_t)vpaddl_s32(vpaddl_s16(vreinterpret_s16_s64(a)));
}

typedef uint16x8_t sad64_internal;

SIMD_INLINE sad64_internal v64_sad_u8_init() { return vdupq_n_u16(0); }

// Implementation dependent return value. Result must be finalised with
// v64_sad_u8_sum().
SIMD_INLINE sad64_internal v64_sad_u8(sad64_internal s, v64 a, v64 b) {
  return vabal_u8(s, vreinterpret_u8_s64(a), vreinterpret_u8_s64(b));
}

SIMD_INLINE uint32_t v64_sad_u8_sum(sad64_internal s) {
#if defined(__aarch64__)
  return vaddlvq_u16(s);
#else
  uint64x2_t r = vpaddlq_u32(vpaddlq_u16(s));
  return (uint32_t)(uint64_t)(vget_high_u64(r) + vget_low_u64(r));
#endif
}
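
// Example (sketch): accumulating the SAD of four 8-byte rows, assuming
// hypothetical uint8_t pointers src and ref sharing a row stride:
//   sad64_internal acc = v64_sad_u8_init();
//   for (int i = 0; i < 4; i++)
//     acc = v64_sad_u8(acc, v64_load_unaligned(src + i * stride),
//                      v64_load_unaligned(ref + i * stride));
//   uint32_t sad = v64_sad_u8_sum(acc);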

typedef uint32x4_t ssd64_internal;

SIMD_INLINE ssd64_internal v64_ssd_u8_init() { return vdupq_n_u32(0); }

// Implementation dependent return value. Result must be finalised with
// v64_ssd_u8_sum().
SIMD_INLINE ssd64_internal v64_ssd_u8(ssd64_internal s, v64 a, v64 b) {
  uint8x8_t t = vabd_u8(vreinterpret_u8_s64(a), vreinterpret_u8_s64(b));
  return vaddq_u32(s, vpaddlq_u16(vmull_u8(t, t)));
}

SIMD_INLINE uint32_t v64_ssd_u8_sum(ssd64_internal s) {
#if defined(__aarch64__)
  return vaddvq_u32(s);
#else
  uint64x2_t t = vpaddlq_u32(s);
  return vget_lane_u32(
      vreinterpret_u32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0);
#endif
}
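
// The SSD accumulator follows the same init/accumulate/sum pattern, e.g.
// (sketch, same hypothetical src/ref pointers as above):
//   ssd64_internal acc = v64_ssd_u8_init();
//   acc = v64_ssd_u8(acc, v64_load_unaligned(src), v64_load_unaligned(ref));
//   uint32_t ssd = v64_ssd_u8_sum(acc);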

SIMD_INLINE v64 v64_or(v64 x, v64 y) { return vorr_s64(x, y); }

SIMD_INLINE v64 v64_xor(v64 x, v64 y) { return veor_s64(x, y); }

SIMD_INLINE v64 v64_and(v64 x, v64 y) { return vand_s64(x, y); }

SIMD_INLINE v64 v64_andn(v64 x, v64 y) { return vbic_s64(x, y); }

SIMD_INLINE v64 v64_add_8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_sadd_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vqadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_sadd_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(
      vqadd_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_add_16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vadd_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_sadd_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vqadd_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_add_32(v64 x, v64 y) {
  return vreinterpret_s64_u32(
      vadd_u32(vreinterpret_u32_s64(x), vreinterpret_u32_s64(y)));
}

SIMD_INLINE v64 v64_sub_8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vsub_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_sub_16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vsub_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_ssub_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vqsub_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_ssub_u16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vqsub_u16(vreinterpret_u16_s64(x), vreinterpret_u16_s64(y)));
}

SIMD_INLINE v64 v64_ssub_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vqsub_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_ssub_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(
      vqsub_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_sub_32(v64 x, v64 y) {
  return vreinterpret_s64_s32(
      vsub_s32(vreinterpret_s32_s64(x), vreinterpret_s32_s64(y)));
}

SIMD_INLINE v64 v64_abs_s16(v64 x) {
  return vreinterpret_s64_s16(vabs_s16(vreinterpret_s16_s64(x)));
}

SIMD_INLINE v64 v64_abs_s8(v64 x) {
  return vreinterpret_s64_s8(vabs_s8(vreinterpret_s8_s64(x)));
}

SIMD_INLINE v64 v64_mullo_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vmul_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_mulhi_s16(v64 x, v64 y) {
#if defined(__aarch64__)
  int16x8_t t = vreinterpretq_s16_s32(
      vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
  return vget_low_s64(vreinterpretq_s64_s16(vuzp2q_s16(t, t)));
#else
  return vreinterpret_s64_s16(vmovn_s32(vshrq_n_s32(
      vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)), 16)));
#endif
}

SIMD_INLINE v64 v64_mullo_s32(v64 x, v64 y) {
  return vreinterpret_s64_s32(
      vmul_s32(vreinterpret_s32_s64(x), vreinterpret_s32_s64(y)));
}

SIMD_INLINE v64 v64_madd_s16(v64 x, v64 y) {
  int32x4_t t = vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y));
  return vreinterpret_s64_s32(
      vpadd_s32(vreinterpret_s32_s64(vget_low_s64(vreinterpretq_s64_s32(t))),
                vreinterpret_s32_s64(vget_high_s64(vreinterpretq_s64_s32(t)))));
}

SIMD_INLINE v64 v64_madd_us8(v64 x, v64 y) {
  int16x8_t t =
      vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(x))),
                vmovl_s8(vreinterpret_s8_s64(y)));
  return vreinterpret_s64_s16(vqmovn_s32(vpaddlq_s16(t)));
}

SIMD_INLINE v64 v64_avg_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vrhadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_rdavg_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vhadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_rdavg_u16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vhadd_u16(vreinterpret_u16_s64(x), vreinterpret_u16_s64(y)));
}

SIMD_INLINE v64 v64_avg_u16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vrhadd_u16(vreinterpret_u16_s64(x), vreinterpret_u16_s64(y)));
}

SIMD_INLINE v64 v64_max_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vmax_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_min_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vmin_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_max_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(
      vmax_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_min_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(
      vmin_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_max_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vmax_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_min_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vmin_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_ziplo_8(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u8(
      vzip1_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)));
#else
  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpret_s64_u8(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_ziphi_8(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u8(
      vzip2_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)));
#else
  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpret_s64_u8(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_ziplo_16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u16(
      vzip1_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x)));
#else
  int16x4x2_t r = vzip_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x));
  return vreinterpret_s64_s16(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_ziphi_16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u16(
      vzip2_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x)));
#else
  int16x4x2_t r = vzip_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x));
  return vreinterpret_s64_s16(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_ziplo_32(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u32(
      vzip1_u32(vreinterpret_u32_s64(y), vreinterpret_u32_s64(x)));
#else
  int32x2x2_t r = vzip_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x));
  return vreinterpret_s64_s32(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_ziphi_32(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u32(
      vzip2_u32(vreinterpret_u32_s64(y), vreinterpret_u32_s64(x)));
#else
  int32x2x2_t r = vzip_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x));
  return vreinterpret_s64_s32(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_unpacklo_u8_s16(v64 a) {
  return vreinterpret_s64_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_s64(a))));
}

SIMD_INLINE v64 v64_unpackhi_u8_s16(v64 a) {
  return vreinterpret_s64_u16(vget_high_u16(vmovl_u8(vreinterpret_u8_s64(a))));
}

SIMD_INLINE v64 v64_unpacklo_s8_s16(v64 a) {
  return vreinterpret_s64_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_s64(a))));
}

SIMD_INLINE v64 v64_unpackhi_s8_s16(v64 a) {
  return vreinterpret_s64_s16(vget_high_s16(vmovl_s8(vreinterpret_s8_s64(a))));
}

SIMD_INLINE v64 v64_pack_s32_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(vqmovn_s32(
      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x))));
}

SIMD_INLINE v64 v64_pack_s32_u16(v64 x, v64 y) {
  return vreinterpret_s64_u16(vqmovun_s32(
      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x))));
}

SIMD_INLINE v64 v64_pack_s16_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s32(
      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x)))));
}

SIMD_INLINE v64 v64_pack_s16_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s32(
      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x)))));
}

SIMD_INLINE v64 v64_unziplo_8(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u8(
      vuzp1_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)));
#else
  uint8x8x2_t r = vuzp_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpret_s64_u8(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_unziphi_8(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u8(
      vuzp2_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)));
#else
  uint8x8x2_t r = vuzp_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpret_s64_u8(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_unziplo_16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u16(
      vuzp1_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x)));
#else
  uint16x4x2_t r = vuzp_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
  return vreinterpret_s64_u16(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_unziphi_16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u16(
      vuzp2_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x)));
#else
  uint16x4x2_t r = vuzp_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
  return vreinterpret_s64_u16(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_unpacklo_s16_s32(v64 x) {
  return vreinterpret_s64_s32(vget_low_s32(vmovl_s16(vreinterpret_s16_s64(x))));
}

SIMD_INLINE v64 v64_unpacklo_u16_s32(v64 x) {
  return vreinterpret_s64_u32(vget_low_u32(vmovl_u16(vreinterpret_u16_s64(x))));
}

SIMD_INLINE v64 v64_unpackhi_s16_s32(v64 x) {
  return vreinterpret_s64_s32(
      vget_high_s32(vmovl_s16(vreinterpret_s16_s64(x))));
}

SIMD_INLINE v64 v64_unpackhi_u16_s32(v64 x) {
  return vreinterpret_s64_u32(
      vget_high_u32(vmovl_u16(vreinterpret_u16_s64(x))));
}

SIMD_INLINE v64 v64_shuffle_8(v64 x, v64 pattern) {
  return vreinterpret_s64_u8(
      vtbl1_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(pattern)));
}

SIMD_INLINE v64 v64_cmpgt_s8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vcgt_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_cmplt_s8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vclt_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_cmpeq_8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vceq_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_cmpgt_s16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vcgt_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_cmplt_s16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vclt_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_cmpeq_16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vceq_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_shl_8(v64 a, unsigned int c) {
  return vreinterpret_s64_u8(vshl_u8(vreinterpret_u8_s64(a), vdup_n_s8(c)));
}

SIMD_INLINE v64 v64_shr_u8(v64 a, unsigned int c) {
  return vreinterpret_s64_u8(vshl_u8(vreinterpret_u8_s64(a), vdup_n_s8(-c)));
}

SIMD_INLINE v64 v64_shr_s8(v64 a, unsigned int c) {
  return vreinterpret_s64_s8(vshl_s8(vreinterpret_s8_s64(a), vdup_n_s8(-c)));
}

SIMD_INLINE v64 v64_shl_16(v64 a, unsigned int c) {
  return vreinterpret_s64_u16(vshl_u16(vreinterpret_u16_s64(a), vdup_n_s16(c)));
}

SIMD_INLINE v64 v64_shr_u16(v64 a, unsigned int c) {
  return vreinterpret_s64_u16(
      vshl_u16(vreinterpret_u16_s64(a), vdup_n_s16(-(int)c)));
}

SIMD_INLINE v64 v64_shr_s16(v64 a, unsigned int c) {
  return vreinterpret_s64_s16(
      vshl_s16(vreinterpret_s16_s64(a), vdup_n_s16(-(int)c)));
}

SIMD_INLINE v64 v64_shl_32(v64 a, unsigned int c) {
  return vreinterpret_s64_u32(vshl_u32(vreinterpret_u32_s64(a), vdup_n_s32(c)));
}

SIMD_INLINE v64 v64_shr_u32(v64 a, unsigned int c) {
  return vreinterpret_s64_u32(
      vshl_u32(vreinterpret_u32_s64(a), vdup_n_s32(-(int)c)));
}

SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int c) {
  return vreinterpret_s64_s32(
      vshl_s32(vreinterpret_s32_s64(a), vdup_n_s32(-(int)c)));
}

// The following functions require an immediate.
// Some compilers will check this during optimisation, others won't.
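// For example, v64_shr_n_u16(a, 2) is fine because the shift count is a
// literal, whereas passing a run-time variable may fail to compile when the
// vshr_n_*/vshl_n_* path below is selected.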
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)

SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int c) {
  return vshl_n_s64(a, c * 8);
}

SIMD_INLINE v64 v64_shr_n_byte(v64 a, unsigned int c) {
  return c ? (v64)vshr_n_u64(vreinterpret_u64_s64(a), c * 8) : a;
}

SIMD_INLINE v64 v64_shl_n_8(v64 a, unsigned int c) {
  return vreinterpret_s64_u8(vshl_n_u8(vreinterpret_u8_s64(a), c));
}

SIMD_INLINE v64 v64_shr_n_u8(v64 a, unsigned int c) {
  return vreinterpret_s64_u8(vshr_n_u8(vreinterpret_u8_s64(a), c));
}

SIMD_INLINE v64 v64_shr_n_s8(v64 a, unsigned int c) {
  return vreinterpret_s64_s8(vshr_n_s8(vreinterpret_s8_s64(a), c));
}

SIMD_INLINE v64 v64_shl_n_16(v64 a, unsigned int c) {
  return vreinterpret_s64_u16(vshl_n_u16(vreinterpret_u16_s64(a), c));
}

SIMD_INLINE v64 v64_shr_n_u16(v64 a, unsigned int c) {
  return vreinterpret_s64_u16(vshr_n_u16(vreinterpret_u16_s64(a), c));
}

SIMD_INLINE v64 v64_shr_n_s16(v64 a, unsigned int c) {
  return vreinterpret_s64_s16(vshr_n_s16(vreinterpret_s16_s64(a), c));
}

SIMD_INLINE v64 v64_shl_n_32(v64 a, unsigned int c) {
  return vreinterpret_s64_u32(vshl_n_u32(vreinterpret_u32_s64(a), c));
}

SIMD_INLINE v64 v64_shr_n_u32(v64 a, unsigned int c) {
  return vreinterpret_s64_u32(vshr_n_u32(vreinterpret_u32_s64(a), c));
}

SIMD_INLINE v64 v64_shr_n_s32(v64 a, unsigned int c) {
  return vreinterpret_s64_s32(vshr_n_s32(vreinterpret_s32_s64(a), c));
}

#else

SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int c) {
  return v64_from_64(v64_u64(a) << c * 8);
}

SIMD_INLINE v64 v64_shr_n_byte(v64 a, unsigned int c) {
  return v64_from_64(v64_u64(a) >> c * 8);
}

SIMD_INLINE v64 v64_shl_n_8(v64 a, unsigned int c) { return v64_shl_8(a, c); }

SIMD_INLINE v64 v64_shr_n_u8(v64 a, unsigned int c) { return v64_shr_u8(a, c); }

SIMD_INLINE v64 v64_shr_n_s8(v64 a, unsigned int c) { return v64_shr_s8(a, c); }

SIMD_INLINE v64 v64_shl_n_16(v64 a, unsigned int c) { return v64_shl_16(a, c); }

SIMD_INLINE v64 v64_shr_n_u16(v64 a, unsigned int c) {
  return v64_shr_u16(a, c);
}

SIMD_INLINE v64 v64_shr_n_s16(v64 a, unsigned int c) {
  return v64_shr_s16(a, c);
}

SIMD_INLINE v64 v64_shl_n_32(v64 a, unsigned int c) { return v64_shl_32(a, c); }

SIMD_INLINE v64 v64_shr_n_u32(v64 a, unsigned int c) {
  return v64_shr_u32(a, c);
}

SIMD_INLINE v64 v64_shr_n_s32(v64 a, unsigned int c) {
  return v64_shr_s32(a, c);
}

#endif

#endif  // AOM_AOM_DSP_SIMD_V64_INTRINSICS_ARM_H_