/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_SIMD_V64_INTRINSICS_ARM_H_
#define AOM_AOM_DSP_SIMD_V64_INTRINSICS_ARM_H_

#include <arm_neon.h>

17 #include "aom_dsp/simd/v64_intrinsics_arm.h"
18 #include "aom_ports/arm.h"
19
20 #ifdef AOM_INCOMPATIBLE_GCC
21 #error Incompatible gcc
22 #endif
23
24 typedef int64x1_t v64;
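
// A v64 holds eight 8-bit, four 16-bit or two 32-bit lanes in a 64-bit NEON
// register, with lane 0 in the least significant bits (cf. v64_low_u32()
// below). The vreinterpret casts used throughout generate no code; they only
// change the C-level type.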

SIMD_INLINE uint32_t v64_low_u32(v64 a) {
  return vget_lane_u32(vreinterpret_u32_s64(a), 0);
}

SIMD_INLINE uint32_t v64_high_u32(v64 a) {
  return vget_lane_u32(vreinterpret_u32_s64(a), 1);
}

SIMD_INLINE int32_t v64_low_s32(v64 a) {
  return vget_lane_s32(vreinterpret_s32_s64(a), 0);
}

SIMD_INLINE int32_t v64_high_s32(v64 a) {
  return vget_lane_s32(vreinterpret_s32_s64(a), 1);
}

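// The first argument lands in the most significant lane, e.g.
// v64_from_16(1, 2, 3, 4) equals v64_from_64(0x0001000200030004).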
SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {
  return vcreate_s64((uint64_t)a << 48 | (uint64_t)b << 32 | (uint64_t)c << 16 |
                     d);
}

SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {
  return vcreate_s64((uint64_t)x << 32 | y);
}

SIMD_INLINE v64 v64_from_64(uint64_t x) { return vcreate_s64(x); }

SIMD_INLINE uint64_t v64_u64(v64 x) { return (uint64_t)x; }

SIMD_INLINE uint32_t u32_load_aligned(const void *p) {
  return *((uint32_t *)p);
}

SIMD_INLINE uint32_t u32_load_unaligned(const void *p) {
  return vget_lane_u32(vreinterpret_u32_u8(vld1_u8((const uint8_t *)p)), 0);
}

SIMD_INLINE void u32_store_aligned(void *p, uint32_t a) {
  *((uint32_t *)p) = a;
}

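// Use an unaligned store strategy each compiler is known to handle safely: a
// single-lane NEON store for clang and the generic fallback, a __packed
// pointer for the ARM compiler, and a packed struct for GCC so it cannot
// assume 4-byte alignment.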
SIMD_INLINE void u32_store_unaligned(void *p, uint32_t a) {
#if defined(__clang__)
  vst1_lane_u32((uint32_t *)p, vreinterpret_u32_s64((uint64x1_t)(uint64_t)a),
                0);
#elif defined(__CC_ARM)
  *((__packed uint32_t *)p) = a;
#elif defined(__GNUC__)
  struct Unaligned32Struct {
    uint32_t value;
    uint8_t dummy;  // To make the size non-power-of-two.
  } __attribute__((__packed__));
  ((struct Unaligned32Struct *)p)->value = a;
#else
  vst1_lane_u32((uint32_t *)p, vreinterpret_u32_s64((uint64x1_t)(uint64_t)a),
                0);
#endif
}

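// vld1/vst1 make no alignment assumptions, so the unaligned variants can
// simply reuse the aligned ones.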
SIMD_INLINE v64 v64_load_aligned(const void *p) {
  return vreinterpret_s64_u8(vld1_u8((const uint8_t *)p));
}

SIMD_INLINE v64 v64_load_unaligned(const void *p) {
  return v64_load_aligned(p);
}

SIMD_INLINE void v64_store_aligned(void *p, v64 r) {
  vst1_u8((uint8_t *)p, vreinterpret_u8_s64(r));
}

SIMD_INLINE void v64_store_unaligned(void *p, v64 r) {
  vst1_u8((uint8_t *)p, vreinterpret_u8_s64(r));
}

// The following function requires an immediate.
// Some compilers will check this when optimising, others won't.
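// v64_align(a, b, c) treats a:b as a 16-byte value (a in the upper half) and
// extracts the 8 bytes starting c bytes into b, matching the semantics of
// SSSE3's palignr.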
SIMD_INLINE v64 v64_align(v64 a, v64 b, unsigned int c) {
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)
  return c ? vreinterpret_s64_s8(
                 vext_s8(vreinterpret_s8_s64(b), vreinterpret_s8_s64(a), c))
           : b;
#else
  return c ? v64_from_64(((uint64_t)b >> c * 8) | ((uint64_t)a << (8 - c) * 8))
           : b;
#endif
}

SIMD_INLINE v64 v64_zero(void) { return vreinterpret_s64_u8(vdup_n_u8(0)); }

SIMD_INLINE v64 v64_dup_8(uint8_t x) {
  return vreinterpret_s64_u8(vdup_n_u8(x));
}

SIMD_INLINE v64 v64_dup_16(uint16_t x) {
  return vreinterpret_s64_u16(vdup_n_u16(x));
}

SIMD_INLINE v64 v64_dup_32(uint32_t x) {
  return vreinterpret_s64_u32(vdup_n_u32(x));
}

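// Dot product of the signed bytes of x with the corresponding unsigned bytes
// of y, summed across all eight lanes to a scalar.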
SIMD_INLINE int64_t v64_dotp_su8(v64 x, v64 y) {
  int16x8_t t =
      vmulq_s16(vmovl_s8(vreinterpret_s8_s64(x)),
                vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(y))));
#if defined(__aarch64__)
  return vaddlvq_s16(t);
#else
  int64x2_t r = vpaddlq_s32(vpaddlq_s16(t));
  return (int64_t)vadd_s64(vget_high_s64(r), vget_low_s64(r));
#endif
}

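// Dot product of the four signed 16-bit lanes of x and y, summed to a scalar.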
SIMD_INLINE int64_t v64_dotp_s16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vaddlvq_s32(
      vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
#else
  int64x2_t r =
      vpaddlq_s32(vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
  return (int64_t)(vget_high_s64(r) + vget_low_s64(r));
#endif
}

SIMD_INLINE uint64_t v64_hadd_u8(v64 x) {
#if defined(__aarch64__)
  return vaddlv_u8(vreinterpret_u8_s64(x));
#else
  return (uint64_t)vpaddl_u32(vpaddl_u16(vpaddl_u8(vreinterpret_u8_s64(x))));
#endif
}

SIMD_INLINE int64_t v64_hadd_s16(v64 a) {
  return (int64_t)vpaddl_s32(vpaddl_s16(vreinterpret_s16_s64(a)));
}

typedef uint16x8_t sad64_internal;

SIMD_INLINE sad64_internal v64_sad_u8_init(void) { return vdupq_n_u16(0); }

// Implementation dependent return value. Result must be finalised with
// v64_sad_u8_sum().
SIMD_INLINE sad64_internal v64_sad_u8(sad64_internal s, v64 a, v64 b) {
  return vabal_u8(s, vreinterpret_u8_s64(a), vreinterpret_u8_s64(b));
}

SIMD_INLINE uint32_t v64_sad_u8_sum(sad64_internal s) {
#if defined(__aarch64__)
  return vaddlvq_u16(s);
#else
  uint64x2_t r = vpaddlq_u32(vpaddlq_u16(s));
  return (uint32_t)(uint64_t)(vget_high_u64(r) + vget_low_u64(r));
#endif
}
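
// A usage sketch for an 8x8 sum of absolute differences, where src, ref and
// stride are hypothetical names:
//   sad64_internal s = v64_sad_u8_init();
//   for (int i = 0; i < 8; i++)
//     s = v64_sad_u8(s, v64_load_unaligned(src + i * stride),
//                    v64_load_unaligned(ref + i * stride));
//   uint32_t sad = v64_sad_u8_sum(s);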

typedef uint32x4_t ssd64_internal;

SIMD_INLINE ssd64_internal v64_ssd_u8_init(void) { return vdupq_n_u32(0); }

// Implementation dependent return value. Result must be finalised with
// v64_ssd_u8_sum().
SIMD_INLINE ssd64_internal v64_ssd_u8(ssd64_internal s, v64 a, v64 b) {
  uint8x8_t t = vabd_u8(vreinterpret_u8_s64(a), vreinterpret_u8_s64(b));
  return vaddq_u32(s, vpaddlq_u16(vmull_u8(t, t)));
}

SIMD_INLINE uint32_t v64_ssd_u8_sum(ssd64_internal s) {
#if defined(__aarch64__)
  return vaddvq_u32(s);
#else
  uint64x2_t t = vpaddlq_u32(s);
  return vget_lane_u32(
      vreinterpret_u32_u64(vadd_u64(vget_high_u64(t), vget_low_u64(t))), 0);
#endif
}
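
// The SSD accumulator follows the same init/accumulate/finalise pattern as
// SAD above, accumulating squared byte differences instead.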

SIMD_INLINE v64 v64_or(v64 x, v64 y) { return vorr_s64(x, y); }

SIMD_INLINE v64 v64_xor(v64 x, v64 y) { return veor_s64(x, y); }

SIMD_INLINE v64 v64_and(v64 x, v64 y) { return vand_s64(x, y); }

SIMD_INLINE v64 v64_andn(v64 x, v64 y) { return vbic_s64(x, y); }

SIMD_INLINE v64 v64_add_8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_sadd_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vqadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_sadd_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(
      vqadd_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_add_16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vadd_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_sadd_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vqadd_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_add_32(v64 x, v64 y) {
  return vreinterpret_s64_u32(
      vadd_u32(vreinterpret_u32_s64(x), vreinterpret_u32_s64(y)));
}

SIMD_INLINE v64 v64_sub_8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vsub_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_sub_16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vsub_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_ssub_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vqsub_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_ssub_u16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vqsub_u16(vreinterpret_u16_s64(x), vreinterpret_u16_s64(y)));
}

SIMD_INLINE v64 v64_ssub_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vqsub_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_ssub_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(
      vqsub_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_sub_32(v64 x, v64 y) {
  return vreinterpret_s64_s32(
      vsub_s32(vreinterpret_s32_s64(x), vreinterpret_s32_s64(y)));
}

SIMD_INLINE v64 v64_abs_s16(v64 x) {
  return vreinterpret_s64_s16(vabs_s16(vreinterpret_s16_s64(x)));
}

SIMD_INLINE v64 v64_abs_s8(v64 x) {
  return vreinterpret_s64_s8(vabs_s8(vreinterpret_s8_s64(x)));
}

SIMD_INLINE v64 v64_mullo_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vmul_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

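// Returns the high 16 bits of each signed 32-bit product, the equivalent of
// x86's pmulhw.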
SIMD_INLINE v64 v64_mulhi_s16(v64 x, v64 y) {
#if defined(__aarch64__)
  int16x8_t t = vreinterpretq_s16_s32(
      vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
  return vget_low_s64(vreinterpretq_s64_s16(vuzp2q_s16(t, t)));
#else
  return vreinterpret_s64_s16(vmovn_s32(vshrq_n_s32(
      vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)), 16)));
#endif
}

SIMD_INLINE v64 v64_mullo_s32(v64 x, v64 y) {
  return vreinterpret_s64_s32(
      vmul_s32(vreinterpret_s32_s64(x), vreinterpret_s32_s64(y)));
}

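// Multiplies 16-bit lanes and sums adjacent 32-bit products: result lane i is
// x[2i] * y[2i] + x[2i + 1] * y[2i + 1], matching x86's pmaddwd.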
SIMD_INLINE v64 v64_madd_s16(v64 x, v64 y) {
  int32x4_t t = vmull_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y));
  return vreinterpret_s64_s32(
      vpadd_s32(vreinterpret_s32_s64(vget_low_s64(vreinterpretq_s64_s32(t))),
                vreinterpret_s32_s64(vget_high_s64(vreinterpretq_s64_s32(t)))));
}

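// Unsigned bytes of x times signed bytes of y; adjacent products are summed
// and saturated to 16 bits, matching x86's pmaddubsw.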
SIMD_INLINE v64 v64_madd_us8(v64 x, v64 y) {
  int16x8_t t =
      vmulq_s16(vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_s64(x))),
                vmovl_s8(vreinterpret_s8_s64(y)));
  return vreinterpret_s64_s16(vqmovn_s32(vpaddlq_s16(t)));
}

SIMD_INLINE v64 v64_avg_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vrhadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_rdavg_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vhadd_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_rdavg_u16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vhadd_u16(vreinterpret_u16_s64(x), vreinterpret_u16_s64(y)));
}

SIMD_INLINE v64 v64_avg_u16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vrhadd_u16(vreinterpret_u16_s64(x), vreinterpret_u16_s64(y)));
}

SIMD_INLINE v64 v64_max_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vmax_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_min_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vmin_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_max_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(
      vmax_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_min_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(
      vmin_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_max_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vmax_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_min_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(
      vmin_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

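// In the zip (interleave) operations the second argument supplies the
// even-numbered result lanes, e.g. v64_ziplo_8(x, y) returns
// y0, x0, y1, x1, y2, x2, y3, x3 counting from lane 0.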
SIMD_INLINE v64 v64_ziplo_8(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u8(
      vzip1_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)));
#else
  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpret_s64_u8(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_ziphi_8(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u8(
      vzip2_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)));
#else
  uint8x8x2_t r = vzip_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpret_s64_u8(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_ziplo_16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u16(
      vzip1_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x)));
#else
  int16x4x2_t r = vzip_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x));
  return vreinterpret_s64_s16(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_ziphi_16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u16(
      vzip2_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x)));
#else
  int16x4x2_t r = vzip_s16(vreinterpret_s16_s64(y), vreinterpret_s16_s64(x));
  return vreinterpret_s64_s16(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_ziplo_32(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u32(
      vzip1_u32(vreinterpret_u32_s64(y), vreinterpret_u32_s64(x)));
#else
  int32x2x2_t r = vzip_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x));
  return vreinterpret_s64_s32(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_ziphi_32(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u32(
      vzip2_u32(vreinterpret_u32_s64(y), vreinterpret_u32_s64(x)));
#else
  int32x2x2_t r = vzip_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x));
  return vreinterpret_s64_s32(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_unpacklo_u8_s16(v64 a) {
  return vreinterpret_s64_u16(vget_low_u16(vmovl_u8(vreinterpret_u8_s64(a))));
}

SIMD_INLINE v64 v64_unpackhi_u8_s16(v64 a) {
  return vreinterpret_s64_u16(vget_high_u16(vmovl_u8(vreinterpret_u8_s64(a))));
}

SIMD_INLINE v64 v64_unpacklo_s8_s16(v64 a) {
  return vreinterpret_s64_s16(vget_low_s16(vmovl_s8(vreinterpret_s8_s64(a))));
}

SIMD_INLINE v64 v64_unpackhi_s8_s16(v64 a) {
  return vreinterpret_s64_s16(vget_high_s16(vmovl_s8(vreinterpret_s8_s64(a))));
}

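// The pack operations narrow each lane with saturation; the first argument
// supplies the more significant half of the result.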
SIMD_INLINE v64 v64_pack_s32_s16(v64 x, v64 y) {
  return vreinterpret_s64_s16(vqmovn_s32(
      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x))));
}

SIMD_INLINE v64 v64_pack_s32_u16(v64 x, v64 y) {
  return vreinterpret_s64_u16(vqmovun_s32(
      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x))));
}

SIMD_INLINE v64 v64_pack_s16_u8(v64 x, v64 y) {
  return vreinterpret_s64_u8(vqmovun_s16(vreinterpretq_s16_s32(
      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x)))));
}

SIMD_INLINE v64 v64_pack_s16_s8(v64 x, v64 y) {
  return vreinterpret_s64_s8(vqmovn_s16(vreinterpretq_s16_s32(
      vcombine_s32(vreinterpret_s32_s64(y), vreinterpret_s32_s64(x)))));
}

SIMD_INLINE v64 v64_unziplo_8(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u8(
      vuzp1_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)));
#else
  uint8x8x2_t r = vuzp_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpret_s64_u8(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_unziphi_8(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u8(
      vuzp2_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x)));
#else
  uint8x8x2_t r = vuzp_u8(vreinterpret_u8_s64(y), vreinterpret_u8_s64(x));
  return vreinterpret_s64_u8(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_unziplo_16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u16(
      vuzp1_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x)));
#else
  uint16x4x2_t r = vuzp_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
  return vreinterpret_s64_u16(r.val[0]);
#endif
}

SIMD_INLINE v64 v64_unziphi_16(v64 x, v64 y) {
#if defined(__aarch64__)
  return vreinterpret_s64_u16(
      vuzp2_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x)));
#else
  uint16x4x2_t r = vuzp_u16(vreinterpret_u16_s64(y), vreinterpret_u16_s64(x));
  return vreinterpret_s64_u16(r.val[1]);
#endif
}

SIMD_INLINE v64 v64_unpacklo_s16_s32(v64 x) {
  return vreinterpret_s64_s32(vget_low_s32(vmovl_s16(vreinterpret_s16_s64(x))));
}

SIMD_INLINE v64 v64_unpacklo_u16_s32(v64 x) {
  return vreinterpret_s64_u32(vget_low_u32(vmovl_u16(vreinterpret_u16_s64(x))));
}

SIMD_INLINE v64 v64_unpackhi_s16_s32(v64 x) {
  return vreinterpret_s64_s32(
      vget_high_s32(vmovl_s16(vreinterpret_s16_s64(x))));
}

SIMD_INLINE v64 v64_unpackhi_u16_s32(v64 x) {
  return vreinterpret_s64_u32(
      vget_high_u32(vmovl_u16(vreinterpret_u16_s64(x))));
}

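// Each byte of pattern selects one byte of x: indices 0-7 pick the
// corresponding lane and, per vtbl1 semantics, any out-of-range index yields
// zero.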
SIMD_INLINE v64 v64_shuffle_8(v64 x, v64 pattern) {
  return vreinterpret_s64_u8(
      vtbl1_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(pattern)));
}

SIMD_INLINE v64 v64_cmpgt_s8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vcgt_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_cmplt_s8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vclt_s8(vreinterpret_s8_s64(x), vreinterpret_s8_s64(y)));
}

SIMD_INLINE v64 v64_cmpeq_8(v64 x, v64 y) {
  return vreinterpret_s64_u8(
      vceq_u8(vreinterpret_u8_s64(x), vreinterpret_u8_s64(y)));
}

SIMD_INLINE v64 v64_cmpgt_s16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vcgt_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_cmplt_s16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vclt_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

SIMD_INLINE v64 v64_cmpeq_16(v64 x, v64 y) {
  return vreinterpret_s64_u16(
      vceq_s16(vreinterpret_s16_s64(x), vreinterpret_s16_s64(y)));
}

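// NEON has no variable right-shift instruction, so the right shifts below are
// implemented as vshl by a broadcast negated count. Shift counts must be less
// than the lane width.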
SIMD_INLINE v64 v64_shl_8(v64 a, unsigned int c) {
  return vreinterpret_s64_u8(vshl_u8(vreinterpret_u8_s64(a), vdup_n_s8(c)));
}

SIMD_INLINE v64 v64_shr_u8(v64 a, unsigned int c) {
  return vreinterpret_s64_u8(
      vshl_u8(vreinterpret_u8_s64(a), vdup_n_s8(-(int)c)));
}

SIMD_INLINE v64 v64_shr_s8(v64 a, unsigned int c) {
  return vreinterpret_s64_s8(
      vshl_s8(vreinterpret_s8_s64(a), vdup_n_s8(-(int)c)));
}

SIMD_INLINE v64 v64_shl_16(v64 a, unsigned int c) {
  return vreinterpret_s64_u16(vshl_u16(vreinterpret_u16_s64(a), vdup_n_s16(c)));
}

SIMD_INLINE v64 v64_shr_u16(v64 a, unsigned int c) {
  return vreinterpret_s64_u16(
      vshl_u16(vreinterpret_u16_s64(a), vdup_n_s16(-(int)c)));
}

SIMD_INLINE v64 v64_shr_s16(v64 a, unsigned int c) {
  return vreinterpret_s64_s16(
      vshl_s16(vreinterpret_s16_s64(a), vdup_n_s16(-(int)c)));
}

SIMD_INLINE v64 v64_shl_32(v64 a, unsigned int c) {
  return vreinterpret_s64_u32(vshl_u32(vreinterpret_u32_s64(a), vdup_n_s32(c)));
}

SIMD_INLINE v64 v64_shr_u32(v64 a, unsigned int c) {
  return vreinterpret_s64_u32(
      vshl_u32(vreinterpret_u32_s64(a), vdup_n_s32(-(int)c)));
}

SIMD_INLINE v64 v64_shr_s32(v64 a, unsigned int c) {
  return vreinterpret_s64_s32(
      vshl_s32(vreinterpret_s32_s64(a), vdup_n_s32(-(int)c)));
}

// The following functions require an immediate.
// Some compilers will check this during optimisation, others won't.
#if defined(__OPTIMIZE__) && __OPTIMIZE__ && !defined(__clang__)

SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int c) {
  return vshl_n_s64(a, c * 8);
}

SIMD_INLINE v64 v64_shr_n_byte(v64 a, unsigned int c) {
  return c ? (v64)vshr_n_u64(vreinterpret_u64_s64(a), c * 8) : a;
}

SIMD_INLINE v64 v64_shl_n_8(v64 a, unsigned int c) {
  return c ? vreinterpret_s64_u8(vshl_n_u8(vreinterpret_u8_s64(a), c)) : a;
}

SIMD_INLINE v64 v64_shr_n_u8(v64 a, unsigned int c) {
  return c ? vreinterpret_s64_u8(vshr_n_u8(vreinterpret_u8_s64(a), c)) : a;
}

SIMD_INLINE v64 v64_shr_n_s8(v64 a, unsigned int c) {
  return c ? vreinterpret_s64_s8(vshr_n_s8(vreinterpret_s8_s64(a), c)) : a;
}

SIMD_INLINE v64 v64_shl_n_16(v64 a, unsigned int c) {
  return c ? vreinterpret_s64_u16(vshl_n_u16(vreinterpret_u16_s64(a), c)) : a;
}

SIMD_INLINE v64 v64_shr_n_u16(v64 a, unsigned int c) {
  return c ? vreinterpret_s64_u16(vshr_n_u16(vreinterpret_u16_s64(a), c)) : a;
}

SIMD_INLINE v64 v64_shr_n_s16(v64 a, unsigned int c) {
  return c ? vreinterpret_s64_s16(vshr_n_s16(vreinterpret_s16_s64(a), c)) : a;
}

SIMD_INLINE v64 v64_shl_n_32(v64 a, unsigned int c) {
  return c ? vreinterpret_s64_u32(vshl_n_u32(vreinterpret_u32_s64(a), c)) : a;
}

SIMD_INLINE v64 v64_shr_n_u32(v64 a, unsigned int c) {
  return c ? vreinterpret_s64_u32(vshr_n_u32(vreinterpret_u32_s64(a), c)) : a;
}

SIMD_INLINE v64 v64_shr_n_s32(v64 a, unsigned int c) {
  return c ? vreinterpret_s64_s32(vshr_n_s32(vreinterpret_s32_s64(a), c)) : a;
}

#else

SIMD_INLINE v64 v64_shl_n_byte(v64 a, unsigned int c) {
  return v64_from_64(v64_u64(a) << c * 8);
}

SIMD_INLINE v64 v64_shr_n_byte(v64 a, unsigned int c) {
  return v64_from_64(v64_u64(a) >> c * 8);
}

SIMD_INLINE v64 v64_shl_n_8(v64 a, unsigned int c) { return v64_shl_8(a, c); }

SIMD_INLINE v64 v64_shr_n_u8(v64 a, unsigned int c) { return v64_shr_u8(a, c); }

SIMD_INLINE v64 v64_shr_n_s8(v64 a, unsigned int c) { return v64_shr_s8(a, c); }

SIMD_INLINE v64 v64_shl_n_16(v64 a, unsigned int c) { return v64_shl_16(a, c); }

SIMD_INLINE v64 v64_shr_n_u16(v64 a, unsigned int c) {
  return v64_shr_u16(a, c);
}

SIMD_INLINE v64 v64_shr_n_s16(v64 a, unsigned int c) {
  return v64_shr_s16(a, c);
}

SIMD_INLINE v64 v64_shl_n_32(v64 a, unsigned int c) { return v64_shl_32(a, c); }

SIMD_INLINE v64 v64_shr_n_u32(v64 a, unsigned int c) {
  return v64_shr_u32(a, c);
}

SIMD_INLINE v64 v64_shr_n_s32(v64 a, unsigned int c) {
  return v64_shr_s32(a, c);
}

#endif

#endif  // AOM_AOM_DSP_SIMD_V64_INTRINSICS_ARM_H_