/*
 * Copyright 2019 The libgav1 Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef LIBGAV1_SRC_DSP_ARM_COMMON_NEON_H_
#define LIBGAV1_SRC_DSP_ARM_COMMON_NEON_H_

#include "src/utils/cpu.h"

#if LIBGAV1_ENABLE_NEON

#include <arm_neon.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>

#include "src/utils/compiler_attributes.h"

#if 0
#include <cstdio>
#include <string>

constexpr bool kEnablePrintRegs = true;

union DebugRegister {
  int8_t i8[8];
  int16_t i16[4];
  int32_t i32[2];
  uint8_t u8[8];
  uint16_t u16[4];
  uint32_t u32[2];
};

union DebugRegisterQ {
  int8_t i8[16];
  int16_t i16[8];
  int32_t i32[4];
  uint8_t u8[16];
  uint16_t u16[8];
  uint32_t u32[4];
};

// Quite useful helper for debugging. Left here for convenience.
inline void PrintVect(const DebugRegister r, const char* const name, int size) {
  int n;
  if (kEnablePrintRegs) {
    fprintf(stderr, "%s\t: ", name);
    if (size == 8) {
      for (n = 0; n < 8; ++n) fprintf(stderr, "%.2x ", r.u8[n]);
    } else if (size == 16) {
      for (n = 0; n < 4; ++n) fprintf(stderr, "%.4x ", r.u16[n]);
    } else if (size == 32) {
      for (n = 0; n < 2; ++n) fprintf(stderr, "%.8x ", r.u32[n]);
    }
    fprintf(stderr, "\n");
  }
}

// Debugging helper for 128-bit types.
inline void PrintVectQ(const DebugRegisterQ r, const char* const name,
                       int size) {
  int n;
  if (kEnablePrintRegs) {
    fprintf(stderr, "%s\t: ", name);
    if (size == 8) {
      for (n = 0; n < 16; ++n) fprintf(stderr, "%.2x ", r.u8[n]);
    } else if (size == 16) {
      for (n = 0; n < 8; ++n) fprintf(stderr, "%.4x ", r.u16[n]);
    } else if (size == 32) {
      for (n = 0; n < 4; ++n) fprintf(stderr, "%.8x ", r.u32[n]);
    }
    fprintf(stderr, "\n");
  }
}

inline void PrintReg(const int32x4x2_t val, const std::string& name) {
  DebugRegisterQ r;
  vst1q_s32(r.i32, val.val[0]);
  const std::string name0 = name + std::string(".val[0]");
  PrintVectQ(r, name0.c_str(), 32);
  vst1q_s32(r.i32, val.val[1]);
  const std::string name1 = name + std::string(".val[1]");
  PrintVectQ(r, name1.c_str(), 32);
}

inline void PrintReg(const uint32x4_t val, const char* name) {
  DebugRegisterQ r;
  vst1q_u32(r.u32, val);
  PrintVectQ(r, name, 32);
}

inline void PrintReg(const uint32x2_t val, const char* name) {
  DebugRegister r;
  vst1_u32(r.u32, val);
  PrintVect(r, name, 32);
}

inline void PrintReg(const uint16x8_t val, const char* name) {
  DebugRegisterQ r;
  vst1q_u16(r.u16, val);
  PrintVectQ(r, name, 16);
}

inline void PrintReg(const uint16x4_t val, const char* name) {
  DebugRegister r;
  vst1_u16(r.u16, val);
  PrintVect(r, name, 16);
}

inline void PrintReg(const uint8x16_t val, const char* name) {
  DebugRegisterQ r;
  vst1q_u8(r.u8, val);
  PrintVectQ(r, name, 8);
}

inline void PrintReg(const uint8x8_t val, const char* name) {
  DebugRegister r;
  vst1_u8(r.u8, val);
  PrintVect(r, name, 8);
}

inline void PrintReg(const int32x4_t val, const char* name) {
  DebugRegisterQ r;
  vst1q_s32(r.i32, val);
  PrintVectQ(r, name, 32);
}

inline void PrintReg(const int32x2_t val, const char* name) {
  DebugRegister r;
  vst1_s32(r.i32, val);
  PrintVect(r, name, 32);
}

inline void PrintReg(const int16x8_t val, const char* name) {
  DebugRegisterQ r;
  vst1q_s16(r.i16, val);
  PrintVectQ(r, name, 16);
}

inline void PrintReg(const int16x4_t val, const char* name) {
  DebugRegister r;
  vst1_s16(r.i16, val);
  PrintVect(r, name, 16);
}

inline void PrintReg(const int8x16_t val, const char* name) {
  DebugRegisterQ r;
  vst1q_s8(r.i8, val);
  PrintVectQ(r, name, 8);
}

inline void PrintReg(const int8x8_t val, const char* name) {
  DebugRegister r;
  vst1_s8(r.i8, val);
  PrintVect(r, name, 8);
}

// Print an individual (non-vector) value in decimal format.
inline void PrintReg(const int x, const char* name) {
  if (kEnablePrintRegs) {
    fprintf(stderr, "%s: %d\n", name, x);
  }
}

// Print an individual (non-vector) value in hexadecimal format.
inline void PrintHex(const int x, const char* name) {
  if (kEnablePrintRegs) {
    fprintf(stderr, "%s: %x\n", name, x);
  }
}

#define PR(x) PrintReg(x, #x)
#define PD(x) PrintReg(x, #x)
#define PX(x) PrintHex(x, #x)

#if LIBGAV1_MSAN
#include <sanitizer/msan_interface.h>

inline void PrintShadow(const void* r, const char* const name,
                        const size_t size) {
  if (kEnablePrintRegs) {
    fprintf(stderr, "Shadow for %s:\n", name);
    __msan_print_shadow(r, size);
  }
}
#define PS(var, N) PrintShadow(var, #var, N)

#endif  // LIBGAV1_MSAN

#endif  // 0

namespace libgav1 {
namespace dsp {

//------------------------------------------------------------------------------
// Load functions.

// Load 2 uint8_t values into lanes 0 and 1. Zeros the register before loading
// the values. Use caution when using this in loops because it will re-zero the
// register before loading on every iteration.
inline uint8x8_t Load2(const void* const buf) {
  const uint16x4_t zero = vdup_n_u16(0);
  uint16_t temp;
  memcpy(&temp, buf, 2);
  return vreinterpret_u8_u16(vld1_lane_u16(&temp, zero, 0));
}

// Load 2 uint8_t values into |lane| * 2 and |lane| * 2 + 1.
template <int lane>
inline uint8x8_t Load2(const void* const buf, uint8x8_t val) {
  uint16_t temp;
  memcpy(&temp, buf, 2);
  return vreinterpret_u8_u16(
      vld1_lane_u16(&temp, vreinterpret_u16_u8(val), lane));
}

// Load 2 uint16_t values into |lane| * 2 and |lane| * 2 + 1.
template <int lane>
inline uint16x4_t Load2(const void* const buf, uint16x4_t val) {
  uint32_t temp;
  memcpy(&temp, buf, 4);
  return vreinterpret_u16_u32(
      vld1_lane_u32(&temp, vreinterpret_u32_u16(val), lane));
}

// Load 4 uint8_t values into the low half of a uint8x8_t register. Zeros the
// register before loading the values. Use caution when using this in loops
// because it will re-zero the register before loading on every iteration.
inline uint8x8_t Load4(const void* const buf) {
  const uint32x2_t zero = vdup_n_u32(0);
  uint32_t temp;
  memcpy(&temp, buf, 4);
  return vreinterpret_u8_u32(vld1_lane_u32(&temp, zero, 0));
}

// Load 4 uint8_t values into 4 lanes starting with |lane| * 4.
template <int lane>
inline uint8x8_t Load4(const void* const buf, uint8x8_t val) {
  uint32_t temp;
  memcpy(&temp, buf, 4);
  return vreinterpret_u8_u32(
      vld1_lane_u32(&temp, vreinterpret_u32_u8(val), lane));
}
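
// An illustrative sketch (not part of the library API) of how the lane
// variants are typically combined; |src| and |stride| are hypothetical names
// for a uint8_t row pointer and its byte stride. Two 4-byte rows are packed
// into one 8-byte register:
//   uint8x8_t rows = Load4(src);          // row 0 -> lanes 0..3
//   rows = Load4<1>(src + stride, rows);  // row 1 -> lanes 4..7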

// Convenience functions for 16-bit loads from a uint8_t* source.
inline uint16x4_t Load4U16(const void* const buf) {
  return vld1_u16(static_cast<const uint16_t*>(buf));
}

inline uint16x8_t Load8U16(const void* const buf) {
  return vld1q_u16(static_cast<const uint16_t*>(buf));
}

//------------------------------------------------------------------------------
// Load functions to avoid MemorySanitizer's use-of-uninitialized-value warning.

inline uint8x8_t MaskOverreads(const uint8x8_t source,
                               const ptrdiff_t over_read_in_bytes) {
  uint8x8_t dst = source;
#if LIBGAV1_MSAN
  if (over_read_in_bytes > 0) {
    uint8x8_t mask = vdup_n_u8(0);
    uint8x8_t valid_element_mask = vdup_n_u8(-1);
    const int valid_bytes =
        std::min(8, 8 - static_cast<int>(over_read_in_bytes));
    for (int i = 0; i < valid_bytes; ++i) {
      // Feed ff bytes into |mask| one at a time.
      mask = vext_u8(valid_element_mask, mask, 7);
    }
    dst = vand_u8(dst, mask);
  }
#else
  static_cast<void>(over_read_in_bytes);
#endif
  return dst;
}
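
// As an illustration, MaskOverreads(source, /*over_read_in_bytes=*/3) ANDs
// |source| with ff ff ff ff ff 00 00 00, so the three trailing bytes, which
// may have been read past the end of the valid data, no longer carry
// uninitialized values into later computations.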

inline uint8x16_t MaskOverreadsQ(const uint8x16_t source,
                                 const ptrdiff_t over_read_in_bytes) {
  uint8x16_t dst = source;
#if LIBGAV1_MSAN
  if (over_read_in_bytes > 0) {
    uint8x16_t mask = vdupq_n_u8(0);
    uint8x16_t valid_element_mask = vdupq_n_u8(-1);
    const int valid_bytes =
        std::min(16, 16 - static_cast<int>(over_read_in_bytes));
    for (int i = 0; i < valid_bytes; ++i) {
      // Feed ff bytes into |mask| one at a time.
      mask = vextq_u8(valid_element_mask, mask, 15);
    }
    dst = vandq_u8(dst, mask);
  }
#else
  static_cast<void>(over_read_in_bytes);
#endif
  return dst;
}

inline uint8x8_t Load1MsanU8(const uint8_t* const source,
                             const ptrdiff_t over_read_in_bytes) {
  return MaskOverreads(vld1_u8(source), over_read_in_bytes);
}

inline uint8x16_t Load1QMsanU8(const uint8_t* const source,
                               const ptrdiff_t over_read_in_bytes) {
  return MaskOverreadsQ(vld1q_u8(source), over_read_in_bytes);
}

inline uint16x8_t Load1QMsanU16(const uint16_t* const source,
                                const ptrdiff_t over_read_in_bytes) {
  return vreinterpretq_u16_u8(MaskOverreadsQ(
      vreinterpretq_u8_u16(vld1q_u16(source)), over_read_in_bytes));
}

inline uint16x8x2_t Load2QMsanU16(const uint16_t* const source,
                                  const ptrdiff_t over_read_in_bytes) {
  // Relative source index of elements (2 bytes each):
  // dst.val[0]: 00 02 04 06 08 10 12 14
  // dst.val[1]: 01 03 05 07 09 11 13 15
  uint16x8x2_t dst = vld2q_u16(source);
  dst.val[0] = vreinterpretq_u16_u8(MaskOverreadsQ(
      vreinterpretq_u8_u16(dst.val[0]), over_read_in_bytes >> 1));
  dst.val[1] = vreinterpretq_u16_u8(
      MaskOverreadsQ(vreinterpretq_u8_u16(dst.val[1]),
                     (over_read_in_bytes >> 1) + (over_read_in_bytes % 4)));
  return dst;
}

inline uint32x4_t Load1QMsanU32(const uint32_t* const source,
                                const ptrdiff_t over_read_in_bytes) {
  return vreinterpretq_u32_u8(MaskOverreadsQ(
      vreinterpretq_u8_u32(vld1q_u32(source)), over_read_in_bytes));
}
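
// An illustrative sketch (hypothetical |src|, |x| and |width|): when the last
// 16-byte load of a row extends past |width|, pass the number of bytes that
// fall beyond the row so MemorySanitizer does not flag them.
//   const uint8x16_t tail = Load1QMsanU8(src + x, x + 16 - width);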

//------------------------------------------------------------------------------
// Store functions.

// Propagate type information to the compiler. Without this the compiler may
// assume the required alignment of the type (4 bytes in the case of uint32_t)
// and add alignment hints to the memory access.
template <typename T>
inline void ValueToMem(void* const buf, T val) {
  memcpy(buf, &val, sizeof(val));
}

// Store 4 int8_t values from the low half of an int8x8_t register.
inline void StoreLo4(void* const buf, const int8x8_t val) {
  ValueToMem<int32_t>(buf, vget_lane_s32(vreinterpret_s32_s8(val), 0));
}

// Store 4 uint8_t values from the low half of a uint8x8_t register.
inline void StoreLo4(void* const buf, const uint8x8_t val) {
  ValueToMem<uint32_t>(buf, vget_lane_u32(vreinterpret_u32_u8(val), 0));
}

// Store 4 uint8_t values from the high half of a uint8x8_t register.
inline void StoreHi4(void* const buf, const uint8x8_t val) {
  ValueToMem<uint32_t>(buf, vget_lane_u32(vreinterpret_u32_u8(val), 1));
}

// Store 2 uint8_t values from |lane| * 2 and |lane| * 2 + 1 of a uint8x8_t
// register.
template <int lane>
inline void Store2(void* const buf, const uint8x8_t val) {
  ValueToMem<uint16_t>(buf, vget_lane_u16(vreinterpret_u16_u8(val), lane));
}

// Store 2 uint16_t values from |lane| * 2 and |lane| * 2 + 1 of a uint16x8_t
// register.
template <int lane>
inline void Store2(void* const buf, const uint16x8_t val) {
  ValueToMem<uint32_t>(buf, vgetq_lane_u32(vreinterpretq_u32_u16(val), lane));
}

// Store 2 uint16_t values from |lane| * 2 and |lane| * 2 + 1 of a uint16x4_t
// register.
template <int lane>
inline void Store2(void* const buf, const uint16x4_t val) {
  ValueToMem<uint32_t>(buf, vget_lane_u32(vreinterpret_u32_u16(val), lane));
}

// Simplify code when caller has |buf| cast as uint8_t*.
inline void Store4(void* const buf, const uint16x4_t val) {
  vst1_u16(static_cast<uint16_t*>(buf), val);
}

// Simplify code when caller has |buf| cast as uint8_t*.
inline void Store8(void* const buf, const uint16x8_t val) {
  vst1q_u16(static_cast<uint16_t*>(buf), val);
}
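
// An illustrative sketch (hypothetical |dst| and |stride|): with a 16-bit
// frame addressed through a uint8_t* row pointer and a byte stride, rows can
// be written without casting at every call site:
//   Store8(dst, row0);
//   Store8(dst + stride, row1);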

//------------------------------------------------------------------------------
// Pointer helpers.

// This function adds |stride|, given as a number of bytes, to a pointer to a
// larger type, using native pointer arithmetic.
template <typename T>
inline T* AddByteStride(T* ptr, const ptrdiff_t stride) {
  return reinterpret_cast<T*>(
      const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(ptr) + stride));
}
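
// An illustrative sketch (hypothetical |dst| and |stride|): |dst| is a
// uint16_t* but |stride| is measured in bytes, so plain pointer arithmetic
// would advance twice as far as intended.
//   vst1q_u16(dst, row0);
//   dst = AddByteStride(dst, stride);
//   vst1q_u16(dst, row1);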

//------------------------------------------------------------------------------
// Multiply.

// Shim vmull_high_u16 for armv7.
inline uint32x4_t VMullHighU16(const uint16x8_t a, const uint16x8_t b) {
#if defined(__aarch64__)
  return vmull_high_u16(a, b);
#else
  return vmull_u16(vget_high_u16(a), vget_high_u16(b));
#endif
}

// Shim vmull_high_s16 for armv7.
inline int32x4_t VMullHighS16(const int16x8_t a, const int16x8_t b) {
#if defined(__aarch64__)
  return vmull_high_s16(a, b);
#else
  return vmull_s16(vget_high_s16(a), vget_high_s16(b));
#endif
}

// Shim vmlal_high_u16 for armv7.
inline uint32x4_t VMlalHighU16(const uint32x4_t a, const uint16x8_t b,
                               const uint16x8_t c) {
#if defined(__aarch64__)
  return vmlal_high_u16(a, b, c);
#else
  return vmlal_u16(a, vget_high_u16(b), vget_high_u16(c));
#endif
}

// Shim vmlal_high_s16 for armv7.
inline int32x4_t VMlalHighS16(const int32x4_t a, const int16x8_t b,
                              const int16x8_t c) {
#if defined(__aarch64__)
  return vmlal_high_s16(a, b, c);
#else
  return vmlal_s16(a, vget_high_s16(b), vget_high_s16(c));
#endif
}
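
// An illustrative sketch of the usual pairing (hypothetical uint16x8_t values
// |a| and |b|): the low half uses the ordinary intrinsic and the high half
// uses the shim, so the same source builds for both armv7 and aarch64.
//   const uint32x4_t lo = vmull_u16(vget_low_u16(a), vget_low_u16(b));
//   const uint32x4_t hi = VMullHighU16(a, b);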

// Shim vmul_laneq_u16 for armv7.
template <int lane>
inline uint16x4_t VMulLaneQU16(const uint16x4_t a, const uint16x8_t b) {
#if defined(__aarch64__)
  return vmul_laneq_u16(a, b, lane);
#else
  if (lane < 4) return vmul_lane_u16(a, vget_low_u16(b), lane & 0x3);
  return vmul_lane_u16(a, vget_high_u16(b), (lane - 4) & 0x3);
#endif
}

// Shim vmulq_laneq_u16 for armv7.
template <int lane>
inline uint16x8_t VMulQLaneQU16(const uint16x8_t a, const uint16x8_t b) {
#if defined(__aarch64__)
  return vmulq_laneq_u16(a, b, lane);
#else
  if (lane < 4) return vmulq_lane_u16(a, vget_low_u16(b), lane & 0x3);
  return vmulq_lane_u16(a, vget_high_u16(b), (lane - 4) & 0x3);
#endif
}

// Shim vmla_laneq_u16 for armv7.
template <int lane>
inline uint16x4_t VMlaLaneQU16(const uint16x4_t a, const uint16x4_t b,
                               const uint16x8_t c) {
#if defined(__aarch64__)
  return vmla_laneq_u16(a, b, c, lane);
#else
  if (lane < 4) return vmla_lane_u16(a, b, vget_low_u16(c), lane & 0x3);
  return vmla_lane_u16(a, b, vget_high_u16(c), (lane - 4) & 0x3);
#endif
}

// Shim vmlaq_laneq_u16 for armv7.
template <int lane>
inline uint16x8_t VMlaQLaneQU16(const uint16x8_t a, const uint16x8_t b,
                                const uint16x8_t c) {
#if defined(__aarch64__)
  return vmlaq_laneq_u16(a, b, c, lane);
#else
  if (lane < 4) return vmlaq_lane_u16(a, b, vget_low_u16(c), lane & 0x3);
  return vmlaq_lane_u16(a, b, vget_high_u16(c), (lane - 4) & 0x3);
#endif
}
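
// An illustrative sketch (hypothetical |src| rows and |taps| vector): with
// all filter taps held in one uint16x8_t, the lane index selects a tap at
// compile time on both architectures.
//   uint16x8_t sum = VMulQLaneQU16<0>(src[0], taps);
//   sum = VMlaQLaneQU16<1>(sum, src[1], taps);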

//------------------------------------------------------------------------------
// Bit manipulation.

// vshXX_n_XX() requires an immediate.
template <int shift>
inline uint8x8_t LeftShiftVector(const uint8x8_t vector) {
  return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vector), shift));
}

template <int shift>
inline uint8x8_t RightShiftVector(const uint8x8_t vector) {
  return vreinterpret_u8_u64(vshr_n_u64(vreinterpret_u64_u8(vector), shift));
}

template <int shift>
inline int8x8_t RightShiftVector(const int8x8_t vector) {
  return vreinterpret_s8_u64(vshr_n_u64(vreinterpret_u64_s8(vector), shift));
}
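
// The shift amount is expressed in bits, so moving whole bytes uses a
// multiple of 8. An illustrative sketch (hypothetical |vec|): shift every
// byte one lane towards lane 0, filling the top byte with zero.
//   const uint8x8_t shifted = RightShiftVector<8>(vec);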

// Shim vqtbl1_u8 for armv7.
inline uint8x8_t VQTbl1U8(const uint8x16_t a, const uint8x8_t index) {
#if defined(__aarch64__)
  return vqtbl1_u8(a, index);
#else
  const uint8x8x2_t b = {vget_low_u8(a), vget_high_u8(a)};
  return vtbl2_u8(b, index);
#endif
}

// Shim vqtbl2_u8 for armv7.
inline uint8x8_t VQTbl2U8(const uint8x16x2_t a, const uint8x8_t index) {
#if defined(__aarch64__)
  return vqtbl2_u8(a, index);
#else
  const uint8x8x4_t b = {vget_low_u8(a.val[0]), vget_high_u8(a.val[0]),
                         vget_low_u8(a.val[1]), vget_high_u8(a.val[1])};
  return vtbl4_u8(b, index);
#endif
}

// Shim vqtbl2q_u8 for armv7.
inline uint8x16_t VQTbl2QU8(const uint8x16x2_t a, const uint8x16_t index) {
#if defined(__aarch64__)
  return vqtbl2q_u8(a, index);
#else
  return vcombine_u8(VQTbl2U8(a, vget_low_u8(index)),
                     VQTbl2U8(a, vget_high_u8(index)));
#endif
}

// Shim vqtbl3_u8 for armv7.
inline uint8x8_t VQTbl3U8(const uint8x16x3_t a, const uint8x8_t index) {
#if defined(__aarch64__)
  return vqtbl3_u8(a, index);
#else
  const uint8x8x4_t b = {vget_low_u8(a.val[0]), vget_high_u8(a.val[0]),
                         vget_low_u8(a.val[1]), vget_high_u8(a.val[1])};
  const uint8x8x2_t c = {vget_low_u8(a.val[2]), vget_high_u8(a.val[2])};
  const uint8x8_t index_ext = vsub_u8(index, vdup_n_u8(32));
  const uint8x8_t partial_lookup = vtbl4_u8(b, index);
  return vtbx2_u8(partial_lookup, c, index_ext);
#endif
}

// Shim vqtbl3q_u8 for armv7.
inline uint8x16_t VQTbl3QU8(const uint8x16x3_t a, const uint8x16_t index) {
#if defined(__aarch64__)
  return vqtbl3q_u8(a, index);
#else
  return vcombine_u8(VQTbl3U8(a, vget_low_u8(index)),
                     VQTbl3U8(a, vget_high_u8(index)));
#endif
}

// Shim vqtbl1_s8 for armv7.
inline int8x8_t VQTbl1S8(const int8x16_t a, const uint8x8_t index) {
#if defined(__aarch64__)
  return vqtbl1_s8(a, index);
#else
  const int8x8x2_t b = {vget_low_s8(a), vget_high_s8(a)};
  return vtbl2_s8(b, vreinterpret_s8_u8(index));
#endif
}
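
// An illustrative sketch (hypothetical |table|): any byte permutation of a
// 128-bit table can be expressed as one lookup; indices outside the table
// return zero on both code paths. Here the low eight table bytes are
// reversed.
//   const uint8x8_t rev_idx = vcreate_u8(0x0001020304050607);
//   const uint8x8_t reversed = VQTbl1U8(table, rev_idx);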

//------------------------------------------------------------------------------
// Saturation helpers.

inline int16x4_t Clip3S16(int16x4_t val, int16x4_t low, int16x4_t high) {
  return vmin_s16(vmax_s16(val, low), high);
}

inline int16x8_t Clip3S16(const int16x8_t val, const int16x8_t low,
                          const int16x8_t high) {
  return vminq_s16(vmaxq_s16(val, low), high);
}

inline uint16x8_t ConvertToUnsignedPixelU16(int16x8_t val, int bitdepth) {
  const int16x8_t low = vdupq_n_s16(0);
  const uint16x8_t high = vdupq_n_u16((1 << bitdepth) - 1);

  return vminq_u16(vreinterpretq_u16_s16(vmaxq_s16(val, low)), high);
}
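
// An illustrative sketch (hypothetical |sum|): for a 10-bit stream this
// clamps a signed intermediate to the valid pixel range [0, 1023] before it
// is stored.
//   const uint16x8_t pixels = ConvertToUnsignedPixelU16(sum, 10);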

//------------------------------------------------------------------------------
// Interleave.

// vzipN is exclusive to A64.
inline uint8x8_t InterleaveLow8(const uint8x8_t a, const uint8x8_t b) {
#if defined(__aarch64__)
  return vzip1_u8(a, b);
#else
  // Discard |.val[1]|
  return vzip_u8(a, b).val[0];
#endif
}

inline uint8x8_t InterleaveLow32(const uint8x8_t a, const uint8x8_t b) {
#if defined(__aarch64__)
  return vreinterpret_u8_u32(
      vzip1_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b)));
#else
  // Discard |.val[1]|
  return vreinterpret_u8_u32(
      vzip_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b)).val[0]);
#endif
}

inline int8x8_t InterleaveLow32(const int8x8_t a, const int8x8_t b) {
#if defined(__aarch64__)
  return vreinterpret_s8_u32(
      vzip1_u32(vreinterpret_u32_s8(a), vreinterpret_u32_s8(b)));
#else
  // Discard |.val[1]|
  return vreinterpret_s8_u32(
      vzip_u32(vreinterpret_u32_s8(a), vreinterpret_u32_s8(b)).val[0]);
#endif
}

inline uint8x8_t InterleaveHigh32(const uint8x8_t a, const uint8x8_t b) {
#if defined(__aarch64__)
  return vreinterpret_u8_u32(
      vzip2_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b)));
#else
  // Discard |.val[0]|
  return vreinterpret_u8_u32(
      vzip_u32(vreinterpret_u32_u8(a), vreinterpret_u32_u8(b)).val[1]);
#endif
}

inline int8x8_t InterleaveHigh32(const int8x8_t a, const int8x8_t b) {
#if defined(__aarch64__)
  return vreinterpret_s8_u32(
      vzip2_u32(vreinterpret_u32_s8(a), vreinterpret_u32_s8(b)));
#else
  // Discard |.val[0]|
  return vreinterpret_s8_u32(
      vzip_u32(vreinterpret_u32_s8(a), vreinterpret_u32_s8(b)).val[1]);
#endif
}

//------------------------------------------------------------------------------
// Sum.

inline uint16_t SumVector(const uint8x8_t a) {
#if defined(__aarch64__)
  return vaddlv_u8(a);
#else
  const uint16x4_t c = vpaddl_u8(a);
  const uint32x2_t d = vpaddl_u16(c);
  const uint64x1_t e = vpaddl_u32(d);
  return static_cast<uint16_t>(vget_lane_u64(e, 0));
#endif  // defined(__aarch64__)
}

inline uint32_t SumVector(const uint32x2_t a) {
#if defined(__aarch64__)
  return vaddv_u32(a);
#else
  const uint64x1_t b = vpaddl_u32(a);
  return vget_lane_u32(vreinterpret_u32_u64(b), 0);
#endif  // defined(__aarch64__)
}

inline uint32_t SumVector(const uint32x4_t a) {
#if defined(__aarch64__)
  return vaddvq_u32(a);
#else
  const uint64x2_t b = vpaddlq_u32(a);
  const uint64x1_t c = vadd_u64(vget_low_u64(b), vget_high_u64(b));
  return static_cast<uint32_t>(vget_lane_u64(c, 0));
#endif
}
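
// An illustrative sketch (hypothetical uint16x8_t |absdiff| accumulator): the
// final horizontal reduction of a SAD-style loop collapses to a single call.
//   const uint32_t sad = SumVector(vpaddlq_u16(absdiff));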

//------------------------------------------------------------------------------
// Transpose.

// Transpose 32 bit elements such that:
// a: 00 01
// b: 02 03
// returns
// val[0]: 00 02
// val[1]: 01 03
inline uint8x8x2_t Interleave32(const uint8x8_t a, const uint8x8_t b) {
  const uint32x2_t a_32 = vreinterpret_u32_u8(a);
  const uint32x2_t b_32 = vreinterpret_u32_u8(b);
  const uint32x2x2_t c = vtrn_u32(a_32, b_32);
  const uint8x8x2_t d = {vreinterpret_u8_u32(c.val[0]),
                         vreinterpret_u8_u32(c.val[1])};
  return d;
}

// Swap high and low 32 bit elements.
inline uint8x8_t Transpose32(const uint8x8_t a) {
  const uint32x2_t b = vrev64_u32(vreinterpret_u32_u8(a));
  return vreinterpret_u8_u32(b);
}

// Swap high and low halves.
inline uint16x8_t Transpose64(const uint16x8_t a) { return vextq_u16(a, a, 4); }

// Implement vtrnq_s64().
// Input:
// a0: 00 01 02 03 04 05 06 07
// a1: 16 17 18 19 20 21 22 23
// Output:
// b0.val[0]: 00 01 02 03 16 17 18 19
// b0.val[1]: 04 05 06 07 20 21 22 23
inline int16x8x2_t VtrnqS64(int32x4_t a0, int32x4_t a1) {
  int16x8x2_t b0;
  b0.val[0] = vcombine_s16(vreinterpret_s16_s32(vget_low_s32(a0)),
                           vreinterpret_s16_s32(vget_low_s32(a1)));
  b0.val[1] = vcombine_s16(vreinterpret_s16_s32(vget_high_s32(a0)),
                           vreinterpret_s16_s32(vget_high_s32(a1)));
  return b0;
}

// Unsigned.
inline uint16x8x2_t VtrnqU64(uint32x4_t a0, uint32x4_t a1) {
  uint16x8x2_t b0;
  b0.val[0] = vcombine_u16(vreinterpret_u16_u32(vget_low_u32(a0)),
                           vreinterpret_u16_u32(vget_low_u32(a1)));
  b0.val[1] = vcombine_u16(vreinterpret_u16_u32(vget_high_u32(a0)),
                           vreinterpret_u16_u32(vget_high_u32(a1)));
  return b0;
}

// Input:
// 00 01 02 03
// 10 11 12 13
// 20 21 22 23
// 30 31 32 33
inline void Transpose4x4(uint16x4_t a[4]) {
  // b:
  // 00 10 02 12
  // 01 11 03 13
  const uint16x4x2_t b = vtrn_u16(a[0], a[1]);
  // c:
  // 20 30 22 32
  // 21 31 23 33
  const uint16x4x2_t c = vtrn_u16(a[2], a[3]);
  // d:
  // 00 10 20 30
  // 02 12 22 32
  const uint32x2x2_t d =
      vtrn_u32(vreinterpret_u32_u16(b.val[0]), vreinterpret_u32_u16(c.val[0]));
  // e:
  // 01 11 21 31
  // 03 13 23 33
  const uint32x2x2_t e =
      vtrn_u32(vreinterpret_u32_u16(b.val[1]), vreinterpret_u32_u16(c.val[1]));
  a[0] = vreinterpret_u16_u32(d.val[0]);
  a[1] = vreinterpret_u16_u32(e.val[0]);
  a[2] = vreinterpret_u16_u32(d.val[1]);
  a[3] = vreinterpret_u16_u32(e.val[1]);
}

// Input:
// a: 00 01 02 03 10 11 12 13
// b: 20 21 22 23 30 31 32 33
// Output:
// Note that columns [1] and [2] are transposed.
// a: 00 10 20 30 02 12 22 32
// b: 01 11 21 31 03 13 23 33
inline void Transpose4x4(uint8x8_t* a, uint8x8_t* b) {
  const uint16x4x2_t c =
      vtrn_u16(vreinterpret_u16_u8(*a), vreinterpret_u16_u8(*b));
  const uint32x2x2_t d =
      vtrn_u32(vreinterpret_u32_u16(c.val[0]), vreinterpret_u32_u16(c.val[1]));
  const uint8x8x2_t e =
      vtrn_u8(vreinterpret_u8_u32(d.val[0]), vreinterpret_u8_u32(d.val[1]));
  *a = e.val[0];
  *b = e.val[1];
}

// 4x8 Input:
// a[0]: 00 01 02 03 04 05 06 07
// a[1]: 10 11 12 13 14 15 16 17
// a[2]: 20 21 22 23 24 25 26 27
// a[3]: 30 31 32 33 34 35 36 37
// 8x4 Output:
// a[0]: 00 10 20 30 04 14 24 34
// a[1]: 01 11 21 31 05 15 25 35
// a[2]: 02 12 22 32 06 16 26 36
// a[3]: 03 13 23 33 07 17 27 37
inline void Transpose4x8(uint16x8_t a[4]) {
  // b0.val[0]: 00 10 02 12 04 14 06 16
  // b0.val[1]: 01 11 03 13 05 15 07 17
  // b1.val[0]: 20 30 22 32 24 34 26 36
  // b1.val[1]: 21 31 23 33 25 35 27 37
  const uint16x8x2_t b0 = vtrnq_u16(a[0], a[1]);
  const uint16x8x2_t b1 = vtrnq_u16(a[2], a[3]);

  // c0.val[0]: 00 10 20 30 04 14 24 34
  // c0.val[1]: 02 12 22 32 06 16 26 36
  // c1.val[0]: 01 11 21 31 05 15 25 35
  // c1.val[1]: 03 13 23 33 07 17 27 37
  const uint32x4x2_t c0 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[0]),
                                    vreinterpretq_u32_u16(b1.val[0]));
  const uint32x4x2_t c1 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[1]),
                                    vreinterpretq_u32_u16(b1.val[1]));

  a[0] = vreinterpretq_u16_u32(c0.val[0]);
  a[1] = vreinterpretq_u16_u32(c1.val[0]);
  a[2] = vreinterpretq_u16_u32(c0.val[1]);
  a[3] = vreinterpretq_u16_u32(c1.val[1]);
}

// Special transpose for loop filter.
// 4x8 Input:
// p_q:  p3 p2 p1 p0 q0 q1 q2 q3
// a[0]: 00 01 02 03 04 05 06 07
// a[1]: 10 11 12 13 14 15 16 17
// a[2]: 20 21 22 23 24 25 26 27
// a[3]: 30 31 32 33 34 35 36 37
// 8x4 Output:
// a[0]: 03 13 23 33 04 14 24 34  p0q0
// a[1]: 02 12 22 32 05 15 25 35  p1q1
// a[2]: 01 11 21 31 06 16 26 36  p2q2
// a[3]: 00 10 20 30 07 17 27 37  p3q3
// Direct reapplication of the function will reset the high halves, but
// reverse the low halves:
// p_q:  p0 p1 p2 p3 q0 q1 q2 q3
// a[0]: 33 32 31 30 04 05 06 07
// a[1]: 23 22 21 20 14 15 16 17
// a[2]: 13 12 11 10 24 25 26 27
// a[3]: 03 02 01 00 34 35 36 37
// Simply reordering the inputs (3, 2, 1, 0) will reset the low halves, but
// reverse the high halves.
// The standard Transpose4x8 will produce the same reversals, but with the
// order of the low halves also restored relative to the high halves. This is
// preferable because it puts all values from the same source row back together,
// but some post-processing is inevitable.
inline void LoopFilterTranspose4x8(uint16x8_t a[4]) {
  // b0.val[0]: 00 10 02 12 04 14 06 16
  // b0.val[1]: 01 11 03 13 05 15 07 17
  // b1.val[0]: 20 30 22 32 24 34 26 36
  // b1.val[1]: 21 31 23 33 25 35 27 37
  const uint16x8x2_t b0 = vtrnq_u16(a[0], a[1]);
  const uint16x8x2_t b1 = vtrnq_u16(a[2], a[3]);

  // Reverse odd vectors to bring the appropriate items to the front of zips.
  // b0.val[0]: 00 10 02 12 04 14 06 16
  // r0       : 03 13 01 11 07 17 05 15
  // b1.val[0]: 20 30 22 32 24 34 26 36
  // r1       : 23 33 21 31 27 37 25 35
  const uint32x4_t r0 = vrev64q_u32(vreinterpretq_u32_u16(b0.val[1]));
  const uint32x4_t r1 = vrev64q_u32(vreinterpretq_u32_u16(b1.val[1]));

  // Zip to complete the halves.
  // c0.val[0]: 00 10 20 30 02 12 22 32  p3p1
  // c0.val[1]: 04 14 24 34 06 16 26 36  q0q2
  // c1.val[0]: 03 13 23 33 01 11 21 31  p0p2
  // c1.val[1]: 07 17 27 37 05 15 25 35  q3q1
  const uint32x4x2_t c0 = vzipq_u32(vreinterpretq_u32_u16(b0.val[0]),
                                    vreinterpretq_u32_u16(b1.val[0]));
  const uint32x4x2_t c1 = vzipq_u32(r0, r1);

  // d0.val[0]: 00 10 20 30 07 17 27 37  p3q3
  // d0.val[1]: 02 12 22 32 05 15 25 35  p1q1
  // d1.val[0]: 03 13 23 33 04 14 24 34  p0q0
  // d1.val[1]: 01 11 21 31 06 16 26 36  p2q2
  const uint16x8x2_t d0 = VtrnqU64(c0.val[0], c1.val[1]);
  // The third row of c comes first here to swap p2 with q0.
  const uint16x8x2_t d1 = VtrnqU64(c1.val[0], c0.val[1]);

  // 8x4 Output:
  // a[0]: 03 13 23 33 04 14 24 34  p0q0
  // a[1]: 02 12 22 32 05 15 25 35  p1q1
  // a[2]: 01 11 21 31 06 16 26 36  p2q2
  // a[3]: 00 10 20 30 07 17 27 37  p3q3
  a[0] = d1.val[0];  // p0q0
  a[1] = d0.val[1];  // p1q1
  a[2] = d1.val[1];  // p2q2
  a[3] = d0.val[0];  // p3q3
}

// Reversible if the x4 values are packed next to each other.
// x4 input / x8 output:
// a0: 00 01 02 03 40 41 42 43
// a1: 10 11 12 13 50 51 52 53
// a2: 20 21 22 23 60 61 62 63
// a3: 30 31 32 33 70 71 72 73
// x8 input / x4 output:
// a0: 00 10 20 30 40 50 60 70
// a1: 01 11 21 31 41 51 61 71
// a2: 02 12 22 32 42 52 62 72
// a3: 03 13 23 33 43 53 63 73
inline void Transpose8x4(uint8x8_t* a0, uint8x8_t* a1, uint8x8_t* a2,
                         uint8x8_t* a3) {
  const uint8x8x2_t b0 = vtrn_u8(*a0, *a1);
  const uint8x8x2_t b1 = vtrn_u8(*a2, *a3);

  const uint16x4x2_t c0 =
      vtrn_u16(vreinterpret_u16_u8(b0.val[0]), vreinterpret_u16_u8(b1.val[0]));
  const uint16x4x2_t c1 =
      vtrn_u16(vreinterpret_u16_u8(b0.val[1]), vreinterpret_u16_u8(b1.val[1]));

  *a0 = vreinterpret_u8_u16(c0.val[0]);
  *a1 = vreinterpret_u8_u16(c1.val[0]);
  *a2 = vreinterpret_u8_u16(c0.val[1]);
  *a3 = vreinterpret_u8_u16(c1.val[1]);
}

// Input:
// a[0]: 00 01 02 03 04 05 06 07
// a[1]: 10 11 12 13 14 15 16 17
// a[2]: 20 21 22 23 24 25 26 27
// a[3]: 30 31 32 33 34 35 36 37
// a[4]: 40 41 42 43 44 45 46 47
// a[5]: 50 51 52 53 54 55 56 57
// a[6]: 60 61 62 63 64 65 66 67
// a[7]: 70 71 72 73 74 75 76 77

// Output:
// a[0]: 00 10 20 30 40 50 60 70
// a[1]: 01 11 21 31 41 51 61 71
// a[2]: 02 12 22 32 42 52 62 72
// a[3]: 03 13 23 33 43 53 63 73
// a[4]: 04 14 24 34 44 54 64 74
// a[5]: 05 15 25 35 45 55 65 75
// a[6]: 06 16 26 36 46 56 66 76
// a[7]: 07 17 27 37 47 57 67 77
inline void Transpose8x8(int8x8_t a[8]) {
  // Swap 8 bit elements. Goes from:
  // a[0]: 00 01 02 03 04 05 06 07
  // a[1]: 10 11 12 13 14 15 16 17
  // a[2]: 20 21 22 23 24 25 26 27
  // a[3]: 30 31 32 33 34 35 36 37
  // a[4]: 40 41 42 43 44 45 46 47
  // a[5]: 50 51 52 53 54 55 56 57
  // a[6]: 60 61 62 63 64 65 66 67
  // a[7]: 70 71 72 73 74 75 76 77
  // to:
  // b0.val[0]: 00 10 02 12 04 14 06 16  40 50 42 52 44 54 46 56
  // b0.val[1]: 01 11 03 13 05 15 07 17  41 51 43 53 45 55 47 57
  // b1.val[0]: 20 30 22 32 24 34 26 36  60 70 62 72 64 74 66 76
  // b1.val[1]: 21 31 23 33 25 35 27 37  61 71 63 73 65 75 67 77
  const int8x16x2_t b0 =
      vtrnq_s8(vcombine_s8(a[0], a[4]), vcombine_s8(a[1], a[5]));
  const int8x16x2_t b1 =
      vtrnq_s8(vcombine_s8(a[2], a[6]), vcombine_s8(a[3], a[7]));

  // Swap 16 bit elements resulting in:
  // c0.val[0]: 00 10 20 30 04 14 24 34  40 50 60 70 44 54 64 74
  // c0.val[1]: 02 12 22 32 06 16 26 36  42 52 62 72 46 56 66 76
  // c1.val[0]: 01 11 21 31 05 15 25 35  41 51 61 71 45 55 65 75
  // c1.val[1]: 03 13 23 33 07 17 27 37  43 53 63 73 47 57 67 77
  const int16x8x2_t c0 = vtrnq_s16(vreinterpretq_s16_s8(b0.val[0]),
                                   vreinterpretq_s16_s8(b1.val[0]));
  const int16x8x2_t c1 = vtrnq_s16(vreinterpretq_s16_s8(b0.val[1]),
                                   vreinterpretq_s16_s8(b1.val[1]));

  // Unzip 32 bit elements resulting in:
  // d0.val[0]: 00 10 20 30 40 50 60 70  01 11 21 31 41 51 61 71
  // d0.val[1]: 04 14 24 34 44 54 64 74  05 15 25 35 45 55 65 75
  // d1.val[0]: 02 12 22 32 42 52 62 72  03 13 23 33 43 53 63 73
  // d1.val[1]: 06 16 26 36 46 56 66 76  07 17 27 37 47 57 67 77
  const int32x4x2_t d0 = vuzpq_s32(vreinterpretq_s32_s16(c0.val[0]),
                                   vreinterpretq_s32_s16(c1.val[0]));
  const int32x4x2_t d1 = vuzpq_s32(vreinterpretq_s32_s16(c0.val[1]),
                                   vreinterpretq_s32_s16(c1.val[1]));

  a[0] = vreinterpret_s8_s32(vget_low_s32(d0.val[0]));
  a[1] = vreinterpret_s8_s32(vget_high_s32(d0.val[0]));
  a[2] = vreinterpret_s8_s32(vget_low_s32(d1.val[0]));
  a[3] = vreinterpret_s8_s32(vget_high_s32(d1.val[0]));
  a[4] = vreinterpret_s8_s32(vget_low_s32(d0.val[1]));
  a[5] = vreinterpret_s8_s32(vget_high_s32(d0.val[1]));
  a[6] = vreinterpret_s8_s32(vget_low_s32(d1.val[1]));
  a[7] = vreinterpret_s8_s32(vget_high_s32(d1.val[1]));
}

// Unsigned.
inline void Transpose8x8(uint8x8_t a[8]) {
  const uint8x16x2_t b0 =
      vtrnq_u8(vcombine_u8(a[0], a[4]), vcombine_u8(a[1], a[5]));
  const uint8x16x2_t b1 =
      vtrnq_u8(vcombine_u8(a[2], a[6]), vcombine_u8(a[3], a[7]));

  const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]),
                                    vreinterpretq_u16_u8(b1.val[0]));
  const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]),
                                    vreinterpretq_u16_u8(b1.val[1]));

  const uint32x4x2_t d0 = vuzpq_u32(vreinterpretq_u32_u16(c0.val[0]),
                                    vreinterpretq_u32_u16(c1.val[0]));
  const uint32x4x2_t d1 = vuzpq_u32(vreinterpretq_u32_u16(c0.val[1]),
                                    vreinterpretq_u32_u16(c1.val[1]));

  a[0] = vreinterpret_u8_u32(vget_low_u32(d0.val[0]));
  a[1] = vreinterpret_u8_u32(vget_high_u32(d0.val[0]));
  a[2] = vreinterpret_u8_u32(vget_low_u32(d1.val[0]));
  a[3] = vreinterpret_u8_u32(vget_high_u32(d1.val[0]));
  a[4] = vreinterpret_u8_u32(vget_low_u32(d0.val[1]));
  a[5] = vreinterpret_u8_u32(vget_high_u32(d0.val[1]));
  a[6] = vreinterpret_u8_u32(vget_low_u32(d1.val[1]));
  a[7] = vreinterpret_u8_u32(vget_high_u32(d1.val[1]));
}

// As above, but the transposed rows are returned two per 128-bit register:
// out[0]: 00 10 20 30 40 50 60 70  01 11 21 31 41 51 61 71
// out[1]: 02 12 22 32 42 52 62 72  03 13 23 33 43 53 63 73
// out[2]: 04 14 24 34 44 54 64 74  05 15 25 35 45 55 65 75
// out[3]: 06 16 26 36 46 56 66 76  07 17 27 37 47 57 67 77
inline void Transpose8x8(uint8x8_t in[8], uint8x16_t out[4]) {
  const uint8x16x2_t a0 =
      vtrnq_u8(vcombine_u8(in[0], in[4]), vcombine_u8(in[1], in[5]));
  const uint8x16x2_t a1 =
      vtrnq_u8(vcombine_u8(in[2], in[6]), vcombine_u8(in[3], in[7]));

  const uint16x8x2_t b0 = vtrnq_u16(vreinterpretq_u16_u8(a0.val[0]),
                                    vreinterpretq_u16_u8(a1.val[0]));
  const uint16x8x2_t b1 = vtrnq_u16(vreinterpretq_u16_u8(a0.val[1]),
                                    vreinterpretq_u16_u8(a1.val[1]));

  const uint32x4x2_t c0 = vuzpq_u32(vreinterpretq_u32_u16(b0.val[0]),
                                    vreinterpretq_u32_u16(b1.val[0]));
  const uint32x4x2_t c1 = vuzpq_u32(vreinterpretq_u32_u16(b0.val[1]),
                                    vreinterpretq_u32_u16(b1.val[1]));

  out[0] = vreinterpretq_u8_u32(c0.val[0]);
  out[1] = vreinterpretq_u8_u32(c1.val[0]);
  out[2] = vreinterpretq_u8_u32(c0.val[1]);
  out[3] = vreinterpretq_u8_u32(c1.val[1]);
}

// Input:
// a[0]: 00 01 02 03 04 05 06 07
// a[1]: 10 11 12 13 14 15 16 17
// a[2]: 20 21 22 23 24 25 26 27
// a[3]: 30 31 32 33 34 35 36 37
// a[4]: 40 41 42 43 44 45 46 47
// a[5]: 50 51 52 53 54 55 56 57
// a[6]: 60 61 62 63 64 65 66 67
// a[7]: 70 71 72 73 74 75 76 77

// Output:
// a[0]: 00 10 20 30 40 50 60 70
// a[1]: 01 11 21 31 41 51 61 71
// a[2]: 02 12 22 32 42 52 62 72
// a[3]: 03 13 23 33 43 53 63 73
// a[4]: 04 14 24 34 44 54 64 74
// a[5]: 05 15 25 35 45 55 65 75
// a[6]: 06 16 26 36 46 56 66 76
// a[7]: 07 17 27 37 47 57 67 77
inline void Transpose8x8(int16x8_t a[8]) {
  const int16x8x2_t b0 = vtrnq_s16(a[0], a[1]);
  const int16x8x2_t b1 = vtrnq_s16(a[2], a[3]);
  const int16x8x2_t b2 = vtrnq_s16(a[4], a[5]);
  const int16x8x2_t b3 = vtrnq_s16(a[6], a[7]);

  const int32x4x2_t c0 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[0]),
                                   vreinterpretq_s32_s16(b1.val[0]));
  const int32x4x2_t c1 = vtrnq_s32(vreinterpretq_s32_s16(b0.val[1]),
                                   vreinterpretq_s32_s16(b1.val[1]));
  const int32x4x2_t c2 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[0]),
                                   vreinterpretq_s32_s16(b3.val[0]));
  const int32x4x2_t c3 = vtrnq_s32(vreinterpretq_s32_s16(b2.val[1]),
                                   vreinterpretq_s32_s16(b3.val[1]));

  const int16x8x2_t d0 = VtrnqS64(c0.val[0], c2.val[0]);
  const int16x8x2_t d1 = VtrnqS64(c1.val[0], c3.val[0]);
  const int16x8x2_t d2 = VtrnqS64(c0.val[1], c2.val[1]);
  const int16x8x2_t d3 = VtrnqS64(c1.val[1], c3.val[1]);

  a[0] = d0.val[0];
  a[1] = d1.val[0];
  a[2] = d2.val[0];
  a[3] = d3.val[0];
  a[4] = d0.val[1];
  a[5] = d1.val[1];
  a[6] = d2.val[1];
  a[7] = d3.val[1];
}

// Unsigned.
inline void Transpose8x8(uint16x8_t a[8]) {
  const uint16x8x2_t b0 = vtrnq_u16(a[0], a[1]);
  const uint16x8x2_t b1 = vtrnq_u16(a[2], a[3]);
  const uint16x8x2_t b2 = vtrnq_u16(a[4], a[5]);
  const uint16x8x2_t b3 = vtrnq_u16(a[6], a[7]);

  const uint32x4x2_t c0 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[0]),
                                    vreinterpretq_u32_u16(b1.val[0]));
  const uint32x4x2_t c1 = vtrnq_u32(vreinterpretq_u32_u16(b0.val[1]),
                                    vreinterpretq_u32_u16(b1.val[1]));
  const uint32x4x2_t c2 = vtrnq_u32(vreinterpretq_u32_u16(b2.val[0]),
                                    vreinterpretq_u32_u16(b3.val[0]));
  const uint32x4x2_t c3 = vtrnq_u32(vreinterpretq_u32_u16(b2.val[1]),
                                    vreinterpretq_u32_u16(b3.val[1]));

  const uint16x8x2_t d0 = VtrnqU64(c0.val[0], c2.val[0]);
  const uint16x8x2_t d1 = VtrnqU64(c1.val[0], c3.val[0]);
  const uint16x8x2_t d2 = VtrnqU64(c0.val[1], c2.val[1]);
  const uint16x8x2_t d3 = VtrnqU64(c1.val[1], c3.val[1]);

  a[0] = d0.val[0];
  a[1] = d1.val[0];
  a[2] = d2.val[0];
  a[3] = d3.val[0];
  a[4] = d0.val[1];
  a[5] = d1.val[1];
  a[6] = d2.val[1];
  a[7] = d3.val[1];
}

// Input:
// a[0]: 00 01 02 03 04 05 06 07  80 81 82 83 84 85 86 87
// a[1]: 10 11 12 13 14 15 16 17  90 91 92 93 94 95 96 97
// a[2]: 20 21 22 23 24 25 26 27  a0 a1 a2 a3 a4 a5 a6 a7
// a[3]: 30 31 32 33 34 35 36 37  b0 b1 b2 b3 b4 b5 b6 b7
// a[4]: 40 41 42 43 44 45 46 47  c0 c1 c2 c3 c4 c5 c6 c7
// a[5]: 50 51 52 53 54 55 56 57  d0 d1 d2 d3 d4 d5 d6 d7
// a[6]: 60 61 62 63 64 65 66 67  e0 e1 e2 e3 e4 e5 e6 e7
// a[7]: 70 71 72 73 74 75 76 77  f0 f1 f2 f3 f4 f5 f6 f7

// Output:
// a[0]: 00 10 20 30 40 50 60 70  80 90 a0 b0 c0 d0 e0 f0
// a[1]: 01 11 21 31 41 51 61 71  81 91 a1 b1 c1 d1 e1 f1
// a[2]: 02 12 22 32 42 52 62 72  82 92 a2 b2 c2 d2 e2 f2
// a[3]: 03 13 23 33 43 53 63 73  83 93 a3 b3 c3 d3 e3 f3
// a[4]: 04 14 24 34 44 54 64 74  84 94 a4 b4 c4 d4 e4 f4
// a[5]: 05 15 25 35 45 55 65 75  85 95 a5 b5 c5 d5 e5 f5
// a[6]: 06 16 26 36 46 56 66 76  86 96 a6 b6 c6 d6 e6 f6
// a[7]: 07 17 27 37 47 57 67 77  87 97 a7 b7 c7 d7 e7 f7
inline void Transpose8x16(uint8x16_t a[8]) {
  // b0.val[0]: 00 10 02 12 04 14 06 16  80 90 82 92 84 94 86 96
  // b0.val[1]: 01 11 03 13 05 15 07 17  81 91 83 93 85 95 87 97
  // b1.val[0]: 20 30 22 32 24 34 26 36  a0 b0 a2 b2 a4 b4 a6 b6
  // b1.val[1]: 21 31 23 33 25 35 27 37  a1 b1 a3 b3 a5 b5 a7 b7
  // b2.val[0]: 40 50 42 52 44 54 46 56  c0 d0 c2 d2 c4 d4 c6 d6
  // b2.val[1]: 41 51 43 53 45 55 47 57  c1 d1 c3 d3 c5 d5 c7 d7
  // b3.val[0]: 60 70 62 72 64 74 66 76  e0 f0 e2 f2 e4 f4 e6 f6
  // b3.val[1]: 61 71 63 73 65 75 67 77  e1 f1 e3 f3 e5 f5 e7 f7
  const uint8x16x2_t b0 = vtrnq_u8(a[0], a[1]);
  const uint8x16x2_t b1 = vtrnq_u8(a[2], a[3]);
  const uint8x16x2_t b2 = vtrnq_u8(a[4], a[5]);
  const uint8x16x2_t b3 = vtrnq_u8(a[6], a[7]);

  // c0.val[0]: 00 10 20 30 04 14 24 34  80 90 a0 b0 84 94 a4 b4
  // c0.val[1]: 02 12 22 32 06 16 26 36  82 92 a2 b2 86 96 a6 b6
  // c1.val[0]: 01 11 21 31 05 15 25 35  81 91 a1 b1 85 95 a5 b5
  // c1.val[1]: 03 13 23 33 07 17 27 37  83 93 a3 b3 87 97 a7 b7
  // c2.val[0]: 40 50 60 70 44 54 64 74  c0 d0 e0 f0 c4 d4 e4 f4
  // c2.val[1]: 42 52 62 72 46 56 66 76  c2 d2 e2 f2 c6 d6 e6 f6
  // c3.val[0]: 41 51 61 71 45 55 65 75  c1 d1 e1 f1 c5 d5 e5 f5
  // c3.val[1]: 43 53 63 73 47 57 67 77  c3 d3 e3 f3 c7 d7 e7 f7
  const uint16x8x2_t c0 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[0]),
                                    vreinterpretq_u16_u8(b1.val[0]));
  const uint16x8x2_t c1 = vtrnq_u16(vreinterpretq_u16_u8(b0.val[1]),
                                    vreinterpretq_u16_u8(b1.val[1]));
  const uint16x8x2_t c2 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[0]),
                                    vreinterpretq_u16_u8(b3.val[0]));
  const uint16x8x2_t c3 = vtrnq_u16(vreinterpretq_u16_u8(b2.val[1]),
                                    vreinterpretq_u16_u8(b3.val[1]));

  // d0.val[0]: 00 10 20 30 40 50 60 70  80 90 a0 b0 c0 d0 e0 f0
  // d0.val[1]: 04 14 24 34 44 54 64 74  84 94 a4 b4 c4 d4 e4 f4
  // d1.val[0]: 01 11 21 31 41 51 61 71  81 91 a1 b1 c1 d1 e1 f1
  // d1.val[1]: 05 15 25 35 45 55 65 75  85 95 a5 b5 c5 d5 e5 f5
  // d2.val[0]: 02 12 22 32 42 52 62 72  82 92 a2 b2 c2 d2 e2 f2
  // d2.val[1]: 06 16 26 36 46 56 66 76  86 96 a6 b6 c6 d6 e6 f6
  // d3.val[0]: 03 13 23 33 43 53 63 73  83 93 a3 b3 c3 d3 e3 f3
  // d3.val[1]: 07 17 27 37 47 57 67 77  87 97 a7 b7 c7 d7 e7 f7
  const uint32x4x2_t d0 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[0]),
                                    vreinterpretq_u32_u16(c2.val[0]));
  const uint32x4x2_t d1 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[0]),
                                    vreinterpretq_u32_u16(c3.val[0]));
  const uint32x4x2_t d2 = vtrnq_u32(vreinterpretq_u32_u16(c0.val[1]),
                                    vreinterpretq_u32_u16(c2.val[1]));
  const uint32x4x2_t d3 = vtrnq_u32(vreinterpretq_u32_u16(c1.val[1]),
                                    vreinterpretq_u32_u16(c3.val[1]));

  a[0] = vreinterpretq_u8_u32(d0.val[0]);
  a[1] = vreinterpretq_u8_u32(d1.val[0]);
  a[2] = vreinterpretq_u8_u32(d2.val[0]);
  a[3] = vreinterpretq_u8_u32(d3.val[0]);
  a[4] = vreinterpretq_u8_u32(d0.val[1]);
  a[5] = vreinterpretq_u8_u32(d1.val[1]);
  a[6] = vreinterpretq_u8_u32(d2.val[1]);
  a[7] = vreinterpretq_u8_u32(d3.val[1]);
}

// Zero extend the 8-bit lanes of |in| to 16 bits, returned as a signed vector
// for use in signed arithmetic.
inline int16x8_t ZeroExtend(const uint8x8_t in) {
  return vreinterpretq_s16_u16(vmovl_u8(in));
}
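
// An illustrative sketch (hypothetical |src|): promote 8-bit pixels before
// signed filter arithmetic.
//   const int16x8_t wide = ZeroExtend(vld1_u8(src));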

}  // namespace dsp
}  // namespace libgav1

#endif  // LIBGAV1_ENABLE_NEON
#endif  // LIBGAV1_SRC_DSP_ARM_COMMON_NEON_H_