/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vp8_rtcd.h"
#include "vpx_ports/arm.h"

#ifdef VPX_INCOMPATIBLE_GCC
#include "./vp8_rtcd.h"
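// VPX_INCOMPATIBLE_GCC (see vpx_ports/arm.h) marks toolchains whose NEON
// intrinsics support is known to be broken; use the C fallback there.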
void vp8_short_walsh4x4_neon(int16_t *input, int16_t *output, int pitch) {
  vp8_short_walsh4x4_c(input, output, pitch);
}
#else
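// NEON forward 4x4 Walsh-Hadamard transform, equivalent to
// vp8_short_walsh4x4_c: a row pass followed by a column pass, reproducing
// the C version's zero- and sign-dependent rounding adjustments.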
void vp8_short_walsh4x4_neon(int16_t *input, int16_t *output, int pitch) {
  uint16x4_t d16u16;
  int16x8_t q0s16, q1s16;
  int16x4_t dEmptys16, d0s16, d1s16, d2s16, d3s16, d4s16, d5s16, d6s16, d7s16;
  int32x4_t qEmptys32, q0s32, q1s32, q2s32, q3s32, q8s32;
  int32x4_t q9s32, q10s32, q11s32, q15s32;
  uint32x4_t q8u32, q9u32, q10u32, q11u32;
  int16x4x2_t v2tmp0, v2tmp1;
  int32x2x2_t v2tmp2, v2tmp3;

  dEmptys16 = vdup_n_s16(0);
  qEmptys32 = vdupq_n_s32(0);
  q15s32 = vdupq_n_s32(3);
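  // Load the four rows of the 4x4 block; pitch is in bytes, so advance the
  // int16_t pointer by pitch / 2 elements per row.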
  d0s16 = vld1_s16(input);
  input += pitch / 2;
  d1s16 = vld1_s16(input);
  input += pitch / 2;
  d2s16 = vld1_s16(input);
  input += pitch / 2;
  d3s16 = vld1_s16(input);
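  // Transpose the 4x4 block so that each 64-bit register holds one column
  // (with one input row per lane).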
  v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16));
  v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));
  v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[0]),   // d0
                    vreinterpret_s16_s32(v2tmp3.val[0]));  // d1
  v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp2.val[1]),   // d2
                    vreinterpret_s16_s32(v2tmp3.val[1]));  // d3
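  // First-pass butterfly, one input row per lane: sums and differences of
  // columns 0/2 and 1/3, then everything is scaled by 4 (<< 2).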
  d4s16 = vadd_s16(v2tmp0.val[0], v2tmp1.val[0]);
  d5s16 = vadd_s16(v2tmp0.val[1], v2tmp1.val[1]);
  d6s16 = vsub_s16(v2tmp0.val[1], v2tmp1.val[1]);
  d7s16 = vsub_s16(v2tmp0.val[0], v2tmp1.val[0]);

  d4s16 = vshl_n_s16(d4s16, 2);
  d5s16 = vshl_n_s16(d5s16, 2);
  d6s16 = vshl_n_s16(d6s16, 2);
  d7s16 = vshl_n_s16(d7s16, 2);
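  // The first output of each row is biased by 1 when the scaled sum d4 is
  // nonzero: build an all-ones (-1) mask on those lanes so that subtracting
  // it from d0 below adds the required 1.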
  d16u16 = vceq_s16(d4s16, dEmptys16);
  d16u16 = vmvn_u16(d16u16);
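  // Second butterfly stage of the row pass: d0..d3 become the four outputs
  // of each row (still one row per lane).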
  d0s16 = vadd_s16(d4s16, d5s16);
  d3s16 = vsub_s16(d4s16, d5s16);
  d1s16 = vadd_s16(d7s16, d6s16);
  d2s16 = vsub_s16(d7s16, d6s16);
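  // Apply the nonzero bias: subtracting the -1 mask adds 1 to those lanes.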
  d0s16 = vsub_s16(d0s16, vreinterpret_s16_u16(d16u16));

  // Column pass: transpose the intermediate block so that each register
  // holds one of its rows, with one intermediate column per lane (this
  // mirrors the second loop of vp8_short_walsh4x4_c).
  v2tmp2 = vtrn_s32(vreinterpret_s32_s16(d1s16), vreinterpret_s32_s16(d3s16));
  v2tmp3 = vtrn_s32(vreinterpret_s32_s16(d0s16), vreinterpret_s32_s16(d2s16));
  v2tmp0 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[1]),   // d2
                    vreinterpret_s16_s32(v2tmp2.val[1]));  // d3
  v2tmp1 = vtrn_s16(vreinterpret_s16_s32(v2tmp3.val[0]),   // d0
                    vreinterpret_s16_s32(v2tmp2.val[0]));  // d1
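  // Column-pass butterfly, widened to 32 bits: sums and differences of
  // intermediate rows 0/2 and 1/3.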
  q8s32 = vaddl_s16(v2tmp1.val[0], v2tmp0.val[0]);
  q9s32 = vaddl_s16(v2tmp1.val[1], v2tmp0.val[1]);
  q10s32 = vsubl_s16(v2tmp1.val[1], v2tmp0.val[1]);
  q11s32 = vsubl_s16(v2tmp1.val[0], v2tmp0.val[0]);
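  // Second butterfly stage: q0..q3 hold the four output rows (one output
  // column per lane) before rounding.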
  q0s32 = vaddq_s32(q8s32, q9s32);
  q1s32 = vaddq_s32(q11s32, q10s32);
  q2s32 = vsubq_s32(q11s32, q10s32);
  q3s32 = vsubq_s32(q8s32, q9s32);
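  // Round negative values up by 1 before the final shift: vclt yields an
  // all-ones (-1) mask where a value is negative, and subtracting that mask
  // adds 1 to exactly those lanes.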
  q8u32 = vcltq_s32(q0s32, qEmptys32);
  q9u32 = vcltq_s32(q1s32, qEmptys32);
  q10u32 = vcltq_s32(q2s32, qEmptys32);
  q11u32 = vcltq_s32(q3s32, qEmptys32);

  q8s32 = vreinterpretq_s32_u32(q8u32);
  q9s32 = vreinterpretq_s32_u32(q9u32);
  q10s32 = vreinterpretq_s32_u32(q10u32);
  q11s32 = vreinterpretq_s32_u32(q11u32);

  q0s32 = vsubq_s32(q0s32, q8s32);
  q1s32 = vsubq_s32(q1s32, q9s32);
  q2s32 = vsubq_s32(q2s32, q10s32);
  q3s32 = vsubq_s32(q3s32, q11s32);
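  // Add the rounding constant 3, then shift right by 3 while narrowing back
  // to 16 bits.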
  q8s32 = vaddq_s32(q0s32, q15s32);
  q9s32 = vaddq_s32(q1s32, q15s32);
  q10s32 = vaddq_s32(q2s32, q15s32);
  q11s32 = vaddq_s32(q3s32, q15s32);

  d0s16 = vshrn_n_s32(q8s32, 3);
  d1s16 = vshrn_n_s32(q9s32, 3);
  d2s16 = vshrn_n_s32(q10s32, 3);
  d3s16 = vshrn_n_s32(q11s32, 3);
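  // Pack the four output rows and store all 16 coefficients contiguously.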
  q0s16 = vcombine_s16(d0s16, d1s16);
  q1s16 = vcombine_s16(d2s16, d3s16);

  vst1q_s16(output, q0s16);
  vst1q_s16(output + 8, q1s16);
  return;
}
#endif  // VPX_INCOMPATIBLE_GCC