/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vp8_rtcd.h"

static const int16_t cospi8sqrt2minus1 = 20091;
// 35468 exceeds INT16_MAX and gets converted to a negative number. Because of
// the way it is used in vqdmulh, where the result is doubled, it can be divided
// by 2 beforehand. This saves compensating for the negative value as well as
// shifting the result.
static const int16_t sinpi8sqrt2 = 35468 >> 1;
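// Spelled out, the trick above: vqdmulhq_n_s16(x, c) returns (2 * x * c) >> 16,
// so with c = 35468 >> 1 the result is (x * 35468) >> 16, which is the product
// the transform needs, with no extra shift or sign fix-up required.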

void vp8_dequant_idct_add_neon(int16_t *input, int16_t *dq, unsigned char *dst,
                               int stride) {
  unsigned char *dst0;
  int32x2_t d14, d15;
  int16x4_t d2, d3, d4, d5, d10, d11, d12, d13;
  int16x8_t q1, q2, q3, q4, q5, q6;
  int16x8_t qEmpty = vdupq_n_s16(0);
  int32x2x2_t d2tmp0, d2tmp1;
  int16x4x2_t d2tmp2, d2tmp3;

  d14 = d15 = vdup_n_s32(0);

  // load input and clear the coefficient buffer as it is consumed
  q3 = vld1q_s16(input);
  vst1q_s16(input, qEmpty);
  input += 8;
  q4 = vld1q_s16(input);
  vst1q_s16(input, qEmpty);

  // load dq
  q5 = vld1q_s16(dq);
  dq += 8;
  q6 = vld1q_s16(dq);

  // load src from dst
  dst0 = dst;
  d14 = vld1_lane_s32((const int32_t *)dst0, d14, 0);
  dst0 += stride;
  d14 = vld1_lane_s32((const int32_t *)dst0, d14, 1);
  dst0 += stride;
  d15 = vld1_lane_s32((const int32_t *)dst0, d15, 0);
  dst0 += stride;
  d15 = vld1_lane_s32((const int32_t *)dst0, d15, 1);

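  // dequantize: multiply each coefficient by its dequantization factor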
  q1 = vreinterpretq_s16_u16(
      vmulq_u16(vreinterpretq_u16_s16(q3), vreinterpretq_u16_s16(q5)));
  q2 = vreinterpretq_s16_u16(
      vmulq_u16(vreinterpretq_u16_s16(q4), vreinterpretq_u16_s16(q6)));

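  // first pass of the 4x4 inverse transform, all four columns at once;
  // even part: sum and difference of coefficient rows 0 and 2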
  d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2));
  d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2));

  q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2));

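  // odd part: rows 1 and 3 scaled by the two transform constants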
  q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);

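  // vqdmulh doubled the product, so halve q4 to get (x * 20091) >> 16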
  q4 = vshrq_n_s16(q4, 1);

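  // 20091/65536 is cos(pi/8) * sqrt(2) - 1, so add x back in to complete the
  // scale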
  q4 = vqaddq_s16(q4, q2);

  d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
  d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));

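  // recombine the even and odd halves into the four output rows of this pass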
  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

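  // 4x4 transpose (32-bit then 16-bit vtrn) so the second pass can work on
  // the other dimension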
  d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
                    vreinterpret_s16_s32(d2tmp1.val[0]));
  d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
                    vreinterpret_s16_s32(d2tmp1.val[1]));

  // loop 2: second pass, the same butterfly applied to the transposed data
  q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]);

  q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);
  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);

  d12 = vqadd_s16(d2tmp2.val[0], d2tmp3.val[0]);
  d13 = vqsub_s16(d2tmp2.val[0], d2tmp3.val[0]);

  q4 = vshrq_n_s16(q4, 1);

  q4 = vqaddq_s16(q4, q2);

  d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));
  d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));

  d2 = vqadd_s16(d12, d11);
  d3 = vqadd_s16(d13, d10);
  d4 = vqsub_s16(d13, d10);
  d5 = vqsub_s16(d12, d11);

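  // vrshr_n rounds: each lane becomes (x + 4) >> 3, the final transform scaling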
  d2 = vrshr_n_s16(d2, 3);
  d3 = vrshr_n_s16(d3, 3);
  d4 = vrshr_n_s16(d4, 3);
  d5 = vrshr_n_s16(d5, 3);

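  // transpose back so the residual is in spatial (row) order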
  d2tmp0 = vtrn_s32(vreinterpret_s32_s16(d2), vreinterpret_s32_s16(d4));
  d2tmp1 = vtrn_s32(vreinterpret_s32_s16(d3), vreinterpret_s32_s16(d5));
  d2tmp2 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[0]),
                    vreinterpret_s16_s32(d2tmp1.val[0]));
  d2tmp3 = vtrn_s16(vreinterpret_s16_s32(d2tmp0.val[1]),
                    vreinterpret_s16_s32(d2tmp1.val[1]));

  q1 = vcombine_s16(d2tmp2.val[0], d2tmp2.val[1]);
  q2 = vcombine_s16(d2tmp3.val[0], d2tmp3.val[1]);

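  // reconstruct: widen the predictor bytes and add them to the residual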
  q1 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q1), vreinterpret_u8_s32(d14)));
  q2 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q2), vreinterpret_u8_s32(d15)));

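  // saturate to [0, 255] and narrow back to bytes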
  d14 = vreinterpret_s32_u8(vqmovun_s16(q1));
  d15 = vreinterpret_s32_u8(vqmovun_s16(q2));

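  // store the four reconstructed 4-pixel rows back to dst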
  dst0 = dst;
  vst1_lane_s32((int32_t *)dst0, d14, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst0, d14, 1);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst0, d15, 0);
  dst0 += stride;
  vst1_lane_s32((int32_t *)dst0, d15, 1);
  return;
}