/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"

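// Helpers for moving 8-wide rows of coefficients between the transpose
// buffer (rows of 8 int32s) and the 32x32 intermediate buffer (rows of 32
// int32s).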
static INLINE void load_from_transformed(const int32_t *const trans_buf,
                                         const int first, const int second,
                                         int32x4x2_t *const q0,
                                         int32x4x2_t *const q1) {
  q0->val[0] = vld1q_s32(trans_buf + first * 8);
  q0->val[1] = vld1q_s32(trans_buf + first * 8 + 4);
  q1->val[0] = vld1q_s32(trans_buf + second * 8);
  q1->val[1] = vld1q_s32(trans_buf + second * 8 + 4);
}

static INLINE void load_from_output(const int32_t *const out, const int first,
                                    const int second, int32x4x2_t *const q0,
                                    int32x4x2_t *const q1) {
  q0->val[0] = vld1q_s32(out + first * 32);
  q0->val[1] = vld1q_s32(out + first * 32 + 4);
  q1->val[0] = vld1q_s32(out + second * 32);
  q1->val[1] = vld1q_s32(out + second * 32 + 4);
}

static INLINE void store_in_output(int32_t *const out, const int first,
                                   const int second, const int32x4x2_t q0,
                                   const int32x4x2_t q1) {
  vst1q_s32(out + first * 32, q0.val[0]);
  vst1q_s32(out + first * 32 + 4, q0.val[1]);
  vst1q_s32(out + second * 32, q1.val[0]);
  vst1q_s32(out + second * 32 + 4, q1.val[1]);
}

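// Round the four 8-wide results by 6 bits, add them to the prediction,
// clamp to [0, max] and store them back: the row at p1 and the row below
// it, plus the row at p2 and the row above it.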
static INLINE void highbd_store_combine_results(
    uint16_t *p1, uint16_t *p2, const int stride, const int32x4x2_t q0,
    const int32x4x2_t q1, const int32x4x2_t q2, const int32x4x2_t q3,
    const int16x8_t max) {
  int16x8_t o[4];
  uint16x8_t d[4];

  d[0] = vld1q_u16(p1);
  p1 += stride;
  d[1] = vld1q_u16(p1);
  d[3] = vld1q_u16(p2);
  p2 -= stride;
  d[2] = vld1q_u16(p2);

  o[0] = vcombine_s16(vrshrn_n_s32(q0.val[0], 6), vrshrn_n_s32(q0.val[1], 6));
  o[1] = vcombine_s16(vrshrn_n_s32(q1.val[0], 6), vrshrn_n_s32(q1.val[1], 6));
  o[2] = vcombine_s16(vrshrn_n_s32(q2.val[0], 6), vrshrn_n_s32(q2.val[1], 6));
  o[3] = vcombine_s16(vrshrn_n_s32(q3.val[0], 6), vrshrn_n_s32(q3.val[1], 6));

  o[0] = vqaddq_s16(o[0], vreinterpretq_s16_u16(d[0]));
  o[1] = vqaddq_s16(o[1], vreinterpretq_s16_u16(d[1]));
  o[2] = vqaddq_s16(o[2], vreinterpretq_s16_u16(d[2]));
  o[3] = vqaddq_s16(o[3], vreinterpretq_s16_u16(d[3]));
  o[0] = vminq_s16(o[0], max);
  o[1] = vminq_s16(o[1], max);
  o[2] = vminq_s16(o[2], max);
  o[3] = vminq_s16(o[3], max);
  d[0] = vqshluq_n_s16(o[0], 0);
  d[1] = vqshluq_n_s16(o[1], 0);
  d[2] = vqshluq_n_s16(o[2], 0);
  d[3] = vqshluq_n_s16(o[3], 0);

  vst1q_u16(p1, d[1]);
  p1 -= stride;
  vst1q_u16(p1, d[0]);
  vst1q_u16(p2, d[2]);
  p2 += stride;
  vst1q_u16(p2, d[3]);
}

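// Butterfly with a rounding shift:
//   *qOut0 = ROUND_POWER_OF_TWO(qIn0 * first_const - qIn1 * second_const,
//                               DCT_CONST_BITS)
//   *qOut1 = ROUND_POWER_OF_TWO(qIn0 * second_const + qIn1 * first_const,
//                               DCT_CONST_BITS)
// The products are accumulated in 64 bits so they cannot overflow before the
// rounding narrow.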
static INLINE void do_butterfly(const int32x4x2_t qIn0, const int32x4x2_t qIn1,
                                const int32_t first_const,
                                const int32_t second_const,
                                int32x4x2_t *const qOut0,
                                int32x4x2_t *const qOut1) {
  int64x2x2_t q[4];
  int32x2_t d[6];

  // Note: using v{mul, mla, mls}l_n_s32 here slows down 35% with gcc 4.9.
  d[4] = vdup_n_s32(first_const);
  d[5] = vdup_n_s32(second_const);

  q[0].val[0] = vmull_s32(vget_low_s32(qIn0.val[0]), d[4]);
  q[0].val[1] = vmull_s32(vget_high_s32(qIn0.val[0]), d[4]);
  q[1].val[0] = vmull_s32(vget_low_s32(qIn0.val[1]), d[4]);
  q[1].val[1] = vmull_s32(vget_high_s32(qIn0.val[1]), d[4]);
  q[0].val[0] = vmlsl_s32(q[0].val[0], vget_low_s32(qIn1.val[0]), d[5]);
  q[0].val[1] = vmlsl_s32(q[0].val[1], vget_high_s32(qIn1.val[0]), d[5]);
  q[1].val[0] = vmlsl_s32(q[1].val[0], vget_low_s32(qIn1.val[1]), d[5]);
  q[1].val[1] = vmlsl_s32(q[1].val[1], vget_high_s32(qIn1.val[1]), d[5]);

  q[2].val[0] = vmull_s32(vget_low_s32(qIn0.val[0]), d[5]);
  q[2].val[1] = vmull_s32(vget_high_s32(qIn0.val[0]), d[5]);
  q[3].val[0] = vmull_s32(vget_low_s32(qIn0.val[1]), d[5]);
  q[3].val[1] = vmull_s32(vget_high_s32(qIn0.val[1]), d[5]);
  q[2].val[0] = vmlal_s32(q[2].val[0], vget_low_s32(qIn1.val[0]), d[4]);
  q[2].val[1] = vmlal_s32(q[2].val[1], vget_high_s32(qIn1.val[0]), d[4]);
  q[3].val[0] = vmlal_s32(q[3].val[0], vget_low_s32(qIn1.val[1]), d[4]);
  q[3].val[1] = vmlal_s32(q[3].val[1], vget_high_s32(qIn1.val[1]), d[4]);

  qOut0->val[0] = vcombine_s32(vrshrn_n_s64(q[0].val[0], DCT_CONST_BITS),
                               vrshrn_n_s64(q[0].val[1], DCT_CONST_BITS));
  qOut0->val[1] = vcombine_s32(vrshrn_n_s64(q[1].val[0], DCT_CONST_BITS),
                               vrshrn_n_s64(q[1].val[1], DCT_CONST_BITS));
  qOut1->val[0] = vcombine_s32(vrshrn_n_s64(q[2].val[0], DCT_CONST_BITS),
                               vrshrn_n_s64(q[2].val[1], DCT_CONST_BITS));
  qOut1->val[1] = vcombine_s32(vrshrn_n_s64(q[3].val[0], DCT_CONST_BITS),
                               vrshrn_n_s64(q[3].val[1], DCT_CONST_BITS));
}

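// Load an 8x8 block of int32 coefficients from a buffer whose rows are 32
// elements apart.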
static INLINE void load_s32x4q_dual(const int32_t *in, int32x4x2_t *const s) {
  s[0].val[0] = vld1q_s32(in);
  s[0].val[1] = vld1q_s32(in + 4);
  in += 32;
  s[1].val[0] = vld1q_s32(in);
  s[1].val[1] = vld1q_s32(in + 4);
  in += 32;
  s[2].val[0] = vld1q_s32(in);
  s[2].val[1] = vld1q_s32(in + 4);
  in += 32;
  s[3].val[0] = vld1q_s32(in);
  s[3].val[1] = vld1q_s32(in + 4);
  in += 32;
  s[4].val[0] = vld1q_s32(in);
  s[4].val[1] = vld1q_s32(in + 4);
  in += 32;
  s[5].val[0] = vld1q_s32(in);
  s[5].val[1] = vld1q_s32(in + 4);
  in += 32;
  s[6].val[0] = vld1q_s32(in);
  s[6].val[1] = vld1q_s32(in + 4);
  in += 32;
  s[7].val[0] = vld1q_s32(in);
  s[7].val[1] = vld1q_s32(in + 4);
}

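// Transpose the 8x8 block held in a[] and store it contiguously, advancing
// *out by 64 elements.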
static INLINE void transpose_and_store_s32_8x8(int32x4x2_t *const a,
                                               int32_t **out) {
  transpose_s32_8x8(&a[0], &a[1], &a[2], &a[3], &a[4], &a[5], &a[6], &a[7]);

  vst1q_s32(*out, a[0].val[0]);
  *out += 4;
  vst1q_s32(*out, a[0].val[1]);
  *out += 4;
  vst1q_s32(*out, a[1].val[0]);
  *out += 4;
  vst1q_s32(*out, a[1].val[1]);
  *out += 4;
  vst1q_s32(*out, a[2].val[0]);
  *out += 4;
  vst1q_s32(*out, a[2].val[1]);
  *out += 4;
  vst1q_s32(*out, a[3].val[0]);
  *out += 4;
  vst1q_s32(*out, a[3].val[1]);
  *out += 4;
  vst1q_s32(*out, a[4].val[0]);
  *out += 4;
  vst1q_s32(*out, a[4].val[1]);
  *out += 4;
  vst1q_s32(*out, a[5].val[0]);
  *out += 4;
  vst1q_s32(*out, a[5].val[1]);
  *out += 4;
  vst1q_s32(*out, a[6].val[0]);
  *out += 4;
  vst1q_s32(*out, a[6].val[1]);
  *out += 4;
  vst1q_s32(*out, a[7].val[0]);
  *out += 4;
  vst1q_s32(*out, a[7].val[1]);
  *out += 4;
}

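// Transpose an 8x32 band of the input into t_buf as four consecutive 8x8
// transposes, so each of the band's 32 columns becomes a row of 8 in t_buf.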
static INLINE void idct32_transpose_pair(const int32_t *input, int32_t *t_buf) {
  int i;
  int32x4x2_t s[8];

  for (i = 0; i < 4; i++, input += 8) {
    load_s32x4q_dual(input, s);
    transpose_and_store_s32_8x8(s, &t_buf);
  }
}

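// Last butterfly stage of the first pass: combine the partial results held
// in q with the bands already stored in out, and write all 32 rows of the
// current 8-column slice back to the intermediate buffer.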
static INLINE void idct32_bands_end_1st_pass(int32_t *const out,
                                             int32x4x2_t *const q) {
  store_in_output(out, 16, 17, q[6], q[7]);
  store_in_output(out, 14, 15, q[8], q[9]);

  load_from_output(out, 30, 31, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  store_in_output(out, 30, 31, q[6], q[7]);
  store_in_output(out, 0, 1, q[4], q[5]);

  load_from_output(out, 12, 13, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[10], q[1]);
  q[3] = highbd_idct_add_dual(q[11], q[0]);
  q[4] = highbd_idct_sub_dual(q[11], q[0]);
  q[5] = highbd_idct_sub_dual(q[10], q[1]);

  load_from_output(out, 18, 19, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  store_in_output(out, 18, 19, q[6], q[7]);
  store_in_output(out, 12, 13, q[8], q[9]);

  load_from_output(out, 28, 29, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  store_in_output(out, 28, 29, q[6], q[7]);
  store_in_output(out, 2, 3, q[4], q[5]);

  load_from_output(out, 10, 11, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[12], q[1]);
  q[3] = highbd_idct_add_dual(q[13], q[0]);
  q[4] = highbd_idct_sub_dual(q[13], q[0]);
  q[5] = highbd_idct_sub_dual(q[12], q[1]);

  load_from_output(out, 20, 21, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  store_in_output(out, 20, 21, q[6], q[7]);
  store_in_output(out, 10, 11, q[8], q[9]);

  load_from_output(out, 26, 27, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  store_in_output(out, 26, 27, q[6], q[7]);
  store_in_output(out, 4, 5, q[4], q[5]);

  load_from_output(out, 8, 9, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[14], q[1]);
  q[3] = highbd_idct_add_dual(q[15], q[0]);
  q[4] = highbd_idct_sub_dual(q[15], q[0]);
  q[5] = highbd_idct_sub_dual(q[14], q[1]);

  load_from_output(out, 22, 23, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  store_in_output(out, 22, 23, q[6], q[7]);
  store_in_output(out, 8, 9, q[8], q[9]);

  load_from_output(out, 24, 25, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  store_in_output(out, 24, 25, q[6], q[7]);
  store_in_output(out, 6, 7, q[4], q[5]);
}

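// Last butterfly stage of the second pass: the same combination as above,
// but each pair of rows is rounded, added to the prediction in dest and
// clamped via highbd_store_combine_results, working inward from rows 0 and
// 31 and outward from rows 15 and 16.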
static INLINE void idct32_bands_end_2nd_pass(const int32_t *const out,
                                             uint16_t *const dest,
                                             const int stride,
                                             const int16x8_t max,
                                             int32x4x2_t *const q) {
  uint16_t *dest0 = dest + 0 * stride;
  uint16_t *dest1 = dest + 31 * stride;
  uint16_t *dest2 = dest + 16 * stride;
  uint16_t *dest3 = dest + 15 * stride;
  const int str2 = stride << 1;

  highbd_store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9],
                               max);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 30, 31, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  highbd_store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7],
                               max);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 12, 13, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[10], q[1]);
  q[3] = highbd_idct_add_dual(q[11], q[0]);
  q[4] = highbd_idct_sub_dual(q[11], q[0]);
  q[5] = highbd_idct_sub_dual(q[10], q[1]);

  load_from_output(out, 18, 19, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  highbd_store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9],
                               max);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 28, 29, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  highbd_store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7],
                               max);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 10, 11, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[12], q[1]);
  q[3] = highbd_idct_add_dual(q[13], q[0]);
  q[4] = highbd_idct_sub_dual(q[13], q[0]);
  q[5] = highbd_idct_sub_dual(q[12], q[1]);

  load_from_output(out, 20, 21, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  highbd_store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9],
                               max);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 26, 27, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  highbd_store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7],
                               max);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 8, 9, &q[0], &q[1]);
  q[2] = highbd_idct_add_dual(q[14], q[1]);
  q[3] = highbd_idct_add_dual(q[15], q[0]);
  q[4] = highbd_idct_sub_dual(q[15], q[0]);
  q[5] = highbd_idct_sub_dual(q[14], q[1]);

  load_from_output(out, 22, 23, &q[0], &q[1]);
  q[8] = highbd_idct_add_dual(q[4], q[1]);
  q[9] = highbd_idct_add_dual(q[5], q[0]);
  q[6] = highbd_idct_sub_dual(q[5], q[0]);
  q[7] = highbd_idct_sub_dual(q[4], q[1]);
  highbd_store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9],
                               max);

  load_from_output(out, 24, 25, &q[0], &q[1]);
  q[4] = highbd_idct_add_dual(q[2], q[1]);
  q[5] = highbd_idct_add_dual(q[3], q[0]);
  q[6] = highbd_idct_sub_dual(q[3], q[0]);
  q[7] = highbd_idct_sub_dual(q[2], q[1]);
  highbd_store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7],
                               max);
}

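// Two-pass 32x32 inverse transform. Pass 1 works on the rows of input and
// stores to pass1[]; pass 2 re-reads pass1[] as its input, uses pass2[] as
// scratch and adds the final, clamped result into dst. Each pass transposes
// and transforms its input eight rows at a time (four bands).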
static INLINE void vpx_highbd_idct32_32_neon(const tran_low_t *input,
                                             uint16_t *dst, const int stride,
                                             const int bd) {
  int i, idct32_pass_loop;
  int32_t trans_buf[32 * 8];
  int32_t pass1[32 * 32];
  int32_t pass2[32 * 32];
  int32_t *out;
  int32x4x2_t q[16];

  for (idct32_pass_loop = 0, out = pass1; idct32_pass_loop < 2;
       idct32_pass_loop++, input = pass1, out = pass2) {
    for (i = 0; i < 4; i++, out += 8) {  // idct32_bands_loop
      idct32_transpose_pair(input, trans_buf);
      input += 32 * 8;

      // -----------------------------------------
      // BLOCK A: 16-19,28-31
      // -----------------------------------------
      // generate 16,17,30,31
      // part of stage 1
      load_from_transformed(trans_buf, 1, 31, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_31_64, cospi_1_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 17, 15, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_15_64, cospi_17_64, &q[1], &q[3]);
      // part of stage 2
      q[4] = highbd_idct_add_dual(q[0], q[1]);
      q[13] = highbd_idct_sub_dual(q[0], q[1]);
      q[6] = highbd_idct_add_dual(q[2], q[3]);
      q[14] = highbd_idct_sub_dual(q[2], q[3]);
      // part of stage 3
      do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[5], &q[7]);

      // generate 18,19,28,29
      // part of stage 1
      load_from_transformed(trans_buf, 9, 23, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_23_64, cospi_9_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 25, 7, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_7_64, cospi_25_64, &q[1], &q[3]);
      // part of stage 2
      q[13] = highbd_idct_sub_dual(q[3], q[2]);
      q[3] = highbd_idct_add_dual(q[3], q[2]);
      q[14] = highbd_idct_sub_dual(q[1], q[0]);
      q[2] = highbd_idct_add_dual(q[1], q[0]);
      // part of stage 3
      do_butterfly(q[14], q[13], -cospi_4_64, -cospi_28_64, &q[1], &q[0]);
      // part of stage 4
      q[8] = highbd_idct_add_dual(q[4], q[2]);
      q[9] = highbd_idct_add_dual(q[5], q[0]);
      q[10] = highbd_idct_add_dual(q[7], q[1]);
      q[15] = highbd_idct_add_dual(q[6], q[3]);
      q[13] = highbd_idct_sub_dual(q[5], q[0]);
      q[14] = highbd_idct_sub_dual(q[7], q[1]);
      store_in_output(out, 16, 31, q[8], q[15]);
      store_in_output(out, 17, 30, q[9], q[10]);
      // part of stage 5
      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[0], &q[1]);
      store_in_output(out, 29, 18, q[1], q[0]);
      // part of stage 4
      q[13] = highbd_idct_sub_dual(q[4], q[2]);
      q[14] = highbd_idct_sub_dual(q[6], q[3]);
      // part of stage 5
      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[4], &q[6]);
      store_in_output(out, 19, 28, q[4], q[6]);

      // -----------------------------------------
      // BLOCK B: 20-23,24-27
      // -----------------------------------------
      // generate 20,21,26,27
      // part of stage 1
      load_from_transformed(trans_buf, 5, 27, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_27_64, cospi_5_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 21, 11, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_11_64, cospi_21_64, &q[1], &q[3]);
      // part of stage 2
      q[13] = highbd_idct_sub_dual(q[0], q[1]);
      q[0] = highbd_idct_add_dual(q[0], q[1]);
      q[14] = highbd_idct_sub_dual(q[2], q[3]);
      q[2] = highbd_idct_add_dual(q[2], q[3]);
      // part of stage 3
      do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);

      // generate 22,23,24,25
      // part of stage 1
      load_from_transformed(trans_buf, 13, 19, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_19_64, cospi_13_64, &q[5], &q[7]);
      load_from_transformed(trans_buf, 29, 3, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_3_64, cospi_29_64, &q[4], &q[6]);
      // part of stage 2
      q[14] = highbd_idct_sub_dual(q[4], q[5]);
      q[5] = highbd_idct_add_dual(q[4], q[5]);
      q[13] = highbd_idct_sub_dual(q[6], q[7]);
      q[6] = highbd_idct_add_dual(q[6], q[7]);
      // part of stage 3
      do_butterfly(q[14], q[13], -cospi_20_64, -cospi_12_64, &q[4], &q[7]);
      // part of stage 4
      q[10] = highbd_idct_add_dual(q[7], q[1]);
      q[11] = highbd_idct_add_dual(q[5], q[0]);
      q[12] = highbd_idct_add_dual(q[6], q[2]);
      q[15] = highbd_idct_add_dual(q[4], q[3]);
      // part of stage 6
      load_from_output(out, 16, 17, &q[14], &q[13]);
      q[8] = highbd_idct_add_dual(q[14], q[11]);
      q[9] = highbd_idct_add_dual(q[13], q[10]);
      q[13] = highbd_idct_sub_dual(q[13], q[10]);
      q[11] = highbd_idct_sub_dual(q[14], q[11]);
      store_in_output(out, 17, 16, q[9], q[8]);
      load_from_output(out, 30, 31, &q[14], &q[9]);
      q[8] = highbd_idct_sub_dual(q[9], q[12]);
      q[10] = highbd_idct_add_dual(q[14], q[15]);
      q[14] = highbd_idct_sub_dual(q[14], q[15]);
      q[12] = highbd_idct_add_dual(q[9], q[12]);
      store_in_output(out, 30, 31, q[10], q[12]);
      // part of stage 7
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]);
      store_in_output(out, 25, 22, q[14], q[13]);
      do_butterfly(q[8], q[11], cospi_16_64, cospi_16_64, &q[13], &q[14]);
      store_in_output(out, 24, 23, q[14], q[13]);
      // part of stage 4
      q[14] = highbd_idct_sub_dual(q[5], q[0]);
      q[13] = highbd_idct_sub_dual(q[6], q[2]);
      do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[5], &q[6]);
      q[14] = highbd_idct_sub_dual(q[7], q[1]);
      q[13] = highbd_idct_sub_dual(q[4], q[3]);
      do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[0], &q[1]);
      // part of stage 6
      load_from_output(out, 18, 19, &q[14], &q[13]);
      q[8] = highbd_idct_add_dual(q[14], q[1]);
      q[9] = highbd_idct_add_dual(q[13], q[6]);
      q[13] = highbd_idct_sub_dual(q[13], q[6]);
      q[1] = highbd_idct_sub_dual(q[14], q[1]);
      store_in_output(out, 18, 19, q[8], q[9]);
      load_from_output(out, 28, 29, &q[8], &q[9]);
      q[14] = highbd_idct_sub_dual(q[8], q[5]);
      q[10] = highbd_idct_add_dual(q[8], q[5]);
      q[11] = highbd_idct_add_dual(q[9], q[0]);
      q[0] = highbd_idct_sub_dual(q[9], q[0]);
      store_in_output(out, 28, 29, q[10], q[11]);
      // part of stage 7
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]);
      store_in_output(out, 20, 27, q[13], q[14]);
      do_butterfly(q[0], q[1], cospi_16_64, cospi_16_64, &q[1], &q[0]);
      store_in_output(out, 21, 26, q[1], q[0]);

      // -----------------------------------------
      // BLOCK C: 8-10,11-15
      // -----------------------------------------
      // generate 8,9,14,15
      // part of stage 2
      load_from_transformed(trans_buf, 2, 30, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_30_64, cospi_2_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 18, 14, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_14_64, cospi_18_64, &q[1], &q[3]);
      // part of stage 3
      q[13] = highbd_idct_sub_dual(q[0], q[1]);
      q[0] = highbd_idct_add_dual(q[0], q[1]);
      q[14] = highbd_idct_sub_dual(q[2], q[3]);
      q[2] = highbd_idct_add_dual(q[2], q[3]);
      // part of stage 4
      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[1], &q[3]);

      // generate 10,11,12,13
      // part of stage 2
      load_from_transformed(trans_buf, 10, 22, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_22_64, cospi_10_64, &q[5], &q[7]);
      load_from_transformed(trans_buf, 26, 6, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_6_64, cospi_26_64, &q[4], &q[6]);
      // part of stage 3
      q[14] = highbd_idct_sub_dual(q[4], q[5]);
      q[5] = highbd_idct_add_dual(q[4], q[5]);
      q[13] = highbd_idct_sub_dual(q[6], q[7]);
      q[6] = highbd_idct_add_dual(q[6], q[7]);
      // part of stage 4
      do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[4], &q[7]);
      // part of stage 5
      q[8] = highbd_idct_add_dual(q[0], q[5]);
      q[9] = highbd_idct_add_dual(q[1], q[7]);
      q[13] = highbd_idct_sub_dual(q[1], q[7]);
      q[14] = highbd_idct_sub_dual(q[3], q[4]);
      q[10] = highbd_idct_add_dual(q[3], q[4]);
      q[15] = highbd_idct_add_dual(q[2], q[6]);
      store_in_output(out, 8, 15, q[8], q[15]);
      store_in_output(out, 9, 14, q[9], q[10]);
      // part of stage 6
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);
      store_in_output(out, 13, 10, q[3], q[1]);
      q[13] = highbd_idct_sub_dual(q[0], q[5]);
      q[14] = highbd_idct_sub_dual(q[2], q[6]);
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);
      store_in_output(out, 11, 12, q[1], q[3]);

      // -----------------------------------------
      // BLOCK D: 0-3,4-7
      // -----------------------------------------
      // generate 4,5,6,7
      // part of stage 3
      load_from_transformed(trans_buf, 4, 28, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 20, 12, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);
      // part of stage 4
      q[13] = highbd_idct_sub_dual(q[0], q[1]);
      q[0] = highbd_idct_add_dual(q[0], q[1]);
      q[14] = highbd_idct_sub_dual(q[2], q[3]);
      q[2] = highbd_idct_add_dual(q[2], q[3]);
      // part of stage 5
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);

      // generate 0,1,2,3
      // part of stage 4
      load_from_transformed(trans_buf, 0, 16, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[5], &q[7]);
      load_from_transformed(trans_buf, 8, 24, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[14], &q[6]);
      // part of stage 5
      q[4] = highbd_idct_add_dual(q[7], q[6]);
      q[7] = highbd_idct_sub_dual(q[7], q[6]);
      q[6] = highbd_idct_sub_dual(q[5], q[14]);
      q[5] = highbd_idct_add_dual(q[5], q[14]);
      // part of stage 6
      q[8] = highbd_idct_add_dual(q[4], q[2]);
      q[9] = highbd_idct_add_dual(q[5], q[3]);
      q[10] = highbd_idct_add_dual(q[6], q[1]);
      q[11] = highbd_idct_add_dual(q[7], q[0]);
      q[12] = highbd_idct_sub_dual(q[7], q[0]);
      q[13] = highbd_idct_sub_dual(q[6], q[1]);
      q[14] = highbd_idct_sub_dual(q[5], q[3]);
      q[15] = highbd_idct_sub_dual(q[4], q[2]);
      // part of stage 7
      load_from_output(out, 14, 15, &q[0], &q[1]);
      q[2] = highbd_idct_add_dual(q[8], q[1]);
      q[3] = highbd_idct_add_dual(q[9], q[0]);
      q[4] = highbd_idct_sub_dual(q[9], q[0]);
      q[5] = highbd_idct_sub_dual(q[8], q[1]);
      load_from_output(out, 16, 17, &q[0], &q[1]);
      q[8] = highbd_idct_add_dual(q[4], q[1]);
      q[9] = highbd_idct_add_dual(q[5], q[0]);
      q[6] = highbd_idct_sub_dual(q[5], q[0]);
      q[7] = highbd_idct_sub_dual(q[4], q[1]);

      if (idct32_pass_loop == 0) {
        idct32_bands_end_1st_pass(out, q);
      } else {
        const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
        idct32_bands_end_2nd_pass(out, dst, stride, max, q);
        dst += 8;
      }
    }
  }
}

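// Entry point. For bd == 8 the regular (lower bit-depth) NEON idct is reused
// on the 16-bit destination; higher bit depths take the path above.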
void vpx_highbd_idct32x32_1024_add_neon(const tran_low_t *input, uint16_t *dest,
                                        int stride, int bd) {
  if (bd == 8) {
    vpx_idct32_32_neon(input, CAST_TO_BYTEPTR(dest), stride, 1);
  } else {
    vpx_highbd_idct32_32_neon(input, dest, stride, bd);
  }
}